aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/Kconfig.debug9
-rw-r--r--arch/powerpc/Makefile4
-rw-r--r--arch/powerpc/boot/Makefile4
-rw-r--r--arch/powerpc/boot/crt0.S26
-rw-r--r--arch/powerpc/boot/dts/b4860emu.dts223
-rw-r--r--arch/powerpc/boot/dts/b4qds.dtsi17
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-post.dtsi60
-rw-r--r--arch/powerpc/boot/dts/fsl/b4si-post.dtsi89
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-post.dtsi37
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-post.dtsi11
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi65
-rw-r--r--arch/powerpc/boot/dts/fsl/t2081si-post.dtsi105
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi265
-rw-r--r--arch/powerpc/boot/dts/kmcoge4.dts15
-rw-r--r--arch/powerpc/boot/dts/oca4080.dts15
-rw-r--r--arch/powerpc/boot/dts/p1023rdb.dts18
-rw-r--r--arch/powerpc/boot/dts/p2041rdb.dts17
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts17
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts17
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts17
-rw-r--r--arch/powerpc/boot/dts/p5040ds.dts17
-rw-r--r--arch/powerpc/boot/dts/t104xqds.dtsi17
-rw-r--r--arch/powerpc/boot/dts/t104xrdb.dtsi14
-rw-r--r--arch/powerpc/boot/dts/t208xqds.dtsi19
-rw-r--r--arch/powerpc/boot/dts/t208xrdb.dtsi15
-rw-r--r--arch/powerpc/boot/dts/t4240qds.dts17
-rw-r--r--arch/powerpc/boot/dts/t4240rdb.dts15
-rw-r--r--arch/powerpc/boot/libfdt-wrapper.c6
-rw-r--r--arch/powerpc/boot/libfdt_env.h14
-rw-r--r--arch/powerpc/boot/of.h8
-rw-r--r--arch/powerpc/boot/planetcore.c33
-rw-r--r--arch/powerpc/boot/planetcore.h3
-rwxr-xr-xarch/powerpc/boot/wrapper2
-rw-r--r--arch/powerpc/configs/cell_defconfig3
-rw-r--r--arch/powerpc/configs/celleb_defconfig152
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig7
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig15
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig3
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig3
-rw-r--r--arch/powerpc/include/asm/Kbuild4
-rw-r--r--arch/powerpc/include/asm/cache.h3
-rw-r--r--arch/powerpc/include/asm/cputable.h8
-rw-r--r--arch/powerpc/include/asm/dbdma.h12
-rw-r--r--arch/powerpc/include/asm/dcr-native.h2
-rw-r--r--arch/powerpc/include/asm/device.h6
-rw-r--r--arch/powerpc/include/asm/div64.h1
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h29
-rw-r--r--arch/powerpc/include/asm/firmware.h10
-rw-r--r--arch/powerpc/include/asm/iommu.h6
-rw-r--r--arch/powerpc/include/asm/irq_regs.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h16
-rw-r--r--arch/powerpc/include/asm/kvm_host.h2
-rw-r--r--arch/powerpc/include/asm/local64.h1
-rw-r--r--arch/powerpc/include/asm/machdep.h19
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h1
-rw-r--r--arch/powerpc/include/asm/mpc85xx.h1
-rw-r--r--arch/powerpc/include/asm/mpic.h17
-rw-r--r--arch/powerpc/include/asm/nmi.h4
-rw-r--r--arch/powerpc/include/asm/nvram.h50
-rw-r--r--arch/powerpc/include/asm/opal-api.h735
-rw-r--r--arch/powerpc/include/asm/opal.h770
-rw-r--r--arch/powerpc/include/asm/paca.h4
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h60
-rw-r--r--arch/powerpc/include/asm/pci.h2
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h8
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h192
-rw-r--r--arch/powerpc/include/asm/rtas.h33
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/smp.h5
-rw-r--r--arch/powerpc/include/asm/swab.h26
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/ucc_slow.h13
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/asm/vga.h4
-rw-r--r--arch/powerpc/include/asm/xics.h2
-rw-r--r--arch/powerpc/include/uapi/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/uapi/asm/tm.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/cacheinfo.c44
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S10
-rw-r--r--arch/powerpc/kernel/cputable.c4
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c11
-rw-r--r--arch/powerpc/kernel/eeh.c176
-rw-r--r--arch/powerpc/kernel/eeh_cache.c25
-rw-r--r--arch/powerpc/kernel/eeh_dev.c14
-rw-r--r--arch/powerpc/kernel/eeh_driver.c22
-rw-r--r--arch/powerpc/kernel/eeh_pe.c129
-rw-r--r--arch/powerpc/kernel/entry_64.S24
-rw-r--r--arch/powerpc/kernel/idle_power7.S1
-rw-r--r--arch/powerpc/kernel/mce_power.c53
-rw-r--r--arch/powerpc/kernel/nvram_64.c677
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c57
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c9
-rw-r--r--arch/powerpc/kernel/pci_dn.c309
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c9
-rw-r--r--arch/powerpc/kernel/process.c9
-rw-r--r--arch/powerpc/kernel/prom_init.c4
-rw-r--r--arch/powerpc/kernel/rtas.c30
-rw-r--r--arch/powerpc/kernel/rtas_pci.c49
-rw-r--r--arch/powerpc/kernel/setup_64.c20
-rw-r--r--arch/powerpc/kernel/syscalls.c17
-rw-r--r--arch/powerpc/kernel/systbl.S2
-rw-r--r--arch/powerpc/kernel/systbl_chk.c2
-rw-r--r--arch/powerpc/kernel/tm.S8
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/kernel/vector.S24
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c38
-rw-r--r--arch/powerpc/lib/alloc.c2
-rw-r--r--arch/powerpc/lib/copy_32.S127
-rw-r--r--arch/powerpc/lib/copypage_power7.S32
-rw-r--r--arch/powerpc/lib/copyuser_power7.S226
-rw-r--r--arch/powerpc/lib/crtsavres.S96
-rw-r--r--arch/powerpc/lib/ldstfp.S32
-rw-r--r--arch/powerpc/lib/locks.c1
-rw-r--r--arch/powerpc/lib/memcpy_power7.S226
-rw-r--r--arch/powerpc/lib/ppc_ksyms.c4
-rw-r--r--arch/powerpc/lib/rheap.c2
-rw-r--r--arch/powerpc/mm/Makefile1
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c2
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c4
-rw-r--r--arch/powerpc/mm/init_64.c1
-rw-r--r--arch/powerpc/mm/mem.c3
-rw-r--r--arch/powerpc/mm/mmu_decl.h2
-rw-r--r--arch/powerpc/mm/numa.c62
-rw-r--r--arch/powerpc/mm/pgtable_32.c18
-rw-r--r--arch/powerpc/mm/pgtable_64.c6
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c5
-rw-r--r--arch/powerpc/mm/tlb_hash64.c2
-rw-r--r--arch/powerpc/mm/vphn.c70
-rw-r--r--arch/powerpc/mm/vphn.h16
-rw-r--r--arch/powerpc/perf/callchain.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c4
-rw-r--r--arch/powerpc/perf/hv-24x7.c251
-rw-r--r--arch/powerpc/perf/hv-24x7.h8
-rw-r--r--arch/powerpc/platforms/85xx/common.c1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c12
-rw-r--r--arch/powerpc/platforms/85xx/smp.c4
-rw-r--r--arch/powerpc/platforms/Kconfig5
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype3
-rw-r--r--arch/powerpc/platforms/cell/Kconfig11
-rw-r--r--arch/powerpc/platforms/cell/Makefile15
-rw-r--r--arch/powerpc/platforms/cell/beat.c264
-rw-r--r--arch/powerpc/platforms/cell/beat.h39
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c445
-rw-r--r--arch/powerpc/platforms/cell/beat_hvCall.S285
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.c253
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.h30
-rw-r--r--arch/powerpc/platforms/cell/beat_iommu.c115
-rw-r--r--arch/powerpc/platforms/cell/beat_spu_priv1.c205
-rw-r--r--arch/powerpc/platforms/cell/beat_syscall.h164
-rw-r--r--arch/powerpc/platforms/cell/beat_udbg.c98
-rw-r--r--arch/powerpc/platforms/cell/beat_wrapper.h290
-rw-r--r--arch/powerpc/platforms/cell/cell.h24
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.c500
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.h46
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc.h232
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_epci.c428
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c538
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_sio.c99
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_uhc.c95
-rw-r--r--arch/powerpc/platforms/cell/celleb_setup.c243
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c11
-rw-r--r--arch/powerpc/platforms/cell/setup.c5
-rw-r--r--arch/powerpc/platforms/cell/smp.c9
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c1
-rw-r--r--arch/powerpc/platforms/chrp/setup.c2
-rw-r--r--arch/powerpc/platforms/maple/maple.h2
-rw-r--r--arch/powerpc/platforms/maple/pci.c4
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c6
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h1
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c5
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c38
-rw-r--r--arch/powerpc/platforms/powermac/pic.c3
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h3
-rw-r--r--arch/powerpc/platforms/powermac/setup.c22
-rw-r--r--arch/powerpc/platforms/powermac/smp.c18
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig7
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c1149
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c1300
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c10
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor.c30
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S5
-rw-r--r--arch/powerpc/platforms/powernv/opal.c92
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c797
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c190
-rw-r--r--arch/powerpc/platforms/powernv/pci.h38
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c54
-rw-r--r--arch/powerpc/platforms/powernv/smp.c13
-rw-r--r--arch/powerpc/platforms/ps3/smp.c4
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c118
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c98
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c489
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c9
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c26
-rw-r--r--arch/powerpc/platforms/pseries/msi.c6
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c674
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c5
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h14
-rw-r--r--arch/powerpc/platforms/pseries/setup.c48
-rw-r--r--arch/powerpc/platforms/pseries/smp.c6
-rwxr-xr-xarch/powerpc/relocs_check.pl66
-rwxr-xr-xarch/powerpc/relocs_check.sh59
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c10
-rw-r--r--arch/powerpc/sysdev/dcr.c2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c29
-rw-r--r--arch/powerpc/sysdev/fsl_msi.h2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c15
-rw-r--r--arch/powerpc/sysdev/mpic.c30
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c25
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c5
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c4
233 files changed, 7188 insertions, 9344 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9b780e0d2c18..190cc48abc0c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -152,6 +152,7 @@ config PPC
152 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN 152 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
153 select NO_BOOTMEM 153 select NO_BOOTMEM
154 select HAVE_GENERIC_RCU_GUP 154 select HAVE_GENERIC_RCU_GUP
155 select HAVE_PERF_EVENTS_NMI if PPC64
155 156
156config GENERIC_CSUM 157config GENERIC_CSUM
157 def_bool CPU_LITTLE_ENDIAN 158 def_bool CPU_LITTLE_ENDIAN
@@ -189,9 +190,6 @@ config ARCH_MAY_HAVE_PC_FDC
189 bool 190 bool
190 default PCI 191 default PCI
191 192
192config PPC_OF
193 def_bool y
194
195config PPC_UDBG_16550 193config PPC_UDBG_16550
196 bool 194 bool
197 default n 195 default n
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index ec2e40f2cc11..0efa8f90a8f1 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -117,7 +117,7 @@ config BDI_SWITCH
117 117
118config BOOTX_TEXT 118config BOOTX_TEXT
119 bool "Support for early boot text console (BootX or OpenFirmware only)" 119 bool "Support for early boot text console (BootX or OpenFirmware only)"
120 depends on PPC_OF && PPC_BOOK3S 120 depends on PPC_BOOK3S
121 help 121 help
122 Say Y here to see progress messages from the boot firmware in text 122 Say Y here to see progress messages from the boot firmware in text
123 mode. Requires either BootX or Open Firmware. 123 mode. Requires either BootX or Open Firmware.
@@ -193,13 +193,6 @@ config PPC_EARLY_DEBUG_PAS_REALMODE
193 Select this to enable early debugging for PA Semi. 193 Select this to enable early debugging for PA Semi.
194 Output will be on UART0. 194 Output will be on UART0.
195 195
196config PPC_EARLY_DEBUG_BEAT
197 bool "Beat HV Console"
198 depends on PPC_CELLEB
199 select PPC_UDBG_BEAT
200 help
201 Select this to enable early debugging for Celleb with Beat.
202
203config PPC_EARLY_DEBUG_44x 196config PPC_EARLY_DEBUG_44x
204 bool "Early serial debugging for IBM/AMCC 44x CPUs" 197 bool "Early serial debugging for IBM/AMCC 44x CPUs"
205 depends on 44x 198 depends on 44x
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index fc502e042438..07a480861f78 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -248,10 +248,10 @@ boot := arch/$(ARCH)/boot
248 248
249ifeq ($(CONFIG_RELOCATABLE),y) 249ifeq ($(CONFIG_RELOCATABLE),y)
250quiet_cmd_relocs_check = CALL $< 250quiet_cmd_relocs_check = CALL $<
251 cmd_relocs_check = perl $< "$(OBJDUMP)" "$(obj)/vmlinux" 251 cmd_relocs_check = $(CONFIG_SHELL) $< "$(OBJDUMP)" "$(obj)/vmlinux"
252 252
253PHONY += relocs_check 253PHONY += relocs_check
254relocs_check: arch/powerpc/relocs_check.pl vmlinux 254relocs_check: arch/powerpc/relocs_check.sh vmlinux
255 $(call cmd,relocs_check) 255 $(call cmd,relocs_check)
256 256
257zImage: relocs_check 257zImage: relocs_check
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8a5bc1cfc6aa..73eddda53b8e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -110,7 +110,6 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
110src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S 110src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
111src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S 111src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
112src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S 112src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
113src-plat-$(CONFIG_PPC_CELLEB) += pseries-head.S
114src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S 113src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S
115 114
116src-wlib := $(sort $(src-wlib-y)) 115src-wlib := $(sort $(src-wlib-y))
@@ -215,7 +214,6 @@ image-$(CONFIG_PPC_POWERNV) += zImage.pseries
215image-$(CONFIG_PPC_MAPLE) += zImage.maple 214image-$(CONFIG_PPC_MAPLE) += zImage.maple
216image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries 215image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
217image-$(CONFIG_PPC_PS3) += dtbImage.ps3 216image-$(CONFIG_PPC_PS3) += dtbImage.ps3
218image-$(CONFIG_PPC_CELLEB) += zImage.pseries
219image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries 217image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
220image-$(CONFIG_PPC_CHRP) += zImage.chrp 218image-$(CONFIG_PPC_CHRP) += zImage.chrp
221image-$(CONFIG_PPC_EFIKA) += zImage.chrp 219image-$(CONFIG_PPC_EFIKA) += zImage.chrp
@@ -317,7 +315,7 @@ endif
317# Allow extra targets to be added to the defconfig 315# Allow extra targets to be added to the defconfig
318image-y += $(subst ",,$(CONFIG_EXTRA_TARGETS)) 316image-y += $(subst ",,$(CONFIG_EXTRA_TARGETS))
319 317
320initrd- := $(patsubst zImage%, zImage.initrd%, $(image-n) $(image-)) 318initrd- := $(patsubst zImage%, zImage.initrd%, $(image-))
321initrd-y := $(patsubst zImage%, zImage.initrd%, \ 319initrd-y := $(patsubst zImage%, zImage.initrd%, \
322 $(patsubst dtbImage%, dtbImage.initrd%, \ 320 $(patsubst dtbImage%, dtbImage.initrd%, \
323 $(patsubst simpleImage%, simpleImage.initrd%, \ 321 $(patsubst simpleImage%, simpleImage.initrd%, \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 14de4f8778a7..12866ccb5694 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -155,29 +155,29 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
155 ld r9,(p_rela-p_base)(r10) 155 ld r9,(p_rela-p_base)(r10)
156 add r9,r9,r10 156 add r9,r9,r10
157 157
158 li r7,0 158 li r13,0
159 li r8,0 159 li r8,0
1609: ld r6,0(r11) /* get tag */ 1609: ld r12,0(r11) /* get tag */
161 cmpdi r6,0 161 cmpdi r12,0
162 beq 12f /* end of list */ 162 beq 12f /* end of list */
163 cmpdi r6,RELA 163 cmpdi r12,RELA
164 bne 10f 164 bne 10f
165 ld r7,8(r11) /* get RELA pointer in r7 */ 165 ld r13,8(r11) /* get RELA pointer in r13 */
166 b 11f 166 b 11f
16710: addis r6,r6,(-RELACOUNT)@ha 16710: addis r12,r12,(-RELACOUNT)@ha
168 cmpdi r6,RELACOUNT@l 168 cmpdi r12,RELACOUNT@l
169 bne 11f 169 bne 11f
170 ld r8,8(r11) /* get RELACOUNT value in r8 */ 170 ld r8,8(r11) /* get RELACOUNT value in r8 */
17111: addi r11,r11,16 17111: addi r11,r11,16
172 b 9b 172 b 9b
17312: 17312:
174 cmpdi r7,0 /* check we have both RELA and RELACOUNT */ 174 cmpdi r13,0 /* check we have both RELA and RELACOUNT */
175 cmpdi cr1,r8,0 175 cmpdi cr1,r8,0
176 beq 3f 176 beq 3f
177 beq cr1,3f 177 beq cr1,3f
178 178
179 /* Calcuate the runtime offset. */ 179 /* Calcuate the runtime offset. */
180 subf r7,r7,r9 180 subf r13,r13,r9
181 181
182 /* Run through the list of relocations and process the 182 /* Run through the list of relocations and process the
183 * R_PPC64_RELATIVE ones. */ 183 * R_PPC64_RELATIVE ones. */
@@ -185,10 +185,10 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
18513: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */ 18513: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
186 cmpdi r0,22 /* R_PPC64_RELATIVE */ 186 cmpdi r0,22 /* R_PPC64_RELATIVE */
187 bne 3f 187 bne 3f
188 ld r6,0(r9) /* reloc->r_offset */ 188 ld r12,0(r9) /* reloc->r_offset */
189 ld r0,16(r9) /* reloc->r_addend */ 189 ld r0,16(r9) /* reloc->r_addend */
190 add r0,r0,r7 190 add r0,r0,r13
191 stdx r0,r7,r6 191 stdx r0,r13,r12
192 addi r9,r9,24 192 addi r9,r9,24
193 bdnz 13b 193 bdnz 13b
194 194
@@ -218,7 +218,7 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
218 beq 6f 218 beq 6f
219 ld r1,0(r8) 219 ld r1,0(r8)
220 li r0,0 220 li r0,0
221 stdu r0,-16(r1) /* establish a stack frame */ 221 stdu r0,-112(r1) /* establish a stack frame */
2226: 2226:
223#endif /* __powerpc64__ */ 223#endif /* __powerpc64__ */
224 /* Call platform_init() */ 224 /* Call platform_init() */
diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts
deleted file mode 100644
index 2aa5cd318ce8..000000000000
--- a/arch/powerpc/boot/dts/b4860emu.dts
+++ /dev/null
@@ -1,223 +0,0 @@
1/*
2 * B4860 emulator Device Tree Source
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * This software is provided by Freescale Semiconductor "as is" and any
24 * express or implied warranties, including, but not limited to, the implied
25 * warranties of merchantability and fitness for a particular purpose are
26 * disclaimed. In no event shall Freescale Semiconductor be liable for any
27 * direct, indirect, incidental, special, exemplary, or consequential damages
28 * (including, but not limited to, procurement of substitute goods or services;
29 * loss of use, data, or profits; or business interruption) however caused and
30 * on any theory of liability, whether in contract, strict liability, or tort
31 * (including negligence or otherwise) arising in any way out of the use of
32 * this software, even if advised of the possibility of such damage.
33 */
34
35/dts-v1/;
36
37/include/ "fsl/e6500_power_isa.dtsi"
38
39/ {
40 compatible = "fsl,B4860";
41 #address-cells = <2>;
42 #size-cells = <2>;
43 interrupt-parent = <&mpic>;
44
45 aliases {
46 ccsr = &soc;
47
48 serial0 = &serial0;
49 serial1 = &serial1;
50 serial2 = &serial2;
51 serial3 = &serial3;
52 dma0 = &dma0;
53 dma1 = &dma1;
54 };
55
56 cpus {
57 #address-cells = <1>;
58 #size-cells = <0>;
59
60 cpu0: PowerPC,e6500@0 {
61 device_type = "cpu";
62 reg = <0 1>;
63 next-level-cache = <&L2>;
64 fsl,portid-mapping = <0x80000000>;
65 };
66 cpu1: PowerPC,e6500@2 {
67 device_type = "cpu";
68 reg = <2 3>;
69 next-level-cache = <&L2>;
70 fsl,portid-mapping = <0x80000000>;
71 };
72 cpu2: PowerPC,e6500@4 {
73 device_type = "cpu";
74 reg = <4 5>;
75 next-level-cache = <&L2>;
76 fsl,portid-mapping = <0x80000000>;
77 };
78 cpu3: PowerPC,e6500@6 {
79 device_type = "cpu";
80 reg = <6 7>;
81 next-level-cache = <&L2>;
82 fsl,portid-mapping = <0x80000000>;
83 };
84 };
85};
86
87/ {
88 model = "fsl,B4860QDS";
89 compatible = "fsl,B4860EMU", "fsl,B4860QDS";
90 #address-cells = <2>;
91 #size-cells = <2>;
92 interrupt-parent = <&mpic>;
93
94 ifc: localbus@ffe124000 {
95 reg = <0xf 0xfe124000 0 0x2000>;
96 ranges = <0 0 0xf 0xe8000000 0x08000000
97 2 0 0xf 0xff800000 0x00010000
98 3 0 0xf 0xffdf0000 0x00008000>;
99
100 nor@0,0 {
101 #address-cells = <1>;
102 #size-cells = <1>;
103 compatible = "cfi-flash";
104 reg = <0x0 0x0 0x8000000>;
105 bank-width = <2>;
106 device-width = <1>;
107 };
108 };
109
110 memory {
111 device_type = "memory";
112 };
113
114 soc: soc@ffe000000 {
115 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
116 reg = <0xf 0xfe000000 0 0x00001000>;
117 };
118};
119
120&ifc {
121 #address-cells = <2>;
122 #size-cells = <1>;
123 compatible = "fsl,ifc", "simple-bus";
124 interrupts = <25 2 0 0>;
125};
126
127&soc {
128 #address-cells = <1>;
129 #size-cells = <1>;
130 device_type = "soc";
131 compatible = "simple-bus";
132
133 soc-sram-error {
134 compatible = "fsl,soc-sram-error";
135 interrupts = <16 2 1 2>;
136 };
137
138 corenet-law@0 {
139 compatible = "fsl,corenet-law";
140 reg = <0x0 0x1000>;
141 fsl,num-laws = <32>;
142 };
143
144 ddr1: memory-controller@8000 {
145 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
146 reg = <0x8000 0x1000>;
147 interrupts = <16 2 1 8>;
148 };
149
150 ddr2: memory-controller@9000 {
151 compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
152 reg = <0x9000 0x1000>;
153 interrupts = <16 2 1 9>;
154 };
155
156 cpc: l3-cache-controller@10000 {
157 compatible = "fsl,b4-l3-cache-controller", "cache";
158 reg = <0x10000 0x1000
159 0x11000 0x1000>;
160 interrupts = <16 2 1 4>;
161 };
162
163 corenet-cf@18000 {
164 compatible = "fsl,corenet2-cf", "fsl,corenet-cf";
165 reg = <0x18000 0x1000>;
166 interrupts = <16 2 1 0>;
167 fsl,ccf-num-csdids = <32>;
168 fsl,ccf-num-snoopids = <32>;
169 };
170
171 iommu@20000 {
172 compatible = "fsl,pamu-v1.0", "fsl,pamu";
173 reg = <0x20000 0x4000>;
174 fsl,portid-mapping = <0x8000>;
175 #address-cells = <1>;
176 #size-cells = <1>;
177 interrupts = <
178 24 2 0 0
179 16 2 1 1>;
180 pamu0: pamu@0 {
181 reg = <0 0x1000>;
182 fsl,primary-cache-geometry = <8 1>;
183 fsl,secondary-cache-geometry = <32 2>;
184 };
185 };
186
187/include/ "fsl/qoriq-mpic.dtsi"
188
189 guts: global-utilities@e0000 {
190 compatible = "fsl,b4-device-config";
191 reg = <0xe0000 0xe00>;
192 fsl,has-rstcr;
193 fsl,liodn-bits = <12>;
194 };
195
196/include/ "fsl/qoriq-clockgen2.dtsi"
197 global-utilities@e1000 {
198 compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
199 };
200
201/include/ "fsl/qoriq-dma-0.dtsi"
202 dma@100300 {
203 fsl,iommu-parent = <&pamu0>;
204 fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
205 };
206
207/include/ "fsl/qoriq-dma-1.dtsi"
208 dma@101300 {
209 fsl,iommu-parent = <&pamu0>;
210 fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
211 };
212
213/include/ "fsl/qoriq-i2c-0.dtsi"
214/include/ "fsl/qoriq-i2c-1.dtsi"
215/include/ "fsl/qoriq-duart-0.dtsi"
216/include/ "fsl/qoriq-duart-1.dtsi"
217
218 L2: l2-cache-controller@c20000 {
219 compatible = "fsl,b4-l2-cache-controller";
220 reg = <0xc20000 0x1000>;
221 next-level-cache = <&cpc>;
222 };
223};
diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi
index e5bde0b85135..24ed80dc2120 100644
--- a/arch/powerpc/boot/dts/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * B4420DS Device Tree Source 2 * B4420DS Device Tree Source
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor, Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor, Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -97,10 +97,25 @@
97 device_type = "memory"; 97 device_type = "memory";
98 }; 98 };
99 99
100 reserved-memory {
101 #address-cells = <2>;
102 #size-cells = <2>;
103 ranges;
104
105 bman_fbpr: bman-fbpr {
106 size = <0 0x1000000>;
107 alignment = <0 0x1000000>;
108 };
109 };
110
100 dcsr: dcsr@f00000000 { 111 dcsr: dcsr@f00000000 {
101 ranges = <0x00000000 0xf 0x00000000 0x01052000>; 112 ranges = <0x00000000 0xf 0x00000000 0x01052000>;
102 }; 113 };
103 114
115 bportals: bman-portals@ff4000000 {
116 ranges = <0x0 0xf 0xf4000000 0x2000000>;
117 };
118
104 soc: soc@ffe000000 { 119 soc: soc@ffe000000 {
105 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 120 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
106 reg = <0xf 0xfe000000 0 0x00001000>; 121 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index 65100b9636b7..f35e9e0a5445 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * B4860 Silicon/SoC Device Tree Source (post include) 2 * B4860 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -109,6 +109,64 @@
109 }; 109 };
110}; 110};
111 111
112&bportals {
113 bman-portal@38000 {
114 compatible = "fsl,bman-portal";
115 reg = <0x38000 0x4000>, <0x100e000 0x1000>;
116 interrupts = <133 2 0 0>;
117 };
118 bman-portal@3c000 {
119 compatible = "fsl,bman-portal";
120 reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
121 interrupts = <135 2 0 0>;
122 };
123 bman-portal@40000 {
124 compatible = "fsl,bman-portal";
125 reg = <0x40000 0x4000>, <0x1010000 0x1000>;
126 interrupts = <137 2 0 0>;
127 };
128 bman-portal@44000 {
129 compatible = "fsl,bman-portal";
130 reg = <0x44000 0x4000>, <0x1011000 0x1000>;
131 interrupts = <139 2 0 0>;
132 };
133 bman-portal@48000 {
134 compatible = "fsl,bman-portal";
135 reg = <0x48000 0x4000>, <0x1012000 0x1000>;
136 interrupts = <141 2 0 0>;
137 };
138 bman-portal@4c000 {
139 compatible = "fsl,bman-portal";
140 reg = <0x4c000 0x4000>, <0x1013000 0x1000>;
141 interrupts = <143 2 0 0>;
142 };
143 bman-portal@50000 {
144 compatible = "fsl,bman-portal";
145 reg = <0x50000 0x4000>, <0x1014000 0x1000>;
146 interrupts = <145 2 0 0>;
147 };
148 bman-portal@54000 {
149 compatible = "fsl,bman-portal";
150 reg = <0x54000 0x4000>, <0x1015000 0x1000>;
151 interrupts = <147 2 0 0>;
152 };
153 bman-portal@58000 {
154 compatible = "fsl,bman-portal";
155 reg = <0x58000 0x4000>, <0x1016000 0x1000>;
156 interrupts = <149 2 0 0>;
157 };
158 bman-portal@5c000 {
159 compatible = "fsl,bman-portal";
160 reg = <0x5c000 0x4000>, <0x1017000 0x1000>;
161 interrupts = <151 2 0 0>;
162 };
163 bman-portal@60000 {
164 compatible = "fsl,bman-portal";
165 reg = <0x60000 0x4000>, <0x1018000 0x1000>;
166 interrupts = <153 2 0 0>;
167 };
168};
169
112&soc { 170&soc {
113 ddr2: memory-controller@9000 { 171 ddr2: memory-controller@9000 {
114 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; 172 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 1a54ba71f685..73136c0029d2 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * B4420 Silicon/SoC Device Tree Source (post include) 2 * B4420 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor, Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor, Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * this software, even if advised of the possibility of such damage. 32 * this software, even if advised of the possibility of such damage.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10000 0>;
38};
39
35&ifc { 40&ifc {
36 #address-cells = <2>; 41 #address-cells = <2>;
37 #size-cells = <1>; 42 #size-cells = <1>;
@@ -128,6 +133,83 @@
128 }; 133 };
129}; 134};
130 135
136&bportals {
137 #address-cells = <0x1>;
138 #size-cells = <0x1>;
139 compatible = "simple-bus";
140
141 bman-portal@0 {
142 compatible = "fsl,bman-portal";
143 reg = <0x0 0x4000>, <0x1000000 0x1000>;
144 interrupts = <105 2 0 0>;
145 };
146 bman-portal@4000 {
147 compatible = "fsl,bman-portal";
148 reg = <0x4000 0x4000>, <0x1001000 0x1000>;
149 interrupts = <107 2 0 0>;
150 };
151 bman-portal@8000 {
152 compatible = "fsl,bman-portal";
153 reg = <0x8000 0x4000>, <0x1002000 0x1000>;
154 interrupts = <109 2 0 0>;
155 };
156 bman-portal@c000 {
157 compatible = "fsl,bman-portal";
158 reg = <0xc000 0x4000>, <0x1003000 0x1000>;
159 interrupts = <111 2 0 0>;
160 };
161 bman-portal@10000 {
162 compatible = "fsl,bman-portal";
163 reg = <0x10000 0x4000>, <0x1004000 0x1000>;
164 interrupts = <113 2 0 0>;
165 };
166 bman-portal@14000 {
167 compatible = "fsl,bman-portal";
168 reg = <0x14000 0x4000>, <0x1005000 0x1000>;
169 interrupts = <115 2 0 0>;
170 };
171 bman-portal@18000 {
172 compatible = "fsl,bman-portal";
173 reg = <0x18000 0x4000>, <0x1006000 0x1000>;
174 interrupts = <117 2 0 0>;
175 };
176 bman-portal@1c000 {
177 compatible = "fsl,bman-portal";
178 reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
179 interrupts = <119 2 0 0>;
180 };
181 bman-portal@20000 {
182 compatible = "fsl,bman-portal";
183 reg = <0x20000 0x4000>, <0x1008000 0x1000>;
184 interrupts = <121 2 0 0>;
185 };
186 bman-portal@24000 {
187 compatible = "fsl,bman-portal";
188 reg = <0x24000 0x4000>, <0x1009000 0x1000>;
189 interrupts = <123 2 0 0>;
190 };
191 bman-portal@28000 {
192 compatible = "fsl,bman-portal";
193 reg = <0x28000 0x4000>, <0x100a000 0x1000>;
194 interrupts = <125 2 0 0>;
195 };
196 bman-portal@2c000 {
197 compatible = "fsl,bman-portal";
198 reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
199 interrupts = <127 2 0 0>;
200 };
201 bman-portal@30000 {
202 compatible = "fsl,bman-portal";
203 reg = <0x30000 0x4000>, <0x100c000 0x1000>;
204 interrupts = <129 2 0 0>;
205 };
206 bman-portal@34000 {
207 compatible = "fsl,bman-portal";
208 reg = <0x34000 0x4000>, <0x100d000 0x1000>;
209 interrupts = <131 2 0 0>;
210 };
211};
212
131&soc { 213&soc {
132 #address-cells = <1>; 214 #address-cells = <1>;
133 #size-cells = <1>; 215 #size-cells = <1>;
@@ -261,6 +343,11 @@
261/include/ "qoriq-duart-1.dtsi" 343/include/ "qoriq-duart-1.dtsi"
262/include/ "qoriq-sec5.3-0.dtsi" 344/include/ "qoriq-sec5.3-0.dtsi"
263 345
346/include/ "qoriq-bman1.dtsi"
347 bman: bman@31a000 {
348 interrupts = <16 2 1 29>;
349 };
350
264 L2: l2-cache-controller@c20000 { 351 L2: l2-cache-controller@c20000 {
265 compatible = "fsl,b4-l2-cache-controller"; 352 compatible = "fsl,b4-l2-cache-controller";
266 reg = <0xc20000 0x1000>; 353 reg = <0xc20000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index 81437fdf1db4..7780f21430cb 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P1023/P1017 Silicon/SoC Device Tree Source (post include) 2 * P1023/P1017 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10 0>;
38};
39
35&lbc { 40&lbc {
36 #address-cells = <2>; 41 #address-cells = <2>;
37 #size-cells = <1>; 42 #size-cells = <1>;
@@ -97,6 +102,28 @@
97 }; 102 };
98}; 103};
99 104
105&bportals {
106 #address-cells = <1>;
107 #size-cells = <1>;
108 compatible = "simple-bus";
109
110 bman-portal@0 {
111 compatible = "fsl,bman-portal";
112 reg = <0x0 0x4000>, <0x100000 0x1000>;
113 interrupts = <30 2 0 0>;
114 };
115 bman-portal@4000 {
116 compatible = "fsl,bman-portal";
117 reg = <0x4000 0x4000>, <0x101000 0x1000>;
118 interrupts = <32 2 0 0>;
119 };
120 bman-portal@8000 {
121 compatible = "fsl,bman-portal";
122 reg = <0x8000 0x4000>, <0x102000 0x1000>;
123 interrupts = <34 2 0 0>;
124 };
125};
126
100&soc { 127&soc {
101 #address-cells = <1>; 128 #address-cells = <1>;
102 #size-cells = <1>; 129 #size-cells = <1>;
@@ -221,6 +248,14 @@
221/include/ "pq3-mpic.dtsi" 248/include/ "pq3-mpic.dtsi"
222/include/ "pq3-mpic-timer-B.dtsi" 249/include/ "pq3-mpic-timer-B.dtsi"
223 250
251 bman: bman@8a000 {
252 compatible = "fsl,bman";
253 reg = <0x8a000 0x1000>;
254 interrupts = <16 2 0 0>;
255 fsl,bman-portals = <&bportals>;
256 memory-region = <&bman_fbpr>;
257 };
258
224 global-utilities@e0000 { 259 global-utilities@e0000 {
225 compatible = "fsl,p1023-guts"; 260 compatible = "fsl,p1023-guts";
226 reg = <0xe0000 0x1000>; 261 reg = <0xe0000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index efd74db4f9b0..f2feacfd9a25 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P2041/P2040 Silicon/SoC Device Tree Source (post include) 2 * P2041/P2040 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10 0>;
38};
39
35&lbc { 40&lbc {
36 compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus"; 41 compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
37 interrupts = <25 2 0 0>; 42 interrupts = <25 2 0 0>;
@@ -216,6 +221,8 @@
216 }; 221 };
217}; 222};
218 223
224/include/ "qoriq-bman1-portals.dtsi"
225
219&soc { 226&soc {
220 #address-cells = <1>; 227 #address-cells = <1>;
221 #size-cells = <1>; 228 #size-cells = <1>;
@@ -407,4 +414,6 @@
407crypto: crypto@300000 { 414crypto: crypto@300000 {
408 fsl,iommu-parent = <&pamu1>; 415 fsl,iommu-parent = <&pamu1>;
409 }; 416 };
417
418/include/ "qoriq-bman1.dtsi"
410}; 419};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index d7425ef1ae41..d6fea37395ad 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P3041 Silicon/SoC Device Tree Source (post include) 2 * P3041 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10 0>;
38};
39
35&lbc { 40&lbc {
36 compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus"; 41 compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
37 interrupts = <25 2 0 0>; 42 interrupts = <25 2 0 0>;
@@ -243,6 +248,8 @@
243 }; 248 };
244}; 249};
245 250
251/include/ "qoriq-bman1-portals.dtsi"
252
246&soc { 253&soc {
247 #address-cells = <1>; 254 #address-cells = <1>;
248 #size-cells = <1>; 255 #size-cells = <1>;
@@ -434,4 +441,6 @@
434crypto: crypto@300000 { 441crypto: crypto@300000 {
435 fsl,iommu-parent = <&pamu1>; 442 fsl,iommu-parent = <&pamu1>;
436 }; 443 };
444
445/include/ "qoriq-bman1.dtsi"
437}; 446};
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 7005a4a4cef0..89482c9b2301 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P4080/P4040 Silicon/SoC Device Tree Source (post include) 2 * P4080/P4040 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10 0>;
38};
39
35&lbc { 40&lbc {
36 compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus"; 41 compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
37 interrupts = <25 2 0 0>; 42 interrupts = <25 2 0 0>;
@@ -243,6 +248,8 @@
243 248
244}; 249};
245 250
251/include/ "qoriq-bman1-portals.dtsi"
252
246&soc { 253&soc {
247 #address-cells = <1>; 254 #address-cells = <1>;
248 #size-cells = <1>; 255 #size-cells = <1>;
@@ -490,4 +497,6 @@
490crypto: crypto@300000 { 497crypto: crypto@300000 {
491 fsl,iommu-parent = <&pamu1>; 498 fsl,iommu-parent = <&pamu1>;
492 }; 499 };
500
501/include/ "qoriq-bman1.dtsi"
493}; 502};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 55834211bd28..6e04851e2fc9 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P5020/5010 Silicon/SoC Device Tree Source (post include) 2 * P5020/5010 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10000 0>;
38};
39
35&lbc { 40&lbc {
36 compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus"; 41 compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
37 interrupts = <25 2 0 0>; 42 interrupts = <25 2 0 0>;
@@ -240,6 +245,8 @@
240 }; 245 };
241}; 246};
242 247
248/include/ "qoriq-bman1-portals.dtsi"
249
243&soc { 250&soc {
244 #address-cells = <1>; 251 #address-cells = <1>;
245 #size-cells = <1>; 252 #size-cells = <1>;
@@ -421,6 +428,8 @@
421 fsl,iommu-parent = <&pamu1>; 428 fsl,iommu-parent = <&pamu1>;
422 }; 429 };
423 430
431/include/ "qoriq-bman1.dtsi"
432
424/include/ "qoriq-raid1.0-0.dtsi" 433/include/ "qoriq-raid1.0-0.dtsi"
425 raideng@320000 { 434 raideng@320000 {
426 fsl,iommu-parent = <&pamu1>; 435 fsl,iommu-parent = <&pamu1>;
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 6e4cd6ce363c..5e44dfa1e1a5 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P5040 Silicon/SoC Device Tree Source (post include) 2 * P5040 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * software, even if advised of the possibility of such damage. 32 * software, even if advised of the possibility of such damage.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10000 0>;
38};
39
35&lbc { 40&lbc {
36 compatible = "fsl,p5040-elbc", "fsl,elbc", "simple-bus"; 41 compatible = "fsl,p5040-elbc", "fsl,elbc", "simple-bus";
37 interrupts = <25 2 0 0>; 42 interrupts = <25 2 0 0>;
@@ -195,6 +200,8 @@
195 }; 200 };
196}; 201};
197 202
203/include/ "qoriq-bman1-portals.dtsi"
204
198&soc { 205&soc {
199 #address-cells = <1>; 206 #address-cells = <1>;
200 #size-cells = <1>; 207 #size-cells = <1>;
@@ -399,4 +406,6 @@
399 crypto@300000 { 406 crypto@300000 {
400 fsl,iommu-parent = <&pamu4>; 407 fsl,iommu-parent = <&pamu4>;
401 }; 408 };
409
410/include/ "qoriq-bman1.dtsi"
402}; 411};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 15ae462e758f..5cc01be5b152 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * T1040 Silicon/SoC Device Tree Source (post include) 2 * T1040 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2013 Freescale Semiconductor Inc. 4 * Copyright 2013 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10000 0>;
38};
39
35&ifc { 40&ifc {
36 #address-cells = <2>; 41 #address-cells = <2>;
37 #size-cells = <1>; 42 #size-cells = <1>;
@@ -218,6 +223,63 @@
218 }; 223 };
219}; 224};
220 225
226&bportals {
227 #address-cells = <0x1>;
228 #size-cells = <0x1>;
229 compatible = "simple-bus";
230
231 bman-portal@0 {
232 compatible = "fsl,bman-portal";
233 reg = <0x0 0x4000>, <0x1000000 0x1000>;
234 interrupts = <105 2 0 0>;
235 };
236 bman-portal@4000 {
237 compatible = "fsl,bman-portal";
238 reg = <0x4000 0x4000>, <0x1001000 0x1000>;
239 interrupts = <107 2 0 0>;
240 };
241 bman-portal@8000 {
242 compatible = "fsl,bman-portal";
243 reg = <0x8000 0x4000>, <0x1002000 0x1000>;
244 interrupts = <109 2 0 0>;
245 };
246 bman-portal@c000 {
247 compatible = "fsl,bman-portal";
248 reg = <0xc000 0x4000>, <0x1003000 0x1000>;
249 interrupts = <111 2 0 0>;
250 };
251 bman-portal@10000 {
252 compatible = "fsl,bman-portal";
253 reg = <0x10000 0x4000>, <0x1004000 0x1000>;
254 interrupts = <113 2 0 0>;
255 };
256 bman-portal@14000 {
257 compatible = "fsl,bman-portal";
258 reg = <0x14000 0x4000>, <0x1005000 0x1000>;
259 interrupts = <115 2 0 0>;
260 };
261 bman-portal@18000 {
262 compatible = "fsl,bman-portal";
263 reg = <0x18000 0x4000>, <0x1006000 0x1000>;
264 interrupts = <117 2 0 0>;
265 };
266 bman-portal@1c000 {
267 compatible = "fsl,bman-portal";
268 reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
269 interrupts = <119 2 0 0>;
270 };
271 bman-portal@20000 {
272 compatible = "fsl,bman-portal";
273 reg = <0x20000 0x4000>, <0x1008000 0x1000>;
274 interrupts = <121 2 0 0>;
275 };
276 bman-portal@24000 {
277 compatible = "fsl,bman-portal";
278 reg = <0x24000 0x4000>, <0x1009000 0x1000>;
279 interrupts = <123 2 0 0>;
280 };
281};
282
221&soc { 283&soc {
222 #address-cells = <1>; 284 #address-cells = <1>;
223 #size-cells = <1>; 285 #size-cells = <1>;
@@ -401,4 +463,5 @@
401 fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */ 463 fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */
402 }; 464 };
403/include/ "qoriq-sec5.0-0.dtsi" 465/include/ "qoriq-sec5.0-0.dtsi"
466/include/ "qoriq-bman1.dtsi"
404}; 467};
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index 1ce91e3485a9..86bdaf6cbd14 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * T2081 Silicon/SoC Device Tree Source (post include) 2 * T2081 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2013 Freescale Semiconductor Inc. 4 * Copyright 2013 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10000 0>;
38};
39
35&ifc { 40&ifc {
36 #address-cells = <2>; 41 #address-cells = <2>;
37 #size-cells = <1>; 42 #size-cells = <1>;
@@ -224,6 +229,103 @@
224 }; 229 };
225}; 230};
226 231
232&bportals {
233 #address-cells = <0x1>;
234 #size-cells = <0x1>;
235 compatible = "simple-bus";
236
237 bman-portal@0 {
238 compatible = "fsl,bman-portal";
239 reg = <0x0 0x4000>, <0x1000000 0x1000>;
240 interrupts = <105 2 0 0>;
241 };
242 bman-portal@4000 {
243 compatible = "fsl,bman-portal";
244 reg = <0x4000 0x4000>, <0x1001000 0x1000>;
245 interrupts = <107 2 0 0>;
246 };
247 bman-portal@8000 {
248 compatible = "fsl,bman-portal";
249 reg = <0x8000 0x4000>, <0x1002000 0x1000>;
250 interrupts = <109 2 0 0>;
251 };
252 bman-portal@c000 {
253 compatible = "fsl,bman-portal";
254 reg = <0xc000 0x4000>, <0x1003000 0x1000>;
255 interrupts = <111 2 0 0>;
256 };
257 bman-portal@10000 {
258 compatible = "fsl,bman-portal";
259 reg = <0x10000 0x4000>, <0x1004000 0x1000>;
260 interrupts = <113 2 0 0>;
261 };
262 bman-portal@14000 {
263 compatible = "fsl,bman-portal";
264 reg = <0x14000 0x4000>, <0x1005000 0x1000>;
265 interrupts = <115 2 0 0>;
266 };
267 bman-portal@18000 {
268 compatible = "fsl,bman-portal";
269 reg = <0x18000 0x4000>, <0x1006000 0x1000>;
270 interrupts = <117 2 0 0>;
271 };
272 bman-portal@1c000 {
273 compatible = "fsl,bman-portal";
274 reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
275 interrupts = <119 2 0 0>;
276 };
277 bman-portal@20000 {
278 compatible = "fsl,bman-portal";
279 reg = <0x20000 0x4000>, <0x1008000 0x1000>;
280 interrupts = <121 2 0 0>;
281 };
282 bman-portal@24000 {
283 compatible = "fsl,bman-portal";
284 reg = <0x24000 0x4000>, <0x1009000 0x1000>;
285 interrupts = <123 2 0 0>;
286 };
287 bman-portal@28000 {
288 compatible = "fsl,bman-portal";
289 reg = <0x28000 0x4000>, <0x100a000 0x1000>;
290 interrupts = <125 2 0 0>;
291 };
292 bman-portal@2c000 {
293 compatible = "fsl,bman-portal";
294 reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
295 interrupts = <127 2 0 0>;
296 };
297 bman-portal@30000 {
298 compatible = "fsl,bman-portal";
299 reg = <0x30000 0x4000>, <0x100c000 0x1000>;
300 interrupts = <129 2 0 0>;
301 };
302 bman-portal@34000 {
303 compatible = "fsl,bman-portal";
304 reg = <0x34000 0x4000>, <0x100d000 0x1000>;
305 interrupts = <131 2 0 0>;
306 };
307 bman-portal@38000 {
308 compatible = "fsl,bman-portal";
309 reg = <0x38000 0x4000>, <0x100e000 0x1000>;
310 interrupts = <133 2 0 0>;
311 };
312 bman-portal@3c000 {
313 compatible = "fsl,bman-portal";
314 reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
315 interrupts = <135 2 0 0>;
316 };
317 bman-portal@40000 {
318 compatible = "fsl,bman-portal";
319 reg = <0x40000 0x4000>, <0x1010000 0x1000>;
320 interrupts = <137 2 0 0>;
321 };
322 bman-portal@44000 {
323 compatible = "fsl,bman-portal";
324 reg = <0x44000 0x4000>, <0x1011000 0x1000>;
325 interrupts = <139 2 0 0>;
326 };
327};
328
227&soc { 329&soc {
228 #address-cells = <1>; 330 #address-cells = <1>;
229 #size-cells = <1>; 331 #size-cells = <1>;
@@ -400,6 +502,7 @@
400 phy_type = "utmi"; 502 phy_type = "utmi";
401 }; 503 };
402/include/ "qoriq-sec5.2-0.dtsi" 504/include/ "qoriq-sec5.2-0.dtsi"
505/include/ "qoriq-bman1.dtsi"
403 506
404 L2_1: l2-cache-controller@c20000 { 507 L2_1: l2-cache-controller@c20000 {
405 /* Cluster 0 L2 cache */ 508 /* Cluster 0 L2 cache */
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 0e96fcabe812..4d4f25895d8c 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * T4240 Silicon/SoC Device Tree Source (post include) 2 * T4240 Silicon/SoC Device Tree Source (post include)
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&bman_fbpr {
36 compatible = "fsl,bman-fbpr";
37 alloc-ranges = <0 0 0x10000 0>;
38};
39
35&ifc { 40&ifc {
36 #address-cells = <2>; 41 #address-cells = <2>;
37 #size-cells = <1>; 42 #size-cells = <1>;
@@ -294,6 +299,263 @@
294 }; 299 };
295}; 300};
296 301
302&bportals {
303 #address-cells = <0x1>;
304 #size-cells = <0x1>;
305 compatible = "simple-bus";
306
307 bman-portal@0 {
308 compatible = "fsl,bman-portal";
309 reg = <0x0 0x4000>, <0x1000000 0x1000>;
310 interrupts = <105 2 0 0>;
311 };
312 bman-portal@4000 {
313 compatible = "fsl,bman-portal";
314 reg = <0x4000 0x4000>, <0x1001000 0x1000>;
315 interrupts = <107 2 0 0>;
316 };
317 bman-portal@8000 {
318 compatible = "fsl,bman-portal";
319 reg = <0x8000 0x4000>, <0x1002000 0x1000>;
320 interrupts = <109 2 0 0>;
321 };
322 bman-portal@c000 {
323 compatible = "fsl,bman-portal";
324 reg = <0xc000 0x4000>, <0x1003000 0x1000>;
325 interrupts = <111 2 0 0>;
326 };
327 bman-portal@10000 {
328 compatible = "fsl,bman-portal";
329 reg = <0x10000 0x4000>, <0x1004000 0x1000>;
330 interrupts = <113 2 0 0>;
331 };
332 bman-portal@14000 {
333 compatible = "fsl,bman-portal";
334 reg = <0x14000 0x4000>, <0x1005000 0x1000>;
335 interrupts = <115 2 0 0>;
336 };
337 bman-portal@18000 {
338 compatible = "fsl,bman-portal";
339 reg = <0x18000 0x4000>, <0x1006000 0x1000>;
340 interrupts = <117 2 0 0>;
341 };
342 bman-portal@1c000 {
343 compatible = "fsl,bman-portal";
344 reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
345 interrupts = <119 2 0 0>;
346 };
347 bman-portal@20000 {
348 compatible = "fsl,bman-portal";
349 reg = <0x20000 0x4000>, <0x1008000 0x1000>;
350 interrupts = <121 2 0 0>;
351 };
352 bman-portal@24000 {
353 compatible = "fsl,bman-portal";
354 reg = <0x24000 0x4000>, <0x1009000 0x1000>;
355 interrupts = <123 2 0 0>;
356 };
357 bman-portal@28000 {
358 compatible = "fsl,bman-portal";
359 reg = <0x28000 0x4000>, <0x100a000 0x1000>;
360 interrupts = <125 2 0 0>;
361 };
362 bman-portal@2c000 {
363 compatible = "fsl,bman-portal";
364 reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
365 interrupts = <127 2 0 0>;
366 };
367 bman-portal@30000 {
368 compatible = "fsl,bman-portal";
369 reg = <0x30000 0x4000>, <0x100c000 0x1000>;
370 interrupts = <129 2 0 0>;
371 };
372 bman-portal@34000 {
373 compatible = "fsl,bman-portal";
374 reg = <0x34000 0x4000>, <0x100d000 0x1000>;
375 interrupts = <131 2 0 0>;
376 };
377 bman-portal@38000 {
378 compatible = "fsl,bman-portal";
379 reg = <0x38000 0x4000>, <0x100e000 0x1000>;
380 interrupts = <133 2 0 0>;
381 };
382 bman-portal@3c000 {
383 compatible = "fsl,bman-portal";
384 reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
385 interrupts = <135 2 0 0>;
386 };
387 bman-portal@40000 {
388 compatible = "fsl,bman-portal";
389 reg = <0x40000 0x4000>, <0x1010000 0x1000>;
390 interrupts = <137 2 0 0>;
391 };
392 bman-portal@44000 {
393 compatible = "fsl,bman-portal";
394 reg = <0x44000 0x4000>, <0x1011000 0x1000>;
395 interrupts = <139 2 0 0>;
396 };
397 bman-portal@48000 {
398 compatible = "fsl,bman-portal";
399 reg = <0x48000 0x4000>, <0x1012000 0x1000>;
400 interrupts = <141 2 0 0>;
401 };
402 bman-portal@4c000 {
403 compatible = "fsl,bman-portal";
404 reg = <0x4c000 0x4000>, <0x1013000 0x1000>;
405 interrupts = <143 2 0 0>;
406 };
407 bman-portal@50000 {
408 compatible = "fsl,bman-portal";
409 reg = <0x50000 0x4000>, <0x1014000 0x1000>;
410 interrupts = <145 2 0 0>;
411 };
412 bman-portal@54000 {
413 compatible = "fsl,bman-portal";
414 reg = <0x54000 0x4000>, <0x1015000 0x1000>;
415 interrupts = <147 2 0 0>;
416 };
417 bman-portal@58000 {
418 compatible = "fsl,bman-portal";
419 reg = <0x58000 0x4000>, <0x1016000 0x1000>;
420 interrupts = <149 2 0 0>;
421 };
422 bman-portal@5c000 {
423 compatible = "fsl,bman-portal";
424 reg = <0x5c000 0x4000>, <0x1017000 0x1000>;
425 interrupts = <151 2 0 0>;
426 };
427 bman-portal@60000 {
428 compatible = "fsl,bman-portal";
429 reg = <0x60000 0x4000>, <0x1018000 0x1000>;
430 interrupts = <153 2 0 0>;
431 };
432 bman-portal@64000 {
433 compatible = "fsl,bman-portal";
434 reg = <0x64000 0x4000>, <0x1019000 0x1000>;
435 interrupts = <155 2 0 0>;
436 };
437 bman-portal@68000 {
438 compatible = "fsl,bman-portal";
439 reg = <0x68000 0x4000>, <0x101a000 0x1000>;
440 interrupts = <157 2 0 0>;
441 };
442 bman-portal@6c000 {
443 compatible = "fsl,bman-portal";
444 reg = <0x6c000 0x4000>, <0x101b000 0x1000>;
445 interrupts = <159 2 0 0>;
446 };
447 bman-portal@70000 {
448 compatible = "fsl,bman-portal";
449 reg = <0x70000 0x4000>, <0x101c000 0x1000>;
450 interrupts = <161 2 0 0>;
451 };
452 bman-portal@74000 {
453 compatible = "fsl,bman-portal";
454 reg = <0x74000 0x4000>, <0x101d000 0x1000>;
455 interrupts = <163 2 0 0>;
456 };
457 bman-portal@78000 {
458 compatible = "fsl,bman-portal";
459 reg = <0x78000 0x4000>, <0x101e000 0x1000>;
460 interrupts = <165 2 0 0>;
461 };
462 bman-portal@7c000 {
463 compatible = "fsl,bman-portal";
464 reg = <0x7c000 0x4000>, <0x101f000 0x1000>;
465 interrupts = <167 2 0 0>;
466 };
467 bman-portal@80000 {
468 compatible = "fsl,bman-portal";
469 reg = <0x80000 0x4000>, <0x1020000 0x1000>;
470 interrupts = <169 2 0 0>;
471 };
472 bman-portal@84000 {
473 compatible = "fsl,bman-portal";
474 reg = <0x84000 0x4000>, <0x1021000 0x1000>;
475 interrupts = <171 2 0 0>;
476 };
477 bman-portal@88000 {
478 compatible = "fsl,bman-portal";
479 reg = <0x88000 0x4000>, <0x1022000 0x1000>;
480 interrupts = <173 2 0 0>;
481 };
482 bman-portal@8c000 {
483 compatible = "fsl,bman-portal";
484 reg = <0x8c000 0x4000>, <0x1023000 0x1000>;
485 interrupts = <175 2 0 0>;
486 };
487 bman-portal@90000 {
488 compatible = "fsl,bman-portal";
489 reg = <0x90000 0x4000>, <0x1024000 0x1000>;
490 interrupts = <385 2 0 0>;
491 };
492 bman-portal@94000 {
493 compatible = "fsl,bman-portal";
494 reg = <0x94000 0x4000>, <0x1025000 0x1000>;
495 interrupts = <387 2 0 0>;
496 };
497 bman-portal@98000 {
498 compatible = "fsl,bman-portal";
499 reg = <0x98000 0x4000>, <0x1026000 0x1000>;
500 interrupts = <389 2 0 0>;
501 };
502 bman-portal@9c000 {
503 compatible = "fsl,bman-portal";
504 reg = <0x9c000 0x4000>, <0x1027000 0x1000>;
505 interrupts = <391 2 0 0>;
506 };
507 bman-portal@a0000 {
508 compatible = "fsl,bman-portal";
509 reg = <0xa0000 0x4000>, <0x1028000 0x1000>;
510 interrupts = <393 2 0 0>;
511 };
512 bman-portal@a4000 {
513 compatible = "fsl,bman-portal";
514 reg = <0xa4000 0x4000>, <0x1029000 0x1000>;
515 interrupts = <395 2 0 0>;
516 };
517 bman-portal@a8000 {
518 compatible = "fsl,bman-portal";
519 reg = <0xa8000 0x4000>, <0x102a000 0x1000>;
520 interrupts = <397 2 0 0>;
521 };
522 bman-portal@ac000 {
523 compatible = "fsl,bman-portal";
524 reg = <0xac000 0x4000>, <0x102b000 0x1000>;
525 interrupts = <399 2 0 0>;
526 };
527 bman-portal@b0000 {
528 compatible = "fsl,bman-portal";
529 reg = <0xb0000 0x4000>, <0x102c000 0x1000>;
530 interrupts = <401 2 0 0>;
531 };
532 bman-portal@b4000 {
533 compatible = "fsl,bman-portal";
534 reg = <0xb4000 0x4000>, <0x102d000 0x1000>;
535 interrupts = <403 2 0 0>;
536 };
537 bman-portal@b8000 {
538 compatible = "fsl,bman-portal";
539 reg = <0xb8000 0x4000>, <0x102e000 0x1000>;
540 interrupts = <405 2 0 0>;
541 };
542 bman-portal@bc000 {
543 compatible = "fsl,bman-portal";
544 reg = <0xbc000 0x4000>, <0x102f000 0x1000>;
545 interrupts = <407 2 0 0>;
546 };
547 bman-portal@c0000 {
548 compatible = "fsl,bman-portal";
549 reg = <0xc0000 0x4000>, <0x1030000 0x1000>;
550 interrupts = <409 2 0 0>;
551 };
552 bman-portal@c4000 {
553 compatible = "fsl,bman-portal";
554 reg = <0xc4000 0x4000>, <0x1031000 0x1000>;
555 interrupts = <411 2 0 0>;
556 };
557};
558
297&soc { 559&soc {
298 #address-cells = <1>; 560 #address-cells = <1>;
299 #size-cells = <1>; 561 #size-cells = <1>;
@@ -486,6 +748,7 @@
486/include/ "qoriq-sata2-0.dtsi" 748/include/ "qoriq-sata2-0.dtsi"
487/include/ "qoriq-sata2-1.dtsi" 749/include/ "qoriq-sata2-1.dtsi"
488/include/ "qoriq-sec5.0-0.dtsi" 750/include/ "qoriq-sec5.0-0.dtsi"
751/include/ "qoriq-bman1.dtsi"
489 752
490 L2_1: l2-cache-controller@c20000 { 753 L2_1: l2-cache-controller@c20000 {
491 compatible = "fsl,t4240-l2-cache-controller"; 754 compatible = "fsl,t4240-l2-cache-controller";
diff --git a/arch/powerpc/boot/dts/kmcoge4.dts b/arch/powerpc/boot/dts/kmcoge4.dts
index 89b4119f3b19..97e6d11d1e6d 100644
--- a/arch/powerpc/boot/dts/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/kmcoge4.dts
@@ -25,10 +25,25 @@
25 device_type = "memory"; 25 device_type = "memory";
26 }; 26 };
27 27
28 reserved-memory {
29 #address-cells = <2>;
30 #size-cells = <2>;
31 ranges;
32
33 bman_fbpr: bman-fbpr {
34 size = <0 0x1000000>;
35 alignment = <0 0x1000000>;
36 };
37 };
38
28 dcsr: dcsr@f00000000 { 39 dcsr: dcsr@f00000000 {
29 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 40 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
30 }; 41 };
31 42
43 bportals: bman-portals@ff4000000 {
44 ranges = <0x0 0xf 0xf4000000 0x200000>;
45 };
46
32 soc: soc@ffe000000 { 47 soc: soc@ffe000000 {
33 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 48 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
34 reg = <0xf 0xfe000000 0 0x00001000>; 49 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/oca4080.dts b/arch/powerpc/boot/dts/oca4080.dts
index 3d4c751d1608..eb76caae11d9 100644
--- a/arch/powerpc/boot/dts/oca4080.dts
+++ b/arch/powerpc/boot/dts/oca4080.dts
@@ -49,10 +49,25 @@
49 device_type = "memory"; 49 device_type = "memory";
50 }; 50 };
51 51
52 reserved-memory {
53 #address-cells = <2>;
54 #size-cells = <2>;
55 ranges;
56
57 bman_fbpr: bman-fbpr {
58 size = <0 0x1000000>;
59 alignment = <0 0x1000000>;
60 };
61 };
62
52 dcsr: dcsr@f00000000 { 63 dcsr: dcsr@f00000000 {
53 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 64 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
54 }; 65 };
55 66
67 bportals: bman-portals@ff4000000 {
68 ranges = <0x0 0xf 0xf4000000 0x200000>;
69 };
70
56 soc: soc@ffe000000 { 71 soc: soc@ffe000000 {
57 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 72 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
58 reg = <0xf 0xfe000000 0 0x00001000>; 73 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p1023rdb.dts b/arch/powerpc/boot/dts/p1023rdb.dts
index 0a06a88ddbd5..9236e3742a23 100644
--- a/arch/powerpc/boot/dts/p1023rdb.dts
+++ b/arch/powerpc/boot/dts/p1023rdb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P1023 RDB Device Tree Source 2 * P1023 RDB Device Tree Source
3 * 3 *
4 * Copyright 2013 Freescale Semiconductor Inc. 4 * Copyright 2013 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Author: Chunhe Lan <Chunhe.Lan@freescale.com> 6 * Author: Chunhe Lan <Chunhe.Lan@freescale.com>
7 * 7 *
@@ -47,6 +47,21 @@
47 device_type = "memory"; 47 device_type = "memory";
48 }; 48 };
49 49
50 reserved-memory {
51 #address-cells = <2>;
52 #size-cells = <2>;
53 ranges;
54
55 bman_fbpr: bman-fbpr {
56 size = <0 0x1000000>;
57 alignment = <0 0x1000000>;
58 };
59 };
60
61 bportals: bman-portals@ff200000 {
62 ranges = <0x0 0xf 0xff200000 0x200000>;
63 };
64
50 soc: soc@ff600000 { 65 soc: soc@ff600000 {
51 ranges = <0x0 0x0 0xff600000 0x200000>; 66 ranges = <0x0 0x0 0xff600000 0x200000>;
52 67
@@ -228,7 +243,6 @@
228 0x0 0x100000>; 243 0x0 0x100000>;
229 }; 244 };
230 }; 245 };
231
232}; 246};
233 247
234/include/ "fsl/p1023si-post.dtsi" 248/include/ "fsl/p1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index d97ad74c7279..c1e69dc7188e 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P2041RDB Device Tree Source 2 * P2041RDB Device Tree Source
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
45 device_type = "memory"; 45 device_type = "memory";
46 }; 46 };
47 47
48 reserved-memory {
49 #address-cells = <2>;
50 #size-cells = <2>;
51 ranges;
52
53 bman_fbpr: bman-fbpr {
54 size = <0 0x1000000>;
55 alignment = <0 0x1000000>;
56 };
57 };
58
48 dcsr: dcsr@f00000000 { 59 dcsr: dcsr@f00000000 {
49 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 60 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
50 }; 61 };
51 62
63 bportals: bman-portals@ff4000000 {
64 ranges = <0x0 0xf 0xf4000000 0x200000>;
65 };
66
52 soc: soc@ffe000000 { 67 soc: soc@ffe000000 {
53 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 68 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
54 reg = <0xf 0xfe000000 0 0x00001000>; 69 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 394ea9c943c9..2192fe94866d 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P3041DS Device Tree Source 2 * P3041DS Device Tree Source
3 * 3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc. 4 * Copyright 2010 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
45 device_type = "memory"; 45 device_type = "memory";
46 }; 46 };
47 47
48 reserved-memory {
49 #address-cells = <2>;
50 #size-cells = <2>;
51 ranges;
52
53 bman_fbpr: bman-fbpr {
54 size = <0 0x1000000>;
55 alignment = <0 0x1000000>;
56 };
57 };
58
48 dcsr: dcsr@f00000000 { 59 dcsr: dcsr@f00000000 {
49 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 60 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
50 }; 61 };
51 62
63 bportals: bman-portals@ff4000000 {
64 ranges = <0x0 0xf 0xf4000000 0x200000>;
65 };
66
52 soc: soc@ffe000000 { 67 soc: soc@ffe000000 {
53 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 68 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
54 reg = <0xf 0xfe000000 0 0x00001000>; 69 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 1cf6148b8b05..fad441654642 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P4080DS Device Tree Source 2 * P4080DS Device Tree Source
3 * 3 *
4 * Copyright 2009-2011 Freescale Semiconductor Inc. 4 * Copyright 2009 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
45 device_type = "memory"; 45 device_type = "memory";
46 }; 46 };
47 47
48 reserved-memory {
49 #address-cells = <2>;
50 #size-cells = <2>;
51 ranges;
52
53 bman_fbpr: bman-fbpr {
54 size = <0 0x1000000>;
55 alignment = <0 0x1000000>;
56 };
57 };
58
48 dcsr: dcsr@f00000000 { 59 dcsr: dcsr@f00000000 {
49 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 60 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
50 }; 61 };
51 62
63 bportals: bman-portals@ff4000000 {
64 ranges = <0x0 0xf 0xf4000000 0x200000>;
65 };
66
52 soc: soc@ffe000000 { 67 soc: soc@ffe000000 {
53 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 68 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
54 reg = <0xf 0xfe000000 0 0x00001000>; 69 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index b7f3057cd894..7382636dc560 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P5020DS Device Tree Source 2 * P5020DS Device Tree Source
3 * 3 *
4 * Copyright 2010-2011 Freescale Semiconductor Inc. 4 * Copyright 2010 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
45 device_type = "memory"; 45 device_type = "memory";
46 }; 46 };
47 47
48 reserved-memory {
49 #address-cells = <2>;
50 #size-cells = <2>;
51 ranges;
52
53 bman_fbpr: bman-fbpr {
54 size = <0 0x1000000>;
55 alignment = <0 0x1000000>;
56 };
57 };
58
48 dcsr: dcsr@f00000000 { 59 dcsr: dcsr@f00000000 {
49 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 60 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
50 }; 61 };
51 62
63 bportals: bman-portals@ff4000000 {
64 ranges = <0x0 0xf 0xf4000000 0x200000>;
65 };
66
52 soc: soc@ffe000000 { 67 soc: soc@ffe000000 {
53 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 68 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
54 reg = <0xf 0xfe000000 0 0x00001000>; 69 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts
index 7e04bf487c04..35dabf5b6098 100644
--- a/arch/powerpc/boot/dts/p5040ds.dts
+++ b/arch/powerpc/boot/dts/p5040ds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P5040DS Device Tree Source 2 * P5040DS Device Tree Source
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
45 device_type = "memory"; 45 device_type = "memory";
46 }; 46 };
47 47
48 reserved-memory {
49 #address-cells = <2>;
50 #size-cells = <2>;
51 ranges;
52
53 bman_fbpr: bman-fbpr {
54 size = <0 0x1000000>;
55 alignment = <0 0x1000000>;
56 };
57 };
58
48 dcsr: dcsr@f00000000 { 59 dcsr: dcsr@f00000000 {
49 ranges = <0x00000000 0xf 0x00000000 0x01008000>; 60 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
50 }; 61 };
51 62
63 bportals: bman-portals@ff4000000 {
64 ranges = <0x0 0xf 0xf4000000 0x200000>;
65 };
66
52 soc: soc@ffe000000 { 67 soc: soc@ffe000000 {
53 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 68 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
54 reg = <0xf 0xfe000000 0 0x00001000>; 69 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t104xqds.dtsi b/arch/powerpc/boot/dts/t104xqds.dtsi
index 234f4b596c5b..f7e9bfbeefc7 100644
--- a/arch/powerpc/boot/dts/t104xqds.dtsi
+++ b/arch/powerpc/boot/dts/t104xqds.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * T104xQDS Device Tree Source 2 * T104xQDS Device Tree Source
3 * 3 *
4 * Copyright 2013 Freescale Semiconductor Inc. 4 * Copyright 2013 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -38,6 +38,17 @@
38 #size-cells = <2>; 38 #size-cells = <2>;
39 interrupt-parent = <&mpic>; 39 interrupt-parent = <&mpic>;
40 40
41 reserved-memory {
42 #address-cells = <2>;
43 #size-cells = <2>;
44 ranges;
45
46 bman_fbpr: bman-fbpr {
47 size = <0 0x1000000>;
48 alignment = <0 0x1000000>;
49 };
50 };
51
41 ifc: localbus@ffe124000 { 52 ifc: localbus@ffe124000 {
42 reg = <0xf 0xfe124000 0 0x2000>; 53 reg = <0xf 0xfe124000 0 0x2000>;
43 ranges = <0 0 0xf 0xe8000000 0x08000000 54 ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -77,6 +88,10 @@
77 ranges = <0x00000000 0xf 0x00000000 0x01072000>; 88 ranges = <0x00000000 0xf 0x00000000 0x01072000>;
78 }; 89 };
79 90
91 bportals: bman-portals@ff4000000 {
92 ranges = <0x0 0xf 0xf4000000 0x2000000>;
93 };
94
80 soc: soc@ffe000000 { 95 soc: soc@ffe000000 {
81 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 96 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
82 reg = <0xf 0xfe000000 0 0x00001000>; 97 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi
index 187add885cae..76e07a3f2ca8 100644
--- a/arch/powerpc/boot/dts/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t104xrdb.dtsi
@@ -33,6 +33,16 @@
33 */ 33 */
34 34
35/ { 35/ {
36 reserved-memory {
37 #address-cells = <2>;
38 #size-cells = <2>;
39 ranges;
40
41 bman_fbpr: bman-fbpr {
42 size = <0 0x1000000>;
43 alignment = <0 0x1000000>;
44 };
45 };
36 46
37 ifc: localbus@ffe124000 { 47 ifc: localbus@ffe124000 {
38 reg = <0xf 0xfe124000 0 0x2000>; 48 reg = <0xf 0xfe124000 0 0x2000>;
@@ -69,6 +79,10 @@
69 ranges = <0x00000000 0xf 0x00000000 0x01072000>; 79 ranges = <0x00000000 0xf 0x00000000 0x01072000>;
70 }; 80 };
71 81
82 bportals: bman-portals@ff4000000 {
83 ranges = <0x0 0xf 0xf4000000 0x2000000>;
84 };
85
72 soc: soc@ffe000000 { 86 soc: soc@ffe000000 {
73 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 87 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
74 reg = <0xf 0xfe000000 0 0x00001000>; 88 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t208xqds.dtsi b/arch/powerpc/boot/dts/t208xqds.dtsi
index 59061834d54e..c42e07f4f648 100644
--- a/arch/powerpc/boot/dts/t208xqds.dtsi
+++ b/arch/powerpc/boot/dts/t208xqds.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * T2080/T2081 QDS Device Tree Source 2 * T2080/T2081 QDS Device Tree Source
3 * 3 *
4 * Copyright 2013 Freescale Semiconductor Inc. 4 * Copyright 2013 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -39,6 +39,17 @@
39 #size-cells = <2>; 39 #size-cells = <2>;
40 interrupt-parent = <&mpic>; 40 interrupt-parent = <&mpic>;
41 41
42 reserved-memory {
43 #address-cells = <2>;
44 #size-cells = <2>;
45 ranges;
46
47 bman_fbpr: bman-fbpr {
48 size = <0 0x1000000>;
49 alignment = <0 0x1000000>;
50 };
51 };
52
42 ifc: localbus@ffe124000 { 53 ifc: localbus@ffe124000 {
43 reg = <0xf 0xfe124000 0 0x2000>; 54 reg = <0xf 0xfe124000 0 0x2000>;
44 ranges = <0 0 0xf 0xe8000000 0x08000000 55 ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -78,6 +89,10 @@
78 ranges = <0x00000000 0xf 0x00000000 0x01072000>; 89 ranges = <0x00000000 0xf 0x00000000 0x01072000>;
79 }; 90 };
80 91
92 bportals: bman-portals@ff4000000 {
93 ranges = <0x0 0xf 0xf4000000 0x2000000>;
94 };
95
81 soc: soc@ffe000000 { 96 soc: soc@ffe000000 {
82 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 97 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
83 reg = <0xf 0xfe000000 0 0x00001000>; 98 reg = <0xf 0xfe000000 0 0x00001000>;
@@ -137,7 +152,7 @@
137 rtc@68 { 152 rtc@68 {
138 compatible = "dallas,ds3232"; 153 compatible = "dallas,ds3232";
139 reg = <0x68>; 154 reg = <0x68>;
140 interrupts = <0x1 0x1 0 0>; 155 interrupts = <0xb 0x1 0 0>;
141 }; 156 };
142 }; 157 };
143 158
diff --git a/arch/powerpc/boot/dts/t208xrdb.dtsi b/arch/powerpc/boot/dts/t208xrdb.dtsi
index 1481e192e783..e1463b165d0e 100644
--- a/arch/powerpc/boot/dts/t208xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t208xrdb.dtsi
@@ -39,6 +39,17 @@
39 #size-cells = <2>; 39 #size-cells = <2>;
40 interrupt-parent = <&mpic>; 40 interrupt-parent = <&mpic>;
41 41
42 reserved-memory {
43 #address-cells = <2>;
44 #size-cells = <2>;
45 ranges;
46
47 bman_fbpr: bman-fbpr {
48 size = <0 0x1000000>;
49 alignment = <0 0x1000000>;
50 };
51 };
52
42 ifc: localbus@ffe124000 { 53 ifc: localbus@ffe124000 {
43 reg = <0xf 0xfe124000 0 0x2000>; 54 reg = <0xf 0xfe124000 0 0x2000>;
44 ranges = <0 0 0xf 0xe8000000 0x08000000 55 ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -79,6 +90,10 @@
79 ranges = <0x00000000 0xf 0x00000000 0x01072000>; 90 ranges = <0x00000000 0xf 0x00000000 0x01072000>;
80 }; 91 };
81 92
93 bportals: bman-portals@ff4000000 {
94 ranges = <0x0 0xf 0xf4000000 0x2000000>;
95 };
96
82 soc: soc@ffe000000 { 97 soc: soc@ffe000000 {
83 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 98 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
84 reg = <0xf 0xfe000000 0 0x00001000>; 99 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts
index 97683f6a2936..6df77766410b 100644
--- a/arch/powerpc/boot/dts/t4240qds.dts
+++ b/arch/powerpc/boot/dts/t4240qds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * T4240QDS Device Tree Source 2 * T4240QDS Device Tree Source
3 * 3 *
4 * Copyright 2012 Freescale Semiconductor Inc. 4 * Copyright 2012 - 2014 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -100,10 +100,25 @@
100 device_type = "memory"; 100 device_type = "memory";
101 }; 101 };
102 102
103 reserved-memory {
104 #address-cells = <2>;
105 #size-cells = <2>;
106 ranges;
107
108 bman_fbpr: bman-fbpr {
109 size = <0 0x1000000>;
110 alignment = <0 0x1000000>;
111 };
112 };
113
103 dcsr: dcsr@f00000000 { 114 dcsr: dcsr@f00000000 {
104 ranges = <0x00000000 0xf 0x00000000 0x01072000>; 115 ranges = <0x00000000 0xf 0x00000000 0x01072000>;
105 }; 116 };
106 117
118 bportals: bman-portals@ff4000000 {
119 ranges = <0x0 0xf 0xf4000000 0x2000000>;
120 };
121
107 soc: soc@ffe000000 { 122 soc: soc@ffe000000 {
108 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 123 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
109 reg = <0xf 0xfe000000 0 0x00001000>; 124 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t4240rdb.dts b/arch/powerpc/boot/dts/t4240rdb.dts
index 53761d4e8c51..46049cf37f02 100644
--- a/arch/powerpc/boot/dts/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/t4240rdb.dts
@@ -69,10 +69,25 @@
69 device_type = "memory"; 69 device_type = "memory";
70 }; 70 };
71 71
72 reserved-memory {
73 #address-cells = <2>;
74 #size-cells = <2>;
75 ranges;
76
77 bman_fbpr: bman-fbpr {
78 size = <0 0x1000000>;
79 alignment = <0 0x1000000>;
80 };
81 };
82
72 dcsr: dcsr@f00000000 { 83 dcsr: dcsr@f00000000 {
73 ranges = <0x00000000 0xf 0x00000000 0x01072000>; 84 ranges = <0x00000000 0xf 0x00000000 0x01072000>;
74 }; 85 };
75 86
87 bportals: bman-portals@ff4000000 {
88 ranges = <0x0 0xf 0xf4000000 0x2000000>;
89 };
90
76 soc: soc@ffe000000 { 91 soc: soc@ffe000000 {
77 ranges = <0x00000000 0xf 0xfe000000 0x1000000>; 92 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
78 reg = <0xf 0xfe000000 0 0x00001000>; 93 reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index bb8b9b3505ee..535e8fd8900d 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -44,12 +44,12 @@
44 44
45#define offset_devp(off) \ 45#define offset_devp(off) \
46 ({ \ 46 ({ \
47 int _offset = (off); \ 47 unsigned long _offset = (off); \
48 check_err(_offset) ? NULL : (void *)(_offset+1); \ 48 check_err(_offset) ? NULL : (void *)(_offset+1); \
49 }) 49 })
50 50
51#define devp_offset_find(devp) (((int)(devp))-1) 51#define devp_offset_find(devp) (((unsigned long)(devp))-1)
52#define devp_offset(devp) (devp ? ((int)(devp))-1 : 0) 52#define devp_offset(devp) (devp ? ((unsigned long)(devp))-1 : 0)
53 53
54static void *fdt; 54static void *fdt;
55static void *buf; /* = NULL */ 55static void *buf; /* = NULL */
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index c89fdb1b80e1..8dcd744e5728 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -4,15 +4,17 @@
4#include <types.h> 4#include <types.h>
5#include <string.h> 5#include <string.h>
6 6
7#include "of.h"
8
7typedef u32 uint32_t; 9typedef u32 uint32_t;
8typedef u64 uint64_t; 10typedef u64 uint64_t;
9typedef unsigned long uintptr_t; 11typedef unsigned long uintptr_t;
10 12
11#define fdt16_to_cpu(x) (x) 13#define fdt16_to_cpu(x) be16_to_cpu(x)
12#define cpu_to_fdt16(x) (x) 14#define cpu_to_fdt16(x) cpu_to_be16(x)
13#define fdt32_to_cpu(x) (x) 15#define fdt32_to_cpu(x) be32_to_cpu(x)
14#define cpu_to_fdt32(x) (x) 16#define cpu_to_fdt32(x) cpu_to_be32(x)
15#define fdt64_to_cpu(x) (x) 17#define fdt64_to_cpu(x) be64_to_cpu(x)
16#define cpu_to_fdt64(x) (x) 18#define cpu_to_fdt64(x) cpu_to_be64(x)
17 19
18#endif /* _ARCH_POWERPC_BOOT_LIBFDT_ENV_H */ 20#endif /* _ARCH_POWERPC_BOOT_LIBFDT_ENV_H */
diff --git a/arch/powerpc/boot/of.h b/arch/powerpc/boot/of.h
index c8c1750aba0c..5603320dce07 100644
--- a/arch/powerpc/boot/of.h
+++ b/arch/powerpc/boot/of.h
@@ -24,11 +24,19 @@ void of_console_init(void);
24typedef u32 __be32; 24typedef u32 __be32;
25 25
26#ifdef __LITTLE_ENDIAN__ 26#ifdef __LITTLE_ENDIAN__
27#define cpu_to_be16(x) swab16(x)
28#define be16_to_cpu(x) swab16(x)
27#define cpu_to_be32(x) swab32(x) 29#define cpu_to_be32(x) swab32(x)
28#define be32_to_cpu(x) swab32(x) 30#define be32_to_cpu(x) swab32(x)
31#define cpu_to_be64(x) swab64(x)
32#define be64_to_cpu(x) swab64(x)
29#else 33#else
34#define cpu_to_be16(x) (x)
35#define be16_to_cpu(x) (x)
30#define cpu_to_be32(x) (x) 36#define cpu_to_be32(x) (x)
31#define be32_to_cpu(x) (x) 37#define be32_to_cpu(x) (x)
38#define cpu_to_be64(x) (x)
39#define be64_to_cpu(x) (x)
32#endif 40#endif
33 41
34#define PROM_ERROR (-1u) 42#define PROM_ERROR (-1u)
diff --git a/arch/powerpc/boot/planetcore.c b/arch/powerpc/boot/planetcore.c
index 0d8558a475bb..75117e63e6db 100644
--- a/arch/powerpc/boot/planetcore.c
+++ b/arch/powerpc/boot/planetcore.c
@@ -131,36 +131,3 @@ void planetcore_set_stdout_path(const char *table)
131 131
132 setprop_str(chosen, "linux,stdout-path", path); 132 setprop_str(chosen, "linux,stdout-path", path);
133} 133}
134
135void planetcore_set_serial_speed(const char *table)
136{
137 void *chosen, *stdout;
138 u64 baud;
139 u32 baud32;
140 int len;
141
142 chosen = finddevice("/chosen");
143 if (!chosen)
144 return;
145
146 len = getprop(chosen, "linux,stdout-path", prop_buf, MAX_PROP_LEN);
147 if (len <= 0)
148 return;
149
150 stdout = finddevice(prop_buf);
151 if (!stdout) {
152 printf("planetcore_set_serial_speed: "
153 "Bad /chosen/linux,stdout-path.\r\n");
154
155 return;
156 }
157
158 if (!planetcore_get_decimal(table, PLANETCORE_KEY_SERIAL_BAUD,
159 &baud)) {
160 printf("planetcore_set_serial_speed: No SB tag.\r\n");
161 return;
162 }
163
164 baud32 = baud;
165 setprop(stdout, "current-speed", &baud32, 4);
166}
diff --git a/arch/powerpc/boot/planetcore.h b/arch/powerpc/boot/planetcore.h
index 0d4094f1771c..d53c733cc463 100644
--- a/arch/powerpc/boot/planetcore.h
+++ b/arch/powerpc/boot/planetcore.h
@@ -43,7 +43,4 @@ void planetcore_set_mac_addrs(const char *table);
43 */ 43 */
44void planetcore_set_stdout_path(const char *table); 44void planetcore_set_stdout_path(const char *table);
45 45
46/* Sets the current-speed property in the serial node. */
47void planetcore_set_serial_speed(const char *table);
48
49#endif 46#endif
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ae0f88ec4a32..3f50c27ed8f8 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -277,7 +277,7 @@ treeboot-iss4xx-mpic)
277 platformo="$object/treeboot-iss4xx.o" 277 platformo="$object/treeboot-iss4xx.o"
278 ;; 278 ;;
279epapr) 279epapr)
280 platformo="$object/epapr.o $object/epapr-wrapper.o" 280 platformo="$object/pseries-head.o $object/epapr.o $object/epapr-wrapper.o"
281 link_address='0x20000000' 281 link_address='0x20000000'
282 pie=-pie 282 pie=-pie
283 ;; 283 ;;
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 9788b3c2d563..9227b517560a 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -28,7 +28,6 @@ CONFIG_PS3_ROM=m
28CONFIG_PS3_FLASH=m 28CONFIG_PS3_FLASH=m
29CONFIG_PS3_LPM=m 29CONFIG_PS3_LPM=m
30CONFIG_PPC_IBM_CELL_BLADE=y 30CONFIG_PPC_IBM_CELL_BLADE=y
31CONFIG_PPC_CELLEB=y
32CONFIG_RTAS_FLASH=y 31CONFIG_RTAS_FLASH=y
33CONFIG_CPU_FREQ=y 32CONFIG_CPU_FREQ=y
34CONFIG_CPU_FREQ_GOV_POWERSAVE=y 33CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -113,7 +112,6 @@ CONFIG_IDE=y
113CONFIG_BLK_DEV_GENERIC=y 112CONFIG_BLK_DEV_GENERIC=y
114CONFIG_BLK_DEV_AEC62XX=y 113CONFIG_BLK_DEV_AEC62XX=y
115CONFIG_BLK_DEV_SIIMAGE=y 114CONFIG_BLK_DEV_SIIMAGE=y
116CONFIG_BLK_DEV_CELLEB=y
117CONFIG_BLK_DEV_SD=y 115CONFIG_BLK_DEV_SD=y
118CONFIG_BLK_DEV_SR=m 116CONFIG_BLK_DEV_SR=m
119CONFIG_CHR_DEV_SG=y 117CONFIG_CHR_DEV_SG=y
@@ -156,7 +154,6 @@ CONFIG_SERIAL_TXX9_NR_UARTS=2
156CONFIG_SERIAL_TXX9_CONSOLE=y 154CONFIG_SERIAL_TXX9_CONSOLE=y
157CONFIG_SERIAL_OF_PLATFORM=y 155CONFIG_SERIAL_OF_PLATFORM=y
158CONFIG_HVC_RTAS=y 156CONFIG_HVC_RTAS=y
159CONFIG_HVC_BEAT=y
160CONFIG_IPMI_HANDLER=m 157CONFIG_IPMI_HANDLER=m
161CONFIG_IPMI_DEVICE_INTERFACE=m 158CONFIG_IPMI_DEVICE_INTERFACE=m
162CONFIG_IPMI_SI=m 159CONFIG_IPMI_SI=m
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
deleted file mode 100644
index ff454dcd2dd3..000000000000
--- a/arch/powerpc/configs/celleb_defconfig
+++ /dev/null
@@ -1,152 +0,0 @@
1CONFIG_PPC64=y
2CONFIG_TUNE_CELL=y
3CONFIG_ALTIVEC=y
4CONFIG_SMP=y
5CONFIG_NR_CPUS=4
6CONFIG_SYSVIPC=y
7CONFIG_FHANDLE=y
8CONFIG_NO_HZ=y
9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12CONFIG_LOG_BUF_SHIFT=15
13CONFIG_BLK_DEV_INITRD=y
14# CONFIG_COMPAT_BRK is not set
15CONFIG_MODULES=y
16CONFIG_MODULE_UNLOAD=y
17CONFIG_MODVERSIONS=y
18CONFIG_MODULE_SRCVERSION_ALL=y
19CONFIG_PARTITION_ADVANCED=y
20# CONFIG_PPC_POWERNV is not set
21# CONFIG_PPC_PSERIES is not set
22# CONFIG_PPC_PMAC is not set
23CONFIG_PPC_CELLEB=y
24CONFIG_SPU_FS=y
25# CONFIG_CBE_THERM is not set
26CONFIG_UDBG_RTAS_CONSOLE=y
27# CONFIG_RTAS_PROC is not set
28CONFIG_BINFMT_MISC=m
29CONFIG_KEXEC=y
30CONFIG_NUMA=y
31CONFIG_NET=y
32CONFIG_PACKET=y
33CONFIG_UNIX=y
34CONFIG_INET=y
35CONFIG_IP_MULTICAST=y
36CONFIG_SYN_COOKIES=y
37CONFIG_IPV6=y
38CONFIG_INET6_AH=m
39CONFIG_INET6_ESP=m
40CONFIG_INET6_IPCOMP=m
41CONFIG_IPV6_TUNNEL=m
42CONFIG_NETFILTER=y
43CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
44CONFIG_BLK_DEV_LOOP=y
45CONFIG_BLK_DEV_RAM=y
46CONFIG_BLK_DEV_RAM_SIZE=131072
47CONFIG_IDE=y
48CONFIG_BLK_DEV_IDECD=m
49CONFIG_BLK_DEV_GENERIC=y
50CONFIG_BLK_DEV_CELLEB=y
51CONFIG_SCSI=m
52# CONFIG_SCSI_PROC_FS is not set
53CONFIG_BLK_DEV_SD=m
54CONFIG_BLK_DEV_SR=m
55CONFIG_CHR_DEV_SG=m
56CONFIG_MD=y
57CONFIG_BLK_DEV_MD=m
58CONFIG_MD_LINEAR=m
59CONFIG_MD_RAID0=m
60CONFIG_MD_RAID1=m
61CONFIG_BLK_DEV_DM=m
62CONFIG_DM_CRYPT=m
63CONFIG_DM_SNAPSHOT=m
64CONFIG_DM_MIRROR=m
65CONFIG_DM_ZERO=m
66CONFIG_DM_MULTIPATH=m
67CONFIG_NETDEVICES=y
68CONFIG_SPIDER_NET=y
69# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
70# CONFIG_INPUT_KEYBOARD is not set
71# CONFIG_INPUT_MOUSE is not set
72# CONFIG_SERIO_I8042 is not set
73# CONFIG_LEGACY_PTYS is not set
74CONFIG_SERIAL_NONSTANDARD=y
75CONFIG_SERIAL_TXX9_NR_UARTS=3
76CONFIG_SERIAL_TXX9_CONSOLE=y
77CONFIG_HVC_RTAS=y
78CONFIG_HVC_BEAT=y
79# CONFIG_HW_RANDOM is not set
80CONFIG_GEN_RTC=y
81CONFIG_I2C=y
82# CONFIG_HWMON is not set
83CONFIG_WATCHDOG=y
84# CONFIG_VGA_CONSOLE is not set
85CONFIG_USB_HIDDEV=y
86CONFIG_USB=y
87CONFIG_USB_MON=y
88CONFIG_USB_EHCI_HCD=m
89# CONFIG_USB_EHCI_HCD_PPC_OF is not set
90CONFIG_USB_OHCI_HCD=m
91CONFIG_USB_STORAGE=m
92CONFIG_EXT2_FS=y
93CONFIG_EXT2_FS_XATTR=y
94CONFIG_EXT2_FS_POSIX_ACL=y
95CONFIG_EXT2_FS_SECURITY=y
96CONFIG_EXT2_FS_XIP=y
97CONFIG_EXT3_FS=y
98CONFIG_EXT3_FS_POSIX_ACL=y
99CONFIG_EXT3_FS_SECURITY=y
100CONFIG_ISO9660_FS=m
101CONFIG_JOLIET=y
102CONFIG_UDF_FS=m
103CONFIG_MSDOS_FS=m
104CONFIG_VFAT_FS=m
105CONFIG_PROC_KCORE=y
106CONFIG_TMPFS=y
107CONFIG_HUGETLBFS=y
108CONFIG_NFS_FS=m
109CONFIG_NFS_V3_ACL=y
110CONFIG_NFSD=m
111CONFIG_NFSD_V3=y
112CONFIG_NFSD_V3_ACL=y
113CONFIG_NLS_ISO8859_1=m
114CONFIG_NLS_ISO8859_2=m
115CONFIG_NLS_ISO8859_3=m
116CONFIG_NLS_ISO8859_4=m
117CONFIG_NLS_ISO8859_5=m
118CONFIG_NLS_ISO8859_6=m
119CONFIG_NLS_ISO8859_7=m
120CONFIG_NLS_ISO8859_9=m
121CONFIG_NLS_ISO8859_13=m
122CONFIG_NLS_ISO8859_14=m
123CONFIG_NLS_ISO8859_15=m
124CONFIG_LIBCRC32C=m
125CONFIG_DEBUG_FS=y
126CONFIG_MAGIC_SYSRQ=y
127CONFIG_DEBUG_KERNEL=y
128CONFIG_DEBUG_MUTEXES=y
129CONFIG_XMON=y
130CONFIG_XMON_DEFAULT=y
131CONFIG_CRYPTO_NULL=m
132CONFIG_CRYPTO_TEST=m
133CONFIG_CRYPTO_ECB=m
134CONFIG_CRYPTO_PCBC=m
135CONFIG_CRYPTO_HMAC=y
136CONFIG_CRYPTO_MD4=m
137CONFIG_CRYPTO_MD5=y
138CONFIG_CRYPTO_MICHAEL_MIC=m
139CONFIG_CRYPTO_SHA256=m
140CONFIG_CRYPTO_SHA512=m
141CONFIG_CRYPTO_TGR192=m
142CONFIG_CRYPTO_WP512=m
143CONFIG_CRYPTO_ANUBIS=m
144CONFIG_CRYPTO_ARC4=m
145CONFIG_CRYPTO_BLOWFISH=m
146CONFIG_CRYPTO_CAST5=m
147CONFIG_CRYPTO_CAST6=m
148CONFIG_CRYPTO_KHAZAD=m
149CONFIG_CRYPTO_SERPENT=m
150CONFIG_CRYPTO_TEA=m
151CONFIG_CRYPTO_TWOFISH=m
152# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index ca7957b09a3c..37659937bd12 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -99,6 +99,8 @@ CONFIG_E1000E=y
99CONFIG_AT803X_PHY=y 99CONFIG_AT803X_PHY=y
100CONFIG_VITESSE_PHY=y 100CONFIG_VITESSE_PHY=y
101CONFIG_FIXED_PHY=y 101CONFIG_FIXED_PHY=y
102CONFIG_MDIO_BUS_MUX_GPIO=y
103CONFIG_MDIO_BUS_MUX_MMIOREG=y
102# CONFIG_INPUT_MOUSEDEV is not set 104# CONFIG_INPUT_MOUSEDEV is not set
103# CONFIG_INPUT_KEYBOARD is not set 105# CONFIG_INPUT_KEYBOARD is not set
104# CONFIG_INPUT_MOUSE is not set 106# CONFIG_INPUT_MOUSE is not set
@@ -114,11 +116,14 @@ CONFIG_NVRAM=y
114CONFIG_I2C=y 116CONFIG_I2C=y
115CONFIG_I2C_CHARDEV=y 117CONFIG_I2C_CHARDEV=y
116CONFIG_I2C_MPC=y 118CONFIG_I2C_MPC=y
119CONFIG_I2C_MUX=y
120CONFIG_I2C_MUX_PCA954x=y
117CONFIG_SPI=y 121CONFIG_SPI=y
118CONFIG_SPI_GPIO=y 122CONFIG_SPI_GPIO=y
119CONFIG_SPI_FSL_SPI=y 123CONFIG_SPI_FSL_SPI=y
120CONFIG_SPI_FSL_ESPI=y 124CONFIG_SPI_FSL_ESPI=y
121# CONFIG_HWMON is not set 125CONFIG_SENSORS_LM90=y
126CONFIG_SENSORS_INA2XX=y
122CONFIG_USB_HID=m 127CONFIG_USB_HID=m
123CONFIG_USB=y 128CONFIG_USB=y
124CONFIG_USB_MON=y 129CONFIG_USB_MON=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 04737aaa8b6b..33cd1df818ad 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -12,6 +12,10 @@ CONFIG_BSD_PROCESS_ACCT=y
12CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
14CONFIG_LOG_BUF_SHIFT=14 14CONFIG_LOG_BUF_SHIFT=14
15CONFIG_CGROUPS=y
16CONFIG_CPUSETS=y
17CONFIG_CGROUP_CPUACCT=y
18CONFIG_CGROUP_SCHED=y
15CONFIG_BLK_DEV_INITRD=y 19CONFIG_BLK_DEV_INITRD=y
16CONFIG_EXPERT=y 20CONFIG_EXPERT=y
17CONFIG_KALLSYMS_ALL=y 21CONFIG_KALLSYMS_ALL=y
@@ -75,6 +79,10 @@ CONFIG_BLK_DEV_LOOP=y
75CONFIG_BLK_DEV_RAM=y 79CONFIG_BLK_DEV_RAM=y
76CONFIG_BLK_DEV_RAM_SIZE=131072 80CONFIG_BLK_DEV_RAM_SIZE=131072
77CONFIG_EEPROM_LEGACY=y 81CONFIG_EEPROM_LEGACY=y
82CONFIG_BLK_DEV_SD=y
83CONFIG_BLK_DEV_SR=y
84CONFIG_BLK_DEV_SR_VENDOR=y
85CONFIG_CHR_DEV_SG=y
78CONFIG_ATA=y 86CONFIG_ATA=y
79CONFIG_SATA_FSL=y 87CONFIG_SATA_FSL=y
80CONFIG_SATA_SIL24=y 88CONFIG_SATA_SIL24=y
@@ -85,6 +93,8 @@ CONFIG_FSL_XGMAC_MDIO=y
85CONFIG_E1000E=y 93CONFIG_E1000E=y
86CONFIG_VITESSE_PHY=y 94CONFIG_VITESSE_PHY=y
87CONFIG_FIXED_PHY=y 95CONFIG_FIXED_PHY=y
96CONFIG_MDIO_BUS_MUX_GPIO=y
97CONFIG_MDIO_BUS_MUX_MMIOREG=y
88CONFIG_INPUT_FF_MEMLESS=m 98CONFIG_INPUT_FF_MEMLESS=m
89# CONFIG_INPUT_MOUSEDEV is not set 99# CONFIG_INPUT_MOUSEDEV is not set
90# CONFIG_INPUT_KEYBOARD is not set 100# CONFIG_INPUT_KEYBOARD is not set
@@ -99,11 +109,14 @@ CONFIG_SERIAL_8250_RSA=y
99CONFIG_I2C=y 109CONFIG_I2C=y
100CONFIG_I2C_CHARDEV=y 110CONFIG_I2C_CHARDEV=y
101CONFIG_I2C_MPC=y 111CONFIG_I2C_MPC=y
112CONFIG_I2C_MUX=y
113CONFIG_I2C_MUX_PCA954x=y
102CONFIG_SPI=y 114CONFIG_SPI=y
103CONFIG_SPI_GPIO=y 115CONFIG_SPI_GPIO=y
104CONFIG_SPI_FSL_SPI=y 116CONFIG_SPI_FSL_SPI=y
105CONFIG_SPI_FSL_ESPI=y 117CONFIG_SPI_FSL_ESPI=y
106# CONFIG_HWMON is not set 118CONFIG_SENSORS_LM90=y
119CONFIG_SENSORS_INA2XX=y
107CONFIG_USB_HID=m 120CONFIG_USB_HID=m
108CONFIG_USB=y 121CONFIG_USB=y
109CONFIG_USB_MON=y 122CONFIG_USB_MON=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 8535c343dd57..6ecf7bdbc2f9 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -150,8 +150,7 @@ CONFIG_SPI=y
150CONFIG_SPI_FSL_SPI=y 150CONFIG_SPI_FSL_SPI=y
151CONFIG_SPI_FSL_ESPI=y 151CONFIG_SPI_FSL_ESPI=y
152CONFIG_GPIO_MPC8XXX=y 152CONFIG_GPIO_MPC8XXX=y
153CONFIG_HWMON=m 153CONFIG_SENSORS_LM90=y
154CONFIG_SENSORS_LM90=m
155CONFIG_FB=y 154CONFIG_FB=y
156CONFIG_FB_FSL_DIU=y 155CONFIG_FB_FSL_DIU=y
157# CONFIG_VGA_CONSOLE is not set 156# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index c45ad2e01b0c..b6c7111ea913 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -143,7 +143,7 @@ CONFIG_SPI=y
143CONFIG_SPI_FSL_SPI=y 143CONFIG_SPI_FSL_SPI=y
144CONFIG_SPI_FSL_ESPI=y 144CONFIG_SPI_FSL_ESPI=y
145CONFIG_GPIO_MPC8XXX=y 145CONFIG_GPIO_MPC8XXX=y
146# CONFIG_HWMON is not set 146CONFIG_SENSORS_LM90=y
147CONFIG_FB=y 147CONFIG_FB=y
148CONFIG_FB_FSL_DIU=y 148CONFIG_FB_FSL_DIU=y
149# CONFIG_VGA_CONSOLE is not set 149# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 3315c9f0828a..aad501ae3834 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -36,7 +36,6 @@ CONFIG_PS3_ROM=m
36CONFIG_PS3_FLASH=m 36CONFIG_PS3_FLASH=m
37CONFIG_PS3_LPM=m 37CONFIG_PS3_LPM=m
38CONFIG_PPC_IBM_CELL_BLADE=y 38CONFIG_PPC_IBM_CELL_BLADE=y
39CONFIG_PPC_CELLEB=y
40CONFIG_PPC_CELL_QPACE=y 39CONFIG_PPC_CELL_QPACE=y
41CONFIG_RTAS_FLASH=m 40CONFIG_RTAS_FLASH=m
42CONFIG_IBMEBUS=y 41CONFIG_IBMEBUS=y
@@ -89,7 +88,6 @@ CONFIG_IDE=y
89CONFIG_BLK_DEV_IDECD=y 88CONFIG_BLK_DEV_IDECD=y
90CONFIG_BLK_DEV_GENERIC=y 89CONFIG_BLK_DEV_GENERIC=y
91CONFIG_BLK_DEV_AMD74XX=y 90CONFIG_BLK_DEV_AMD74XX=y
92CONFIG_BLK_DEV_CELLEB=y
93CONFIG_BLK_DEV_IDE_PMAC=y 91CONFIG_BLK_DEV_IDE_PMAC=y
94CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y 92CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
95CONFIG_BLK_DEV_SD=y 93CONFIG_BLK_DEV_SD=y
@@ -196,7 +194,6 @@ CONFIG_SERIAL_TXX9_CONSOLE=y
196CONFIG_SERIAL_JSM=m 194CONFIG_SERIAL_JSM=m
197CONFIG_HVC_CONSOLE=y 195CONFIG_HVC_CONSOLE=y
198CONFIG_HVC_RTAS=y 196CONFIG_HVC_RTAS=y
199CONFIG_HVC_BEAT=y
200CONFIG_HVCS=m 197CONFIG_HVCS=m
201CONFIG_VIRTIO_CONSOLE=m 198CONFIG_VIRTIO_CONSOLE=m
202CONFIG_IBM_BSR=m 199CONFIG_IBM_BSR=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 382b28e364dc..4b87205c230c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,6 +1,8 @@
1
2generic-y += clkdev.h 1generic-y += clkdev.h
2generic-y += div64.h
3generic-y += irq_regs.h
3generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += local64.h
4generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
5generic-y += preempt.h 7generic-y += preempt.h
6generic-y += rwsem.h 8generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 34a05a1a990b..0dc42c5082b7 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -76,9 +76,6 @@ extern void _set_L3CR(unsigned long);
76#define _set_L3CR(val) do { } while(0) 76#define _set_L3CR(val) do { } while(0)
77#endif 77#endif
78 78
79extern void cacheable_memzero(void *p, unsigned int nb);
80extern void *cacheable_memcpy(void *, const void *, unsigned int);
81
82#endif /* !__ASSEMBLY__ */ 79#endif /* !__ASSEMBLY__ */
83#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
84#endif /* _ASM_POWERPC_CACHE_H */ 81#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5cf5a6d10685..6367b8347dad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -100,7 +100,7 @@ struct cpu_spec {
100 /* 100 /*
101 * Processor specific routine to flush tlbs. 101 * Processor specific routine to flush tlbs.
102 */ 102 */
103 void (*flush_tlb)(unsigned long inval_selector); 103 void (*flush_tlb)(unsigned int action);
104 104
105}; 105};
106 106
@@ -114,6 +114,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
114 114
115extern const char *powerpc_base_platform; 115extern const char *powerpc_base_platform;
116 116
117/* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
118enum {
119 TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */
120 TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
121};
122
117#endif /* __ASSEMBLY__ */ 123#endif /* __ASSEMBLY__ */
118 124
119/* CPU kernel features */ 125/* CPU kernel features */
diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h
index e23f07e73cb3..6c69836b4ec2 100644
--- a/arch/powerpc/include/asm/dbdma.h
+++ b/arch/powerpc/include/asm/dbdma.h
@@ -42,12 +42,12 @@ struct dbdma_regs {
42 * DBDMA command structure. These fields are all little-endian! 42 * DBDMA command structure. These fields are all little-endian!
43 */ 43 */
44struct dbdma_cmd { 44struct dbdma_cmd {
45 unsigned short req_count; /* requested byte transfer count */ 45 __le16 req_count; /* requested byte transfer count */
46 unsigned short command; /* command word (has bit-fields) */ 46 __le16 command; /* command word (has bit-fields) */
47 unsigned int phy_addr; /* physical data address */ 47 __le32 phy_addr; /* physical data address */
48 unsigned int cmd_dep; /* command-dependent field */ 48 __le32 cmd_dep; /* command-dependent field */
49 unsigned short res_count; /* residual count after completion */ 49 __le16 res_count; /* residual count after completion */
50 unsigned short xfer_status; /* transfer status */ 50 __le16 xfer_status; /* transfer status */
51}; 51};
52 52
53/* DBDMA command values in command field */ 53/* DBDMA command values in command field */
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 7d2e6235726d..4efc11dacb98 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -31,7 +31,7 @@ typedef struct {
31 31
32static inline bool dcr_map_ok_native(dcr_host_native_t host) 32static inline bool dcr_map_ok_native(dcr_host_native_t host)
33{ 33{
34 return 1; 34 return true;
35} 35}
36 36
37#define dcr_map_native(dev, dcr_n, dcr_c) \ 37#define dcr_map_native(dev, dcr_n, dcr_c) \
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 38faeded7d59..9f1371bab5fc 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -8,6 +8,9 @@
8 8
9struct dma_map_ops; 9struct dma_map_ops;
10struct device_node; 10struct device_node;
11#ifdef CONFIG_PPC64
12struct pci_dn;
13#endif
11 14
12/* 15/*
13 * Arch extensions to struct device. 16 * Arch extensions to struct device.
@@ -34,6 +37,9 @@ struct dev_archdata {
34#ifdef CONFIG_SWIOTLB 37#ifdef CONFIG_SWIOTLB
35 dma_addr_t max_direct_dma_addr; 38 dma_addr_t max_direct_dma_addr;
36#endif 39#endif
40#ifdef CONFIG_PPC64
41 struct pci_dn *pci_data;
42#endif
37#ifdef CONFIG_EEH 43#ifdef CONFIG_EEH
38 struct eeh_dev *edev; 44 struct eeh_dev *edev;
39#endif 45#endif
diff --git a/arch/powerpc/include/asm/div64.h b/arch/powerpc/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/powerpc/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 894d538f3567..9103687b0436 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -191,11 +191,11 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
191 struct dev_archdata *sd = &dev->archdata; 191 struct dev_archdata *sd = &dev->archdata;
192 192
193 if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr) 193 if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
194 return 0; 194 return false;
195#endif 195#endif
196 196
197 if (!dev->dma_mask) 197 if (!dev->dma_mask)
198 return 0; 198 return false;
199 199
200 return addr + size - 1 <= *dev->dma_mask; 200 return addr + size - 1 <= *dev->dma_mask;
201} 201}
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 55abfd09e47f..a52db28ecc1e 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -29,7 +29,7 @@
29 29
30struct pci_dev; 30struct pci_dev;
31struct pci_bus; 31struct pci_bus;
32struct device_node; 32struct pci_dn;
33 33
34#ifdef CONFIG_EEH 34#ifdef CONFIG_EEH
35 35
@@ -136,14 +136,14 @@ struct eeh_dev {
136 struct eeh_pe *pe; /* Associated PE */ 136 struct eeh_pe *pe; /* Associated PE */
137 struct list_head list; /* Form link list in the PE */ 137 struct list_head list; /* Form link list in the PE */
138 struct pci_controller *phb; /* Associated PHB */ 138 struct pci_controller *phb; /* Associated PHB */
139 struct device_node *dn; /* Associated device node */ 139 struct pci_dn *pdn; /* Associated PCI device node */
140 struct pci_dev *pdev; /* Associated PCI device */ 140 struct pci_dev *pdev; /* Associated PCI device */
141 struct pci_bus *bus; /* PCI bus for partial hotplug */ 141 struct pci_bus *bus; /* PCI bus for partial hotplug */
142}; 142};
143 143
144static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev) 144static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
145{ 145{
146 return edev ? edev->dn : NULL; 146 return edev ? edev->pdn : NULL;
147} 147}
148 148
149static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) 149static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
@@ -200,8 +200,7 @@ struct eeh_ops {
200 char *name; 200 char *name;
201 int (*init)(void); 201 int (*init)(void);
202 int (*post_init)(void); 202 int (*post_init)(void);
203 void* (*of_probe)(struct device_node *dn, void *flag); 203 void* (*probe)(struct pci_dn *pdn, void *data);
204 int (*dev_probe)(struct pci_dev *dev, void *flag);
205 int (*set_option)(struct eeh_pe *pe, int option); 204 int (*set_option)(struct eeh_pe *pe, int option);
206 int (*get_pe_addr)(struct eeh_pe *pe); 205 int (*get_pe_addr)(struct eeh_pe *pe);
207 int (*get_state)(struct eeh_pe *pe, int *state); 206 int (*get_state)(struct eeh_pe *pe, int *state);
@@ -211,10 +210,10 @@ struct eeh_ops {
211 int (*configure_bridge)(struct eeh_pe *pe); 210 int (*configure_bridge)(struct eeh_pe *pe);
212 int (*err_inject)(struct eeh_pe *pe, int type, int func, 211 int (*err_inject)(struct eeh_pe *pe, int type, int func,
213 unsigned long addr, unsigned long mask); 212 unsigned long addr, unsigned long mask);
214 int (*read_config)(struct device_node *dn, int where, int size, u32 *val); 213 int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val);
215 int (*write_config)(struct device_node *dn, int where, int size, u32 val); 214 int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val);
216 int (*next_error)(struct eeh_pe **pe); 215 int (*next_error)(struct eeh_pe **pe);
217 int (*restore_config)(struct device_node *dn); 216 int (*restore_config)(struct pci_dn *pdn);
218}; 217};
219 218
220extern int eeh_subsystem_flags; 219extern int eeh_subsystem_flags;
@@ -272,7 +271,7 @@ void eeh_pe_restore_bars(struct eeh_pe *pe);
272const char *eeh_pe_loc_get(struct eeh_pe *pe); 271const char *eeh_pe_loc_get(struct eeh_pe *pe);
273struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); 272struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
274 273
275void *eeh_dev_init(struct device_node *dn, void *data); 274void *eeh_dev_init(struct pci_dn *pdn, void *data);
276void eeh_dev_phb_init_dynamic(struct pci_controller *phb); 275void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
277int eeh_init(void); 276int eeh_init(void);
278int __init eeh_ops_register(struct eeh_ops *ops); 277int __init eeh_ops_register(struct eeh_ops *ops);
@@ -280,8 +279,8 @@ int __exit eeh_ops_unregister(const char *name);
280int eeh_check_failure(const volatile void __iomem *token); 279int eeh_check_failure(const volatile void __iomem *token);
281int eeh_dev_check_failure(struct eeh_dev *edev); 280int eeh_dev_check_failure(struct eeh_dev *edev);
282void eeh_addr_cache_build(void); 281void eeh_addr_cache_build(void);
283void eeh_add_device_early(struct device_node *); 282void eeh_add_device_early(struct pci_dn *);
284void eeh_add_device_tree_early(struct device_node *); 283void eeh_add_device_tree_early(struct pci_dn *);
285void eeh_add_device_late(struct pci_dev *); 284void eeh_add_device_late(struct pci_dev *);
286void eeh_add_device_tree_late(struct pci_bus *); 285void eeh_add_device_tree_late(struct pci_bus *);
287void eeh_add_sysfs_files(struct pci_bus *); 286void eeh_add_sysfs_files(struct pci_bus *);
@@ -323,7 +322,7 @@ static inline int eeh_init(void)
323 return 0; 322 return 0;
324} 323}
325 324
326static inline void *eeh_dev_init(struct device_node *dn, void *data) 325static inline void *eeh_dev_init(struct pci_dn *pdn, void *data)
327{ 326{
328 return NULL; 327 return NULL;
329} 328}
@@ -339,9 +338,9 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
339 338
340static inline void eeh_addr_cache_build(void) { } 339static inline void eeh_addr_cache_build(void) { }
341 340
342static inline void eeh_add_device_early(struct device_node *dn) { } 341static inline void eeh_add_device_early(struct pci_dn *pdn) { }
343 342
344static inline void eeh_add_device_tree_early(struct device_node *dn) { } 343static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
345 344
346static inline void eeh_add_device_late(struct pci_dev *dev) { } 345static inline void eeh_add_device_late(struct pci_dev *dev) { }
347 346
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 681bc0314b6b..e05808a328db 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -42,7 +42,7 @@
42#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000) 42#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
43#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000) 43#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
44#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) 44#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
45#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) 45/* Free ASM_CONST(0x0000000001000000) */
46#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) 46#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
47#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) 47#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
48#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) 48#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
@@ -75,8 +75,6 @@ enum {
75 FW_FEATURE_POWERNV_ALWAYS = 0, 75 FW_FEATURE_POWERNV_ALWAYS = 0,
76 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 76 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
77 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 77 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
78 FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
79 FW_FEATURE_CELLEB_ALWAYS = 0,
80 FW_FEATURE_NATIVE_POSSIBLE = 0, 78 FW_FEATURE_NATIVE_POSSIBLE = 0,
81 FW_FEATURE_NATIVE_ALWAYS = 0, 79 FW_FEATURE_NATIVE_ALWAYS = 0,
82 FW_FEATURE_POSSIBLE = 80 FW_FEATURE_POSSIBLE =
@@ -89,9 +87,6 @@ enum {
89#ifdef CONFIG_PPC_PS3 87#ifdef CONFIG_PPC_PS3
90 FW_FEATURE_PS3_POSSIBLE | 88 FW_FEATURE_PS3_POSSIBLE |
91#endif 89#endif
92#ifdef CONFIG_PPC_CELLEB
93 FW_FEATURE_CELLEB_POSSIBLE |
94#endif
95#ifdef CONFIG_PPC_NATIVE 90#ifdef CONFIG_PPC_NATIVE
96 FW_FEATURE_NATIVE_ALWAYS | 91 FW_FEATURE_NATIVE_ALWAYS |
97#endif 92#endif
@@ -106,9 +101,6 @@ enum {
106#ifdef CONFIG_PPC_PS3 101#ifdef CONFIG_PPC_PS3
107 FW_FEATURE_PS3_ALWAYS & 102 FW_FEATURE_PS3_ALWAYS &
108#endif 103#endif
109#ifdef CONFIG_PPC_CELLEB
110 FW_FEATURE_CELLEB_ALWAYS &
111#endif
112#ifdef CONFIG_PPC_NATIVE 104#ifdef CONFIG_PPC_NATIVE
113 FW_FEATURE_NATIVE_ALWAYS & 105 FW_FEATURE_NATIVE_ALWAYS &
114#endif 106#endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f1ea5972f6ec..1e27d6338565 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -29,6 +29,7 @@
29#include <linux/bitops.h> 29#include <linux/bitops.h>
30#include <asm/machdep.h> 30#include <asm/machdep.h>
31#include <asm/types.h> 31#include <asm/types.h>
32#include <asm/pci-bridge.h>
32 33
33#define IOMMU_PAGE_SHIFT_4K 12 34#define IOMMU_PAGE_SHIFT_4K 12
34#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K) 35#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
@@ -78,6 +79,9 @@ struct iommu_table {
78 struct iommu_group *it_group; 79 struct iommu_group *it_group;
79#endif 80#endif
80 void (*set_bypass)(struct iommu_table *tbl, bool enable); 81 void (*set_bypass)(struct iommu_table *tbl, bool enable);
82#ifdef CONFIG_PPC_POWERNV
83 void *data;
84#endif
81}; 85};
82 86
83/* Pure 2^n version of get_order */ 87/* Pure 2^n version of get_order */
@@ -169,7 +173,7 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
169 struct dma_attrs *attrs); 173 struct dma_attrs *attrs);
170 174
171extern void iommu_init_early_pSeries(void); 175extern void iommu_init_early_pSeries(void);
172extern void iommu_init_early_dart(void); 176extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
173extern void iommu_init_early_pasemi(void); 177extern void iommu_init_early_pasemi(void);
174 178
175extern void alloc_dart_table(void); 179extern void alloc_dart_table(void);
diff --git a/arch/powerpc/include/asm/irq_regs.h b/arch/powerpc/include/asm/irq_regs.h
deleted file mode 100644
index ba94b51a0a70..000000000000
--- a/arch/powerpc/include/asm/irq_regs.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <asm-generic/irq_regs.h>
2
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2d81e202bdcc..14619a59ec09 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -290,11 +290,11 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
290 pte_t old_pte, new_pte = __pte(0); 290 pte_t old_pte, new_pte = __pte(0);
291 291
292 while (1) { 292 while (1) {
293 old_pte = pte_val(*ptep); 293 old_pte = *ptep;
294 /* 294 /*
295 * wait until _PAGE_BUSY is clear then set it atomically 295 * wait until _PAGE_BUSY is clear then set it atomically
296 */ 296 */
297 if (unlikely(old_pte & _PAGE_BUSY)) { 297 if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
298 cpu_relax(); 298 cpu_relax();
299 continue; 299 continue;
300 } 300 }
@@ -305,16 +305,18 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
305 return __pte(0); 305 return __pte(0);
306#endif 306#endif
307 /* If pte is not present return None */ 307 /* If pte is not present return None */
308 if (unlikely(!(old_pte & _PAGE_PRESENT))) 308 if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
309 return __pte(0); 309 return __pte(0);
310 310
311 new_pte = pte_mkyoung(old_pte); 311 new_pte = pte_mkyoung(old_pte);
312 if (writing && pte_write(old_pte)) 312 if (writing && pte_write(old_pte))
313 new_pte = pte_mkdirty(new_pte); 313 new_pte = pte_mkdirty(new_pte);
314 314
315 if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte, 315 if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
316 new_pte)) 316 pte_val(old_pte),
317 pte_val(new_pte))) {
317 break; 318 break;
319 }
318 } 320 }
319 return new_pte; 321 return new_pte;
320} 322}
@@ -335,7 +337,7 @@ static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
335{ 337{
336 if (key) 338 if (key)
337 return PP_RWRX <= pp && pp <= PP_RXRX; 339 return PP_RWRX <= pp && pp <= PP_RXRX;
338 return 1; 340 return true;
339} 341}
340 342
341static inline bool hpte_write_permission(unsigned long pp, unsigned long key) 343static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
@@ -373,7 +375,7 @@ static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
373 unsigned long mask = (pagesize >> PAGE_SHIFT) - 1; 375 unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
374 376
375 if (pagesize <= PAGE_SIZE) 377 if (pagesize <= PAGE_SIZE)
376 return 1; 378 return true;
377 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); 379 return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
378} 380}
379 381
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8ef05121d3cd..c610961720c7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -585,7 +585,7 @@ struct kvm_vcpu_arch {
585 pgd_t *pgdir; 585 pgd_t *pgdir;
586 586
587 u8 io_gpr; /* GPR used as IO source/target */ 587 u8 io_gpr; /* GPR used as IO source/target */
588 u8 mmio_is_bigendian; 588 u8 mmio_host_swabbed;
589 u8 mmio_sign_extend; 589 u8 mmio_sign_extend;
590 u8 osi_needed; 590 u8 osi_needed;
591 u8 osi_enabled; 591 u8 osi_enabled;
diff --git a/arch/powerpc/include/asm/local64.h b/arch/powerpc/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/powerpc/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index c8175a3fe560..ef8899432ae7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -103,9 +103,6 @@ struct machdep_calls {
103#endif 103#endif
104#endif /* CONFIG_PPC64 */ 104#endif /* CONFIG_PPC64 */
105 105
106 void (*pci_dma_dev_setup)(struct pci_dev *dev);
107 void (*pci_dma_bus_setup)(struct pci_bus *bus);
108
109 /* Platform set_dma_mask and dma_get_required_mask overrides */ 106 /* Platform set_dma_mask and dma_get_required_mask overrides */
110 int (*dma_set_mask)(struct device *dev, u64 dma_mask); 107 int (*dma_set_mask)(struct device *dev, u64 dma_mask);
111 u64 (*dma_get_required_mask)(struct device *dev); 108 u64 (*dma_get_required_mask)(struct device *dev);
@@ -125,9 +122,8 @@ struct machdep_calls {
125 unsigned int (*get_irq)(void); 122 unsigned int (*get_irq)(void);
126 123
127 /* PCI stuff */ 124 /* PCI stuff */
128 /* Called after scanning the bus, before allocating resources */ 125 /* Called after allocating resources */
129 void (*pcibios_fixup)(void); 126 void (*pcibios_fixup)(void);
130 int (*pci_probe_mode)(struct pci_bus *);
131 void (*pci_irq_fixup)(struct pci_dev *dev); 127 void (*pci_irq_fixup)(struct pci_dev *dev);
132 int (*pcibios_root_bridge_prepare)(struct pci_host_bridge 128 int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
133 *bridge); 129 *bridge);
@@ -237,18 +233,13 @@ struct machdep_calls {
237 /* Called for each PCI bus in the system when it's probed */ 233 /* Called for each PCI bus in the system when it's probed */
238 void (*pcibios_fixup_bus)(struct pci_bus *); 234 void (*pcibios_fixup_bus)(struct pci_bus *);
239 235
240 /* Called when pci_enable_device() is called. Returns 0 to
241 * allow assignment/enabling of the device. */
242 int (*pcibios_enable_device_hook)(struct pci_dev *);
243
244 /* Called after scan and before resource survey */ 236 /* Called after scan and before resource survey */
245 void (*pcibios_fixup_phb)(struct pci_controller *hose); 237 void (*pcibios_fixup_phb)(struct pci_controller *hose);
246 238
247 /* Called during PCI resource reassignment */ 239#ifdef CONFIG_PCI_IOV
248 resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type); 240 void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
249 241 resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
250 /* Reset the secondary bus of bridge */ 242#endif /* CONFIG_PCI_IOV */
251 void (*pcibios_reset_secondary_bus)(struct pci_dev *dev);
252 243
253 /* Called to shutdown machine specific hardware not already controlled 244 /* Called to shutdown machine specific hardware not already controlled
254 * by other drivers. 245 * by other drivers.
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 4f13c3ed7acf..1da6a81ce541 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -112,6 +112,7 @@
112#define TLBIEL_INVAL_SET_SHIFT 12 112#define TLBIEL_INVAL_SET_SHIFT 12
113 113
114#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */ 114#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
115#define POWER8_TLB_SETS 512 /* # sets in POWER8 TLB */
115 116
116#ifndef __ASSEMBLY__ 117#ifndef __ASSEMBLY__
117 118
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
index 3bef74a9914b..213f3a81593d 100644
--- a/arch/powerpc/include/asm/mpc85xx.h
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -61,6 +61,7 @@
61#define SVR_T4240 0x824000 61#define SVR_T4240 0x824000
62#define SVR_T4120 0x824001 62#define SVR_T4120 0x824001
63#define SVR_T4160 0x824100 63#define SVR_T4160 0x824100
64#define SVR_T4080 0x824102
64#define SVR_C291 0x850000 65#define SVR_C291 0x850000
65#define SVR_C292 0x850020 66#define SVR_C292 0x850020
66#define SVR_C293 0x850030 67#define SVR_C293 0x850030
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 754f93d208fa..98697611e7b3 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -34,10 +34,6 @@
34#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff 34#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
35#define MPIC_GREG_GCONF_MCK 0x08000000 35#define MPIC_GREG_GCONF_MCK 0x08000000
36#define MPIC_GREG_GLOBAL_CONF_1 0x00030 36#define MPIC_GREG_GLOBAL_CONF_1 0x00030
37#define MPIC_GREG_GLOBAL_CONF_1_SIE 0x08000000
38#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK 0x70000000
39#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(r) \
40 (((r) << 28) & MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK)
41#define MPIC_GREG_VENDOR_0 0x00040 37#define MPIC_GREG_VENDOR_0 0x00040
42#define MPIC_GREG_VENDOR_1 0x00050 38#define MPIC_GREG_VENDOR_1 0x00050
43#define MPIC_GREG_VENDOR_2 0x00060 39#define MPIC_GREG_VENDOR_2 0x00060
@@ -396,14 +392,7 @@ extern struct bus_type mpic_subsys;
396#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */ 392#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
397 393
398/* Get the version of primary MPIC */ 394/* Get the version of primary MPIC */
399#ifdef CONFIG_MPIC
400extern u32 fsl_mpic_primary_get_version(void); 395extern u32 fsl_mpic_primary_get_version(void);
401#else
402static inline u32 fsl_mpic_primary_get_version(void)
403{
404 return 0;
405}
406#endif
407 396
408/* Allocate the controller structure and setup the linux irq descs 397/* Allocate the controller structure and setup the linux irq descs
409 * for the range if interrupts passed in. No HW initialization is 398 * for the range if interrupts passed in. No HW initialization is
@@ -496,11 +485,5 @@ extern unsigned int mpic_get_coreint_irq(void);
496/* Fetch Machine Check interrupt from primary mpic */ 485/* Fetch Machine Check interrupt from primary mpic */
497extern unsigned int mpic_get_mcirq(void); 486extern unsigned int mpic_get_mcirq(void);
498 487
499/* Set the EPIC clock ratio */
500void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
501
502/* Enable/Disable EPIC serial interrupt mode */
503void mpic_set_serial_int(struct mpic *mpic, int enable);
504
505#endif /* __KERNEL__ */ 488#endif /* __KERNEL__ */
506#endif /* _ASM_POWERPC_MPIC_H */ 489#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
new file mode 100644
index 000000000000..ff1ccb375e60
--- /dev/null
+++ b/arch/powerpc/include/asm/nmi.h
@@ -0,0 +1,4 @@
1#ifndef _ASM_NMI_H
2#define _ASM_NMI_H
3
4#endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index b0fe0fe4e626..09a518bb7c03 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -9,12 +9,43 @@
9#ifndef _ASM_POWERPC_NVRAM_H 9#ifndef _ASM_POWERPC_NVRAM_H
10#define _ASM_POWERPC_NVRAM_H 10#define _ASM_POWERPC_NVRAM_H
11 11
12 12#include <linux/types.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <uapi/asm/nvram.h> 15#include <uapi/asm/nvram.h>
16 16
17/*
18 * Set oops header version to distinguish between old and new format header.
19 * lnx,oops-log partition max size is 4000, header version > 4000 will
20 * help in identifying new header.
21 */
22#define OOPS_HDR_VERSION 5000
23
24struct err_log_info {
25 __be32 error_type;
26 __be32 seq_num;
27};
28
29struct nvram_os_partition {
30 const char *name;
31 int req_size; /* desired size, in bytes */
32 int min_size; /* minimum acceptable size (0 means req_size) */
33 long size; /* size of data portion (excluding err_log_info) */
34 long index; /* offset of data portion of partition */
35 bool os_partition; /* partition initialized by OS, not FW */
36};
37
38struct oops_log_info {
39 __be16 version;
40 __be16 report_length;
41 __be64 timestamp;
42} __attribute__((packed));
43
44extern struct nvram_os_partition oops_log_partition;
45
17#ifdef CONFIG_PPC_PSERIES 46#ifdef CONFIG_PPC_PSERIES
47extern struct nvram_os_partition rtas_log_partition;
48
18extern int nvram_write_error_log(char * buff, int length, 49extern int nvram_write_error_log(char * buff, int length,
19 unsigned int err_type, unsigned int err_seq); 50 unsigned int err_type, unsigned int err_seq);
20extern int nvram_read_error_log(char * buff, int length, 51extern int nvram_read_error_log(char * buff, int length,
@@ -50,6 +81,23 @@ extern void pmac_xpram_write(int xpaddr, u8 data);
50/* Synchronize NVRAM */ 81/* Synchronize NVRAM */
51extern void nvram_sync(void); 82extern void nvram_sync(void);
52 83
84/* Initialize NVRAM OS partition */
85extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
86
87/* Initialize NVRAM oops partition */
88extern void __init nvram_init_oops_partition(int rtas_partition_exists);
89
90/* Read a NVRAM partition */
91extern int nvram_read_partition(struct nvram_os_partition *part, char *buff,
92 int length, unsigned int *err_type,
93 unsigned int *error_log_cnt);
94
95/* Write to NVRAM OS partition */
96extern int nvram_write_os_partition(struct nvram_os_partition *part,
97 char *buff, int length,
98 unsigned int err_type,
99 unsigned int error_log_cnt);
100
53/* Determine NVRAM size */ 101/* Determine NVRAM size */
54extern ssize_t nvram_get_size(void); 102extern ssize_t nvram_get_size(void);
55 103
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
new file mode 100644
index 000000000000..0321a909e663
--- /dev/null
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -0,0 +1,735 @@
1/*
2 * OPAL API definitions.
3 *
4 * Copyright 2011-2015 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef __OPAL_API_H
13#define __OPAL_API_H
14
15/****** OPAL APIs ******/
16
17/* Return codes */
18#define OPAL_SUCCESS 0
19#define OPAL_PARAMETER -1
20#define OPAL_BUSY -2
21#define OPAL_PARTIAL -3
22#define OPAL_CONSTRAINED -4
23#define OPAL_CLOSED -5
24#define OPAL_HARDWARE -6
25#define OPAL_UNSUPPORTED -7
26#define OPAL_PERMISSION -8
27#define OPAL_NO_MEM -9
28#define OPAL_RESOURCE -10
29#define OPAL_INTERNAL_ERROR -11
30#define OPAL_BUSY_EVENT -12
31#define OPAL_HARDWARE_FROZEN -13
32#define OPAL_WRONG_STATE -14
33#define OPAL_ASYNC_COMPLETION -15
34#define OPAL_EMPTY -16
35#define OPAL_I2C_TIMEOUT -17
36#define OPAL_I2C_INVALID_CMD -18
37#define OPAL_I2C_LBUS_PARITY -19
38#define OPAL_I2C_BKEND_OVERRUN -20
39#define OPAL_I2C_BKEND_ACCESS -21
40#define OPAL_I2C_ARBT_LOST -22
41#define OPAL_I2C_NACK_RCVD -23
42#define OPAL_I2C_STOP_ERR -24
43
44/* API Tokens (in r0) */
45#define OPAL_INVALID_CALL -1
46#define OPAL_TEST 0
47#define OPAL_CONSOLE_WRITE 1
48#define OPAL_CONSOLE_READ 2
49#define OPAL_RTC_READ 3
50#define OPAL_RTC_WRITE 4
51#define OPAL_CEC_POWER_DOWN 5
52#define OPAL_CEC_REBOOT 6
53#define OPAL_READ_NVRAM 7
54#define OPAL_WRITE_NVRAM 8
55#define OPAL_HANDLE_INTERRUPT 9
56#define OPAL_POLL_EVENTS 10
57#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
58#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
59#define OPAL_PCI_CONFIG_READ_BYTE 13
60#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
61#define OPAL_PCI_CONFIG_READ_WORD 15
62#define OPAL_PCI_CONFIG_WRITE_BYTE 16
63#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
64#define OPAL_PCI_CONFIG_WRITE_WORD 18
65#define OPAL_SET_XIVE 19
66#define OPAL_GET_XIVE 20
67#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
68#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
69#define OPAL_PCI_EEH_FREEZE_STATUS 23
70#define OPAL_PCI_SHPC 24
71#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
72#define OPAL_PCI_EEH_FREEZE_CLEAR 26
73#define OPAL_PCI_PHB_MMIO_ENABLE 27
74#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
75#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
76#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
77#define OPAL_PCI_SET_PE 31
78#define OPAL_PCI_SET_PELTV 32
79#define OPAL_PCI_SET_MVE 33
80#define OPAL_PCI_SET_MVE_ENABLE 34
81#define OPAL_PCI_GET_XIVE_REISSUE 35
82#define OPAL_PCI_SET_XIVE_REISSUE 36
83#define OPAL_PCI_SET_XIVE_PE 37
84#define OPAL_GET_XIVE_SOURCE 38
85#define OPAL_GET_MSI_32 39
86#define OPAL_GET_MSI_64 40
87#define OPAL_START_CPU 41
88#define OPAL_QUERY_CPU_STATUS 42
89#define OPAL_WRITE_OPPANEL 43 /* unimplemented */
90#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
91#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
92#define OPAL_PCI_RESET 49
93#define OPAL_PCI_GET_HUB_DIAG_DATA 50
94#define OPAL_PCI_GET_PHB_DIAG_DATA 51
95#define OPAL_PCI_FENCE_PHB 52
96#define OPAL_PCI_REINIT 53
97#define OPAL_PCI_MASK_PE_ERROR 54
98#define OPAL_SET_SLOT_LED_STATUS 55
99#define OPAL_GET_EPOW_STATUS 56
100#define OPAL_SET_SYSTEM_ATTENTION_LED 57
101#define OPAL_RESERVED1 58
102#define OPAL_RESERVED2 59
103#define OPAL_PCI_NEXT_ERROR 60
104#define OPAL_PCI_EEH_FREEZE_STATUS2 61
105#define OPAL_PCI_POLL 62
106#define OPAL_PCI_MSI_EOI 63
107#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
108#define OPAL_XSCOM_READ 65
109#define OPAL_XSCOM_WRITE 66
110#define OPAL_LPC_READ 67
111#define OPAL_LPC_WRITE 68
112#define OPAL_RETURN_CPU 69
113#define OPAL_REINIT_CPUS 70
114#define OPAL_ELOG_READ 71
115#define OPAL_ELOG_WRITE 72
116#define OPAL_ELOG_ACK 73
117#define OPAL_ELOG_RESEND 74
118#define OPAL_ELOG_SIZE 75
119#define OPAL_FLASH_VALIDATE 76
120#define OPAL_FLASH_MANAGE 77
121#define OPAL_FLASH_UPDATE 78
122#define OPAL_RESYNC_TIMEBASE 79
123#define OPAL_CHECK_TOKEN 80
124#define OPAL_DUMP_INIT 81
125#define OPAL_DUMP_INFO 82
126#define OPAL_DUMP_READ 83
127#define OPAL_DUMP_ACK 84
128#define OPAL_GET_MSG 85
129#define OPAL_CHECK_ASYNC_COMPLETION 86
130#define OPAL_SYNC_HOST_REBOOT 87
131#define OPAL_SENSOR_READ 88
132#define OPAL_GET_PARAM 89
133#define OPAL_SET_PARAM 90
134#define OPAL_DUMP_RESEND 91
135#define OPAL_ELOG_SEND 92 /* Deprecated */
136#define OPAL_PCI_SET_PHB_CAPI_MODE 93
137#define OPAL_DUMP_INFO2 94
138#define OPAL_WRITE_OPPANEL_ASYNC 95
139#define OPAL_PCI_ERR_INJECT 96
140#define OPAL_PCI_EEH_FREEZE_SET 97
141#define OPAL_HANDLE_HMI 98
142#define OPAL_CONFIG_CPU_IDLE_STATE 99
143#define OPAL_SLW_SET_REG 100
144#define OPAL_REGISTER_DUMP_REGION 101
145#define OPAL_UNREGISTER_DUMP_REGION 102
146#define OPAL_WRITE_TPO 103
147#define OPAL_READ_TPO 104
148#define OPAL_GET_DPO_STATUS 105
149#define OPAL_OLD_I2C_REQUEST 106 /* Deprecated */
150#define OPAL_IPMI_SEND 107
151#define OPAL_IPMI_RECV 108
152#define OPAL_I2C_REQUEST 109
153#define OPAL_FLASH_READ 110
154#define OPAL_FLASH_WRITE 111
155#define OPAL_FLASH_ERASE 112
156#define OPAL_LAST 112
157
158/* Device tree flags */
159
160/* Flags set in power-mgmt nodes in device tree if
161 * respective idle states are supported in the platform.
162 */
163#define OPAL_PM_NAP_ENABLED 0x00010000
164#define OPAL_PM_SLEEP_ENABLED 0x00020000
165#define OPAL_PM_WINKLE_ENABLED 0x00040000
166#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
167
168#ifndef __ASSEMBLY__
169
170/* Other enums */
171enum OpalFreezeState {
172 OPAL_EEH_STOPPED_NOT_FROZEN = 0,
173 OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
174 OPAL_EEH_STOPPED_DMA_FREEZE = 2,
175 OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
176 OPAL_EEH_STOPPED_RESET = 4,
177 OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
178 OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
179};
180
181enum OpalEehFreezeActionToken {
182 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
183 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
184 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
185
186 OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
187 OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
188 OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
189};
190
191enum OpalPciStatusToken {
192 OPAL_EEH_NO_ERROR = 0,
193 OPAL_EEH_IOC_ERROR = 1,
194 OPAL_EEH_PHB_ERROR = 2,
195 OPAL_EEH_PE_ERROR = 3,
196 OPAL_EEH_PE_MMIO_ERROR = 4,
197 OPAL_EEH_PE_DMA_ERROR = 5
198};
199
200enum OpalPciErrorSeverity {
201 OPAL_EEH_SEV_NO_ERROR = 0,
202 OPAL_EEH_SEV_IOC_DEAD = 1,
203 OPAL_EEH_SEV_PHB_DEAD = 2,
204 OPAL_EEH_SEV_PHB_FENCED = 3,
205 OPAL_EEH_SEV_PE_ER = 4,
206 OPAL_EEH_SEV_INF = 5
207};
208
209enum OpalErrinjectType {
210 OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
211 OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
212};
213
214enum OpalErrinjectFunc {
215 /* IOA bus specific errors */
216 OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
217 OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
218 OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
219 OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
220 OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
221 OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
222 OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
223 OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
224 OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
225 OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
226 OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
227 OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
228 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
229 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
230 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
231 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
232 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
233 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
234 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
235 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
236};
237
238enum OpalMmioWindowType {
239 OPAL_M32_WINDOW_TYPE = 1,
240 OPAL_M64_WINDOW_TYPE = 2,
241 OPAL_IO_WINDOW_TYPE = 3
242};
243
244enum OpalExceptionHandler {
245 OPAL_MACHINE_CHECK_HANDLER = 1,
246 OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
247 OPAL_SOFTPATCH_HANDLER = 3
248};
249
250enum OpalPendingState {
251 OPAL_EVENT_OPAL_INTERNAL = 0x1,
252 OPAL_EVENT_NVRAM = 0x2,
253 OPAL_EVENT_RTC = 0x4,
254 OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
255 OPAL_EVENT_CONSOLE_INPUT = 0x10,
256 OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
257 OPAL_EVENT_ERROR_LOG = 0x40,
258 OPAL_EVENT_EPOW = 0x80,
259 OPAL_EVENT_LED_STATUS = 0x100,
260 OPAL_EVENT_PCI_ERROR = 0x200,
261 OPAL_EVENT_DUMP_AVAIL = 0x400,
262 OPAL_EVENT_MSG_PENDING = 0x800,
263};
264
265enum OpalThreadStatus {
266 OPAL_THREAD_INACTIVE = 0x0,
267 OPAL_THREAD_STARTED = 0x1,
268 OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
269};
270
271enum OpalPciBusCompare {
272 OpalPciBusAny = 0, /* Any bus number match */
273 OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
274 OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
275 OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
276 OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
277 OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
278 OpalPciBusAll = 7, /* Match bus number exactly */
279};
280
281enum OpalDeviceCompare {
282 OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
283 OPAL_COMPARE_RID_DEVICE_NUMBER = 1
284};
285
286enum OpalFuncCompare {
287 OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
288 OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
289};
290
291enum OpalPeAction {
292 OPAL_UNMAP_PE = 0,
293 OPAL_MAP_PE = 1
294};
295
296enum OpalPeltvAction {
297 OPAL_REMOVE_PE_FROM_DOMAIN = 0,
298 OPAL_ADD_PE_TO_DOMAIN = 1
299};
300
301enum OpalMveEnableAction {
302 OPAL_DISABLE_MVE = 0,
303 OPAL_ENABLE_MVE = 1
304};
305
306enum OpalM64Action {
307 OPAL_DISABLE_M64 = 0,
308 OPAL_ENABLE_M64_SPLIT = 1,
309 OPAL_ENABLE_M64_NON_SPLIT = 2
310};
311
312enum OpalPciResetScope {
313 OPAL_RESET_PHB_COMPLETE = 1,
314 OPAL_RESET_PCI_LINK = 2,
315 OPAL_RESET_PHB_ERROR = 3,
316 OPAL_RESET_PCI_HOT = 4,
317 OPAL_RESET_PCI_FUNDAMENTAL = 5,
318 OPAL_RESET_PCI_IODA_TABLE = 6
319};
320
321enum OpalPciReinitScope {
322 /*
323 * Note: we chose values that do not overlap
324 * OpalPciResetScope as OPAL v2 used the same
325 * enum for both
326 */
327 OPAL_REINIT_PCI_DEV = 1000
328};
329
330enum OpalPciResetState {
331 OPAL_DEASSERT_RESET = 0,
332 OPAL_ASSERT_RESET = 1
333};
334
335/*
336 * Address cycle types for LPC accesses. These also correspond
337 * to the content of the first cell of the "reg" property for
338 * device nodes on the LPC bus
339 */
340enum OpalLPCAddressType {
341 OPAL_LPC_MEM = 0,
342 OPAL_LPC_IO = 1,
343 OPAL_LPC_FW = 2,
344};
345
346enum opal_msg_type {
347 OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
348 * additional params function-specific
349 */
350 OPAL_MSG_MEM_ERR,
351 OPAL_MSG_EPOW,
352 OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
353 OPAL_MSG_HMI_EVT,
354 OPAL_MSG_DPO,
355 OPAL_MSG_TYPE_MAX,
356};
357
358struct opal_msg {
359 __be32 msg_type;
360 __be32 reserved;
361 __be64 params[8];
362};
363
364/* System parameter permission */
365enum OpalSysparamPerm {
366 OPAL_SYSPARAM_READ = 0x1,
367 OPAL_SYSPARAM_WRITE = 0x2,
368 OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
369};
370
371enum {
372 OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
373};
374
375struct opal_ipmi_msg {
376 uint8_t version;
377 uint8_t netfn;
378 uint8_t cmd;
379 uint8_t data[];
380};
381
382/* FSP memory errors handling */
383enum OpalMemErr_Version {
384 OpalMemErr_V1 = 1,
385};
386
387enum OpalMemErrType {
388 OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
389 OPAL_MEM_ERR_TYPE_DYN_DALLOC,
390};
391
392/* Memory Reilience error type */
393enum OpalMemErr_ResilErrType {
394 OPAL_MEM_RESILIENCE_CE = 0,
395 OPAL_MEM_RESILIENCE_UE,
396 OPAL_MEM_RESILIENCE_UE_SCRUB,
397};
398
399/* Dynamic Memory Deallocation type */
400enum OpalMemErr_DynErrType {
401 OPAL_MEM_DYNAMIC_DEALLOC = 0,
402};
403
404struct OpalMemoryErrorData {
405 enum OpalMemErr_Version version:8; /* 0x00 */
406 enum OpalMemErrType type:8; /* 0x01 */
407 __be16 flags; /* 0x02 */
408 uint8_t reserved_1[4]; /* 0x04 */
409
410 union {
411 /* Memory Resilience corrected/uncorrected error info */
412 struct {
413 enum OpalMemErr_ResilErrType resil_err_type:8;
414 uint8_t reserved_1[7];
415 __be64 physical_address_start;
416 __be64 physical_address_end;
417 } resilience;
418 /* Dynamic memory deallocation error info */
419 struct {
420 enum OpalMemErr_DynErrType dyn_err_type:8;
421 uint8_t reserved_1[7];
422 __be64 physical_address_start;
423 __be64 physical_address_end;
424 } dyn_dealloc;
425 } u;
426};
427
428/* HMI interrupt event */
429enum OpalHMI_Version {
430 OpalHMIEvt_V1 = 1,
431};
432
433enum OpalHMI_Severity {
434 OpalHMI_SEV_NO_ERROR = 0,
435 OpalHMI_SEV_WARNING = 1,
436 OpalHMI_SEV_ERROR_SYNC = 2,
437 OpalHMI_SEV_FATAL = 3,
438};
439
440enum OpalHMI_Disposition {
441 OpalHMI_DISPOSITION_RECOVERED = 0,
442 OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
443};
444
445enum OpalHMI_ErrType {
446 OpalHMI_ERROR_MALFUNC_ALERT = 0,
447 OpalHMI_ERROR_PROC_RECOV_DONE,
448 OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
449 OpalHMI_ERROR_PROC_RECOV_MASKED,
450 OpalHMI_ERROR_TFAC,
451 OpalHMI_ERROR_TFMR_PARITY,
452 OpalHMI_ERROR_HA_OVERFLOW_WARN,
453 OpalHMI_ERROR_XSCOM_FAIL,
454 OpalHMI_ERROR_XSCOM_DONE,
455 OpalHMI_ERROR_SCOM_FIR,
456 OpalHMI_ERROR_DEBUG_TRIG_FIR,
457 OpalHMI_ERROR_HYP_RESOURCE,
458 OpalHMI_ERROR_CAPP_RECOVERY,
459};
460
461struct OpalHMIEvent {
462 uint8_t version; /* 0x00 */
463 uint8_t severity; /* 0x01 */
464 uint8_t type; /* 0x02 */
465 uint8_t disposition; /* 0x03 */
466 uint8_t reserved_1[4]; /* 0x04 */
467
468 __be64 hmer;
469 /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
470 __be64 tfmr;
471};
472
473enum {
474 OPAL_P7IOC_DIAG_TYPE_NONE = 0,
475 OPAL_P7IOC_DIAG_TYPE_RGC = 1,
476 OPAL_P7IOC_DIAG_TYPE_BI = 2,
477 OPAL_P7IOC_DIAG_TYPE_CI = 3,
478 OPAL_P7IOC_DIAG_TYPE_MISC = 4,
479 OPAL_P7IOC_DIAG_TYPE_I2C = 5,
480 OPAL_P7IOC_DIAG_TYPE_LAST = 6
481};
482
483struct OpalIoP7IOCErrorData {
484 __be16 type;
485
486 /* GEM */
487 __be64 gemXfir;
488 __be64 gemRfir;
489 __be64 gemRirqfir;
490 __be64 gemMask;
491 __be64 gemRwof;
492
493 /* LEM */
494 __be64 lemFir;
495 __be64 lemErrMask;
496 __be64 lemAction0;
497 __be64 lemAction1;
498 __be64 lemWof;
499
500 union {
501 struct OpalIoP7IOCRgcErrorData {
502 __be64 rgcStatus; /* 3E1C10 */
503 __be64 rgcLdcp; /* 3E1C18 */
504 }rgc;
505 struct OpalIoP7IOCBiErrorData {
506 __be64 biLdcp0; /* 3C0100, 3C0118 */
507 __be64 biLdcp1; /* 3C0108, 3C0120 */
508 __be64 biLdcp2; /* 3C0110, 3C0128 */
509 __be64 biFenceStatus; /* 3C0130, 3C0130 */
510
511 uint8_t biDownbound; /* BI Downbound or Upbound */
512 }bi;
513 struct OpalIoP7IOCCiErrorData {
514 __be64 ciPortStatus; /* 3Dn008 */
515 __be64 ciPortLdcp; /* 3Dn010 */
516
517 uint8_t ciPort; /* Index of CI port: 0/1 */
518 }ci;
519 };
520};
521
522/**
523 * This structure defines the overlay which will be used to store PHB error
524 * data upon request.
525 */
526enum {
527 OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
528};
529
530enum {
531 OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
532 OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
533};
534
535enum {
536 OPAL_P7IOC_NUM_PEST_REGS = 128,
537 OPAL_PHB3_NUM_PEST_REGS = 256
538};
539
540struct OpalIoPhbErrorCommon {
541 __be32 version;
542 __be32 ioType;
543 __be32 len;
544};
545
546struct OpalIoP7IOCPhbErrorData {
547 struct OpalIoPhbErrorCommon common;
548
549 __be32 brdgCtl;
550
551 // P7IOC utl regs
552 __be32 portStatusReg;
553 __be32 rootCmplxStatus;
554 __be32 busAgentStatus;
555
556 // P7IOC cfg regs
557 __be32 deviceStatus;
558 __be32 slotStatus;
559 __be32 linkStatus;
560 __be32 devCmdStatus;
561 __be32 devSecStatus;
562
563 // cfg AER regs
564 __be32 rootErrorStatus;
565 __be32 uncorrErrorStatus;
566 __be32 corrErrorStatus;
567 __be32 tlpHdr1;
568 __be32 tlpHdr2;
569 __be32 tlpHdr3;
570 __be32 tlpHdr4;
571 __be32 sourceId;
572
573 __be32 rsv3;
574
575 // Record data about the call to allocate a buffer.
576 __be64 errorClass;
577 __be64 correlator;
578
579 //P7IOC MMIO Error Regs
580 __be64 p7iocPlssr; // n120
581 __be64 p7iocCsr; // n110
582 __be64 lemFir; // nC00
583 __be64 lemErrorMask; // nC18
584 __be64 lemWOF; // nC40
585 __be64 phbErrorStatus; // nC80
586 __be64 phbFirstErrorStatus; // nC88
587 __be64 phbErrorLog0; // nCC0
588 __be64 phbErrorLog1; // nCC8
589 __be64 mmioErrorStatus; // nD00
590 __be64 mmioFirstErrorStatus; // nD08
591 __be64 mmioErrorLog0; // nD40
592 __be64 mmioErrorLog1; // nD48
593 __be64 dma0ErrorStatus; // nD80
594 __be64 dma0FirstErrorStatus; // nD88
595 __be64 dma0ErrorLog0; // nDC0
596 __be64 dma0ErrorLog1; // nDC8
597 __be64 dma1ErrorStatus; // nE00
598 __be64 dma1FirstErrorStatus; // nE08
599 __be64 dma1ErrorLog0; // nE40
600 __be64 dma1ErrorLog1; // nE48
601 __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
602 __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
603};
604
605struct OpalIoPhb3ErrorData {
606 struct OpalIoPhbErrorCommon common;
607
608 __be32 brdgCtl;
609
610 /* PHB3 UTL regs */
611 __be32 portStatusReg;
612 __be32 rootCmplxStatus;
613 __be32 busAgentStatus;
614
615 /* PHB3 cfg regs */
616 __be32 deviceStatus;
617 __be32 slotStatus;
618 __be32 linkStatus;
619 __be32 devCmdStatus;
620 __be32 devSecStatus;
621
622 /* cfg AER regs */
623 __be32 rootErrorStatus;
624 __be32 uncorrErrorStatus;
625 __be32 corrErrorStatus;
626 __be32 tlpHdr1;
627 __be32 tlpHdr2;
628 __be32 tlpHdr3;
629 __be32 tlpHdr4;
630 __be32 sourceId;
631
632 __be32 rsv3;
633
634 /* Record data about the call to allocate a buffer */
635 __be64 errorClass;
636 __be64 correlator;
637
638 /* PHB3 MMIO Error Regs */
639 __be64 nFir; /* 000 */
640 __be64 nFirMask; /* 003 */
641 __be64 nFirWOF; /* 008 */
642 __be64 phbPlssr; /* 120 */
643 __be64 phbCsr; /* 110 */
644 __be64 lemFir; /* C00 */
645 __be64 lemErrorMask; /* C18 */
646 __be64 lemWOF; /* C40 */
647 __be64 phbErrorStatus; /* C80 */
648 __be64 phbFirstErrorStatus; /* C88 */
649 __be64 phbErrorLog0; /* CC0 */
650 __be64 phbErrorLog1; /* CC8 */
651 __be64 mmioErrorStatus; /* D00 */
652 __be64 mmioFirstErrorStatus; /* D08 */
653 __be64 mmioErrorLog0; /* D40 */
654 __be64 mmioErrorLog1; /* D48 */
655 __be64 dma0ErrorStatus; /* D80 */
656 __be64 dma0FirstErrorStatus; /* D88 */
657 __be64 dma0ErrorLog0; /* DC0 */
658 __be64 dma0ErrorLog1; /* DC8 */
659 __be64 dma1ErrorStatus; /* E00 */
660 __be64 dma1FirstErrorStatus; /* E08 */
661 __be64 dma1ErrorLog0; /* E40 */
662 __be64 dma1ErrorLog1; /* E48 */
663 __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
664 __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
665};
666
667enum {
668 OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
669 OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
670};
671
672typedef struct oppanel_line {
673 __be64 line;
674 __be64 line_len;
675} oppanel_line_t;
676
677/*
678 * SG entries
679 *
680 * WARNING: The current implementation requires each entry
681 * to represent a block that is 4k aligned *and* each block
682 * size except the last one in the list to be as well.
683 */
684struct opal_sg_entry {
685 __be64 data;
686 __be64 length;
687};
688
689/*
690 * Candiate image SG list.
691 *
692 * length = VER | length
693 */
694struct opal_sg_list {
695 __be64 length;
696 __be64 next;
697 struct opal_sg_entry entry[];
698};
699
700/*
701 * Dump region ID range usable by the OS
702 */
703#define OPAL_DUMP_REGION_HOST_START 0x80
704#define OPAL_DUMP_REGION_LOG_BUF 0x80
705#define OPAL_DUMP_REGION_HOST_END 0xFF
706
707/* CAPI modes for PHB */
708enum {
709 OPAL_PHB_CAPI_MODE_PCIE = 0,
710 OPAL_PHB_CAPI_MODE_CAPI = 1,
711 OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
712 OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
713};
714
715/* OPAL I2C request */
716struct opal_i2c_request {
717 uint8_t type;
718#define OPAL_I2C_RAW_READ 0
719#define OPAL_I2C_RAW_WRITE 1
720#define OPAL_I2C_SM_READ 2
721#define OPAL_I2C_SM_WRITE 3
722 uint8_t flags;
723#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
724 uint8_t subaddr_sz; /* Max 4 */
725 uint8_t reserved;
726 __be16 addr; /* 7 or 10 bit address */
727 __be16 reserved2;
728 __be32 subaddr; /* Sub-address if any */
729 __be32 size; /* Data size */
730 __be64 buffer_ra; /* Buffer real address */
731};
732
733#endif /* __ASSEMBLY__ */
734
735#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9ee0a30a02ce..042af1abfc4d 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -9,755 +9,17 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef __OPAL_H 12#ifndef _ASM_POWERPC_OPAL_H
13#define __OPAL_H 13#define _ASM_POWERPC_OPAL_H
14 14
15#ifndef __ASSEMBLY__ 15#include <asm/opal-api.h>
16/*
17 * SG entry
18 *
19 * WARNING: The current implementation requires each entry
20 * to represent a block that is 4k aligned *and* each block
21 * size except the last one in the list to be as well.
22 */
23struct opal_sg_entry {
24 __be64 data;
25 __be64 length;
26};
27
28/* SG list */
29struct opal_sg_list {
30 __be64 length;
31 __be64 next;
32 struct opal_sg_entry entry[];
33};
34
35/* We calculate number of sg entries based on PAGE_SIZE */
36#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
37
38#endif /* __ASSEMBLY__ */
39
40/****** OPAL APIs ******/
41
42/* Return codes */
43#define OPAL_SUCCESS 0
44#define OPAL_PARAMETER -1
45#define OPAL_BUSY -2
46#define OPAL_PARTIAL -3
47#define OPAL_CONSTRAINED -4
48#define OPAL_CLOSED -5
49#define OPAL_HARDWARE -6
50#define OPAL_UNSUPPORTED -7
51#define OPAL_PERMISSION -8
52#define OPAL_NO_MEM -9
53#define OPAL_RESOURCE -10
54#define OPAL_INTERNAL_ERROR -11
55#define OPAL_BUSY_EVENT -12
56#define OPAL_HARDWARE_FROZEN -13
57#define OPAL_WRONG_STATE -14
58#define OPAL_ASYNC_COMPLETION -15
59#define OPAL_I2C_TIMEOUT -17
60#define OPAL_I2C_INVALID_CMD -18
61#define OPAL_I2C_LBUS_PARITY -19
62#define OPAL_I2C_BKEND_OVERRUN -20
63#define OPAL_I2C_BKEND_ACCESS -21
64#define OPAL_I2C_ARBT_LOST -22
65#define OPAL_I2C_NACK_RCVD -23
66#define OPAL_I2C_STOP_ERR -24
67
68/* API Tokens (in r0) */
69#define OPAL_INVALID_CALL -1
70#define OPAL_CONSOLE_WRITE 1
71#define OPAL_CONSOLE_READ 2
72#define OPAL_RTC_READ 3
73#define OPAL_RTC_WRITE 4
74#define OPAL_CEC_POWER_DOWN 5
75#define OPAL_CEC_REBOOT 6
76#define OPAL_READ_NVRAM 7
77#define OPAL_WRITE_NVRAM 8
78#define OPAL_HANDLE_INTERRUPT 9
79#define OPAL_POLL_EVENTS 10
80#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
81#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
82#define OPAL_PCI_CONFIG_READ_BYTE 13
83#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
84#define OPAL_PCI_CONFIG_READ_WORD 15
85#define OPAL_PCI_CONFIG_WRITE_BYTE 16
86#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
87#define OPAL_PCI_CONFIG_WRITE_WORD 18
88#define OPAL_SET_XIVE 19
89#define OPAL_GET_XIVE 20
90#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
91#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
92#define OPAL_PCI_EEH_FREEZE_STATUS 23
93#define OPAL_PCI_SHPC 24
94#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
95#define OPAL_PCI_EEH_FREEZE_CLEAR 26
96#define OPAL_PCI_PHB_MMIO_ENABLE 27
97#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
98#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
99#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
100#define OPAL_PCI_SET_PE 31
101#define OPAL_PCI_SET_PELTV 32
102#define OPAL_PCI_SET_MVE 33
103#define OPAL_PCI_SET_MVE_ENABLE 34
104#define OPAL_PCI_GET_XIVE_REISSUE 35
105#define OPAL_PCI_SET_XIVE_REISSUE 36
106#define OPAL_PCI_SET_XIVE_PE 37
107#define OPAL_GET_XIVE_SOURCE 38
108#define OPAL_GET_MSI_32 39
109#define OPAL_GET_MSI_64 40
110#define OPAL_START_CPU 41
111#define OPAL_QUERY_CPU_STATUS 42
112#define OPAL_WRITE_OPPANEL 43
113#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
114#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
115#define OPAL_PCI_RESET 49
116#define OPAL_PCI_GET_HUB_DIAG_DATA 50
117#define OPAL_PCI_GET_PHB_DIAG_DATA 51
118#define OPAL_PCI_FENCE_PHB 52
119#define OPAL_PCI_REINIT 53
120#define OPAL_PCI_MASK_PE_ERROR 54
121#define OPAL_SET_SLOT_LED_STATUS 55
122#define OPAL_GET_EPOW_STATUS 56
123#define OPAL_SET_SYSTEM_ATTENTION_LED 57
124#define OPAL_RESERVED1 58
125#define OPAL_RESERVED2 59
126#define OPAL_PCI_NEXT_ERROR 60
127#define OPAL_PCI_EEH_FREEZE_STATUS2 61
128#define OPAL_PCI_POLL 62
129#define OPAL_PCI_MSI_EOI 63
130#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
131#define OPAL_XSCOM_READ 65
132#define OPAL_XSCOM_WRITE 66
133#define OPAL_LPC_READ 67
134#define OPAL_LPC_WRITE 68
135#define OPAL_RETURN_CPU 69
136#define OPAL_REINIT_CPUS 70
137#define OPAL_ELOG_READ 71
138#define OPAL_ELOG_WRITE 72
139#define OPAL_ELOG_ACK 73
140#define OPAL_ELOG_RESEND 74
141#define OPAL_ELOG_SIZE 75
142#define OPAL_FLASH_VALIDATE 76
143#define OPAL_FLASH_MANAGE 77
144#define OPAL_FLASH_UPDATE 78
145#define OPAL_RESYNC_TIMEBASE 79
146#define OPAL_CHECK_TOKEN 80
147#define OPAL_DUMP_INIT 81
148#define OPAL_DUMP_INFO 82
149#define OPAL_DUMP_READ 83
150#define OPAL_DUMP_ACK 84
151#define OPAL_GET_MSG 85
152#define OPAL_CHECK_ASYNC_COMPLETION 86
153#define OPAL_SYNC_HOST_REBOOT 87
154#define OPAL_SENSOR_READ 88
155#define OPAL_GET_PARAM 89
156#define OPAL_SET_PARAM 90
157#define OPAL_DUMP_RESEND 91
158#define OPAL_PCI_SET_PHB_CXL_MODE 93
159#define OPAL_DUMP_INFO2 94
160#define OPAL_PCI_ERR_INJECT 96
161#define OPAL_PCI_EEH_FREEZE_SET 97
162#define OPAL_HANDLE_HMI 98
163#define OPAL_CONFIG_CPU_IDLE_STATE 99
164#define OPAL_SLW_SET_REG 100
165#define OPAL_REGISTER_DUMP_REGION 101
166#define OPAL_UNREGISTER_DUMP_REGION 102
167#define OPAL_WRITE_TPO 103
168#define OPAL_READ_TPO 104
169#define OPAL_IPMI_SEND 107
170#define OPAL_IPMI_RECV 108
171#define OPAL_I2C_REQUEST 109
172
173/* Device tree flags */
174
175/* Flags set in power-mgmt nodes in device tree if
176 * respective idle states are supported in the platform.
177 */
178#define OPAL_PM_NAP_ENABLED 0x00010000
179#define OPAL_PM_SLEEP_ENABLED 0x00020000
180#define OPAL_PM_WINKLE_ENABLED 0x00040000
181#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000
182 16
183#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
184 18
185#include <linux/notifier.h> 19#include <linux/notifier.h>
186 20
187/* Other enums */ 21/* We calculate number of sg entries based on PAGE_SIZE */
188enum OpalVendorApiTokens { 22#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
189 OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
190};
191
192enum OpalFreezeState {
193 OPAL_EEH_STOPPED_NOT_FROZEN = 0,
194 OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
195 OPAL_EEH_STOPPED_DMA_FREEZE = 2,
196 OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
197 OPAL_EEH_STOPPED_RESET = 4,
198 OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
199 OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
200};
201
202enum OpalEehFreezeActionToken {
203 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
204 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
205 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
206
207 OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
208 OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
209 OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
210};
211
212enum OpalPciStatusToken {
213 OPAL_EEH_NO_ERROR = 0,
214 OPAL_EEH_IOC_ERROR = 1,
215 OPAL_EEH_PHB_ERROR = 2,
216 OPAL_EEH_PE_ERROR = 3,
217 OPAL_EEH_PE_MMIO_ERROR = 4,
218 OPAL_EEH_PE_DMA_ERROR = 5
219};
220
221enum OpalPciErrorSeverity {
222 OPAL_EEH_SEV_NO_ERROR = 0,
223 OPAL_EEH_SEV_IOC_DEAD = 1,
224 OPAL_EEH_SEV_PHB_DEAD = 2,
225 OPAL_EEH_SEV_PHB_FENCED = 3,
226 OPAL_EEH_SEV_PE_ER = 4,
227 OPAL_EEH_SEV_INF = 5
228};
229
230enum OpalErrinjectType {
231 OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
232 OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
233};
234
235enum OpalErrinjectFunc {
236 /* IOA bus specific errors */
237 OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
238 OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
239 OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
240 OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
241 OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
242 OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
243 OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
244 OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
245 OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
246 OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
247 OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
248 OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
249 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
250 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
251 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
252 OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
253 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
254 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
255 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
256 OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
257};
258
259enum OpalShpcAction {
260 OPAL_SHPC_GET_LINK_STATE = 0,
261 OPAL_SHPC_GET_SLOT_STATE = 1
262};
263
264enum OpalShpcLinkState {
265 OPAL_SHPC_LINK_DOWN = 0,
266 OPAL_SHPC_LINK_UP = 1
267};
268
269enum OpalMmioWindowType {
270 OPAL_M32_WINDOW_TYPE = 1,
271 OPAL_M64_WINDOW_TYPE = 2,
272 OPAL_IO_WINDOW_TYPE = 3
273};
274
275enum OpalShpcSlotState {
276 OPAL_SHPC_DEV_NOT_PRESENT = 0,
277 OPAL_SHPC_DEV_PRESENT = 1
278};
279
280enum OpalExceptionHandler {
281 OPAL_MACHINE_CHECK_HANDLER = 1,
282 OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
283 OPAL_SOFTPATCH_HANDLER = 3
284};
285
286enum OpalPendingState {
287 OPAL_EVENT_OPAL_INTERNAL = 0x1,
288 OPAL_EVENT_NVRAM = 0x2,
289 OPAL_EVENT_RTC = 0x4,
290 OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
291 OPAL_EVENT_CONSOLE_INPUT = 0x10,
292 OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
293 OPAL_EVENT_ERROR_LOG = 0x40,
294 OPAL_EVENT_EPOW = 0x80,
295 OPAL_EVENT_LED_STATUS = 0x100,
296 OPAL_EVENT_PCI_ERROR = 0x200,
297 OPAL_EVENT_DUMP_AVAIL = 0x400,
298 OPAL_EVENT_MSG_PENDING = 0x800,
299};
300
301enum OpalMessageType {
302 OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
303 * additional params function-specific
304 */
305 OPAL_MSG_MEM_ERR,
306 OPAL_MSG_EPOW,
307 OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
308 OPAL_MSG_HMI_EVT,
309 OPAL_MSG_TYPE_MAX,
310};
311
312enum OpalThreadStatus {
313 OPAL_THREAD_INACTIVE = 0x0,
314 OPAL_THREAD_STARTED = 0x1,
315 OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
316};
317
318enum OpalPciBusCompare {
319 OpalPciBusAny = 0, /* Any bus number match */
320 OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
321 OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
322 OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
323 OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
324 OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
325 OpalPciBusAll = 7, /* Match bus number exactly */
326};
327
328enum OpalDeviceCompare {
329 OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
330 OPAL_COMPARE_RID_DEVICE_NUMBER = 1
331};
332
333enum OpalFuncCompare {
334 OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
335 OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
336};
337
338enum OpalPeAction {
339 OPAL_UNMAP_PE = 0,
340 OPAL_MAP_PE = 1
341};
342
343enum OpalPeltvAction {
344 OPAL_REMOVE_PE_FROM_DOMAIN = 0,
345 OPAL_ADD_PE_TO_DOMAIN = 1
346};
347
348enum OpalMveEnableAction {
349 OPAL_DISABLE_MVE = 0,
350 OPAL_ENABLE_MVE = 1
351};
352
353enum OpalM64EnableAction {
354 OPAL_DISABLE_M64 = 0,
355 OPAL_ENABLE_M64_SPLIT = 1,
356 OPAL_ENABLE_M64_NON_SPLIT = 2
357};
358
359enum OpalPciResetScope {
360 OPAL_RESET_PHB_COMPLETE = 1,
361 OPAL_RESET_PCI_LINK = 2,
362 OPAL_RESET_PHB_ERROR = 3,
363 OPAL_RESET_PCI_HOT = 4,
364 OPAL_RESET_PCI_FUNDAMENTAL = 5,
365 OPAL_RESET_PCI_IODA_TABLE = 6
366};
367
368enum OpalPciReinitScope {
369 OPAL_REINIT_PCI_DEV = 1000
370};
371
372enum OpalPciResetState {
373 OPAL_DEASSERT_RESET = 0,
374 OPAL_ASSERT_RESET = 1
375};
376
377enum OpalPciMaskAction {
378 OPAL_UNMASK_ERROR_TYPE = 0,
379 OPAL_MASK_ERROR_TYPE = 1
380};
381
382enum OpalSlotLedType {
383 OPAL_SLOT_LED_ID_TYPE = 0,
384 OPAL_SLOT_LED_FAULT_TYPE = 1
385};
386
387enum OpalLedAction {
388 OPAL_TURN_OFF_LED = 0,
389 OPAL_TURN_ON_LED = 1,
390 OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
391};
392
393enum OpalEpowStatus {
394 OPAL_EPOW_NONE = 0,
395 OPAL_EPOW_UPS = 1,
396 OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
397 OPAL_EPOW_OVER_INTERNAL_TEMP = 3
398};
399
400/*
401 * Address cycle types for LPC accesses. These also correspond
402 * to the content of the first cell of the "reg" property for
403 * device nodes on the LPC bus
404 */
405enum OpalLPCAddressType {
406 OPAL_LPC_MEM = 0,
407 OPAL_LPC_IO = 1,
408 OPAL_LPC_FW = 2,
409};
410
411/* System parameter permission */
412enum OpalSysparamPerm {
413 OPAL_SYSPARAM_READ = 0x1,
414 OPAL_SYSPARAM_WRITE = 0x2,
415 OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
416};
417
418struct opal_msg {
419 __be32 msg_type;
420 __be32 reserved;
421 __be64 params[8];
422};
423
424enum {
425 OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
426};
427
428struct opal_ipmi_msg {
429 uint8_t version;
430 uint8_t netfn;
431 uint8_t cmd;
432 uint8_t data[];
433};
434
435/* FSP memory errors handling */
436enum OpalMemErr_Version {
437 OpalMemErr_V1 = 1,
438};
439
440enum OpalMemErrType {
441 OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
442 OPAL_MEM_ERR_TYPE_DYN_DALLOC,
443 OPAL_MEM_ERR_TYPE_SCRUB,
444};
445
446/* Memory Reilience error type */
447enum OpalMemErr_ResilErrType {
448 OPAL_MEM_RESILIENCE_CE = 0,
449 OPAL_MEM_RESILIENCE_UE,
450 OPAL_MEM_RESILIENCE_UE_SCRUB,
451};
452
453/* Dynamic Memory Deallocation type */
454enum OpalMemErr_DynErrType {
455 OPAL_MEM_DYNAMIC_DEALLOC = 0,
456};
457
458/* OpalMemoryErrorData->flags */
459#define OPAL_MEM_CORRECTED_ERROR 0x0001
460#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
461#define OPAL_MEM_ACK_REQUIRED 0x8000
462
463struct OpalMemoryErrorData {
464 enum OpalMemErr_Version version:8; /* 0x00 */
465 enum OpalMemErrType type:8; /* 0x01 */
466 __be16 flags; /* 0x02 */
467 uint8_t reserved_1[4]; /* 0x04 */
468
469 union {
470 /* Memory Resilience corrected/uncorrected error info */
471 struct {
472 enum OpalMemErr_ResilErrType resil_err_type:8;
473 uint8_t reserved_1[7];
474 __be64 physical_address_start;
475 __be64 physical_address_end;
476 } resilience;
477 /* Dynamic memory deallocation error info */
478 struct {
479 enum OpalMemErr_DynErrType dyn_err_type:8;
480 uint8_t reserved_1[7];
481 __be64 physical_address_start;
482 __be64 physical_address_end;
483 } dyn_dealloc;
484 } u;
485};
486
487/* HMI interrupt event */
488enum OpalHMI_Version {
489 OpalHMIEvt_V1 = 1,
490};
491
492enum OpalHMI_Severity {
493 OpalHMI_SEV_NO_ERROR = 0,
494 OpalHMI_SEV_WARNING = 1,
495 OpalHMI_SEV_ERROR_SYNC = 2,
496 OpalHMI_SEV_FATAL = 3,
497};
498
499enum OpalHMI_Disposition {
500 OpalHMI_DISPOSITION_RECOVERED = 0,
501 OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
502};
503
504enum OpalHMI_ErrType {
505 OpalHMI_ERROR_MALFUNC_ALERT = 0,
506 OpalHMI_ERROR_PROC_RECOV_DONE,
507 OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
508 OpalHMI_ERROR_PROC_RECOV_MASKED,
509 OpalHMI_ERROR_TFAC,
510 OpalHMI_ERROR_TFMR_PARITY,
511 OpalHMI_ERROR_HA_OVERFLOW_WARN,
512 OpalHMI_ERROR_XSCOM_FAIL,
513 OpalHMI_ERROR_XSCOM_DONE,
514 OpalHMI_ERROR_SCOM_FIR,
515 OpalHMI_ERROR_DEBUG_TRIG_FIR,
516 OpalHMI_ERROR_HYP_RESOURCE,
517};
518
519struct OpalHMIEvent {
520 uint8_t version; /* 0x00 */
521 uint8_t severity; /* 0x01 */
522 uint8_t type; /* 0x02 */
523 uint8_t disposition; /* 0x03 */
524 uint8_t reserved_1[4]; /* 0x04 */
525
526 __be64 hmer;
527 /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
528 __be64 tfmr;
529};
530
531enum {
532 OPAL_P7IOC_DIAG_TYPE_NONE = 0,
533 OPAL_P7IOC_DIAG_TYPE_RGC = 1,
534 OPAL_P7IOC_DIAG_TYPE_BI = 2,
535 OPAL_P7IOC_DIAG_TYPE_CI = 3,
536 OPAL_P7IOC_DIAG_TYPE_MISC = 4,
537 OPAL_P7IOC_DIAG_TYPE_I2C = 5,
538 OPAL_P7IOC_DIAG_TYPE_LAST = 6
539};
540
541struct OpalIoP7IOCErrorData {
542 __be16 type;
543
544 /* GEM */
545 __be64 gemXfir;
546 __be64 gemRfir;
547 __be64 gemRirqfir;
548 __be64 gemMask;
549 __be64 gemRwof;
550
551 /* LEM */
552 __be64 lemFir;
553 __be64 lemErrMask;
554 __be64 lemAction0;
555 __be64 lemAction1;
556 __be64 lemWof;
557
558 union {
559 struct OpalIoP7IOCRgcErrorData {
560 __be64 rgcStatus; /* 3E1C10 */
561 __be64 rgcLdcp; /* 3E1C18 */
562 }rgc;
563 struct OpalIoP7IOCBiErrorData {
564 __be64 biLdcp0; /* 3C0100, 3C0118 */
565 __be64 biLdcp1; /* 3C0108, 3C0120 */
566 __be64 biLdcp2; /* 3C0110, 3C0128 */
567 __be64 biFenceStatus; /* 3C0130, 3C0130 */
568
569 u8 biDownbound; /* BI Downbound or Upbound */
570 }bi;
571 struct OpalIoP7IOCCiErrorData {
572 __be64 ciPortStatus; /* 3Dn008 */
573 __be64 ciPortLdcp; /* 3Dn010 */
574
575 u8 ciPort; /* Index of CI port: 0/1 */
576 }ci;
577 };
578};
579
580/**
581 * This structure defines the overlay which will be used to store PHB error
582 * data upon request.
583 */
584enum {
585 OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
586};
587
588enum {
589 OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
590 OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
591};
592
593enum {
594 OPAL_P7IOC_NUM_PEST_REGS = 128,
595 OPAL_PHB3_NUM_PEST_REGS = 256
596};
597
598/* CAPI modes for PHB */
599enum {
600 OPAL_PHB_CAPI_MODE_PCIE = 0,
601 OPAL_PHB_CAPI_MODE_CAPI = 1,
602 OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
603 OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
604};
605
606struct OpalIoPhbErrorCommon {
607 __be32 version;
608 __be32 ioType;
609 __be32 len;
610};
611
612struct OpalIoP7IOCPhbErrorData {
613 struct OpalIoPhbErrorCommon common;
614
615 __be32 brdgCtl;
616
617 // P7IOC utl regs
618 __be32 portStatusReg;
619 __be32 rootCmplxStatus;
620 __be32 busAgentStatus;
621
622 // P7IOC cfg regs
623 __be32 deviceStatus;
624 __be32 slotStatus;
625 __be32 linkStatus;
626 __be32 devCmdStatus;
627 __be32 devSecStatus;
628
629 // cfg AER regs
630 __be32 rootErrorStatus;
631 __be32 uncorrErrorStatus;
632 __be32 corrErrorStatus;
633 __be32 tlpHdr1;
634 __be32 tlpHdr2;
635 __be32 tlpHdr3;
636 __be32 tlpHdr4;
637 __be32 sourceId;
638
639 __be32 rsv3;
640
641 // Record data about the call to allocate a buffer.
642 __be64 errorClass;
643 __be64 correlator;
644
645 //P7IOC MMIO Error Regs
646 __be64 p7iocPlssr; // n120
647 __be64 p7iocCsr; // n110
648 __be64 lemFir; // nC00
649 __be64 lemErrorMask; // nC18
650 __be64 lemWOF; // nC40
651 __be64 phbErrorStatus; // nC80
652 __be64 phbFirstErrorStatus; // nC88
653 __be64 phbErrorLog0; // nCC0
654 __be64 phbErrorLog1; // nCC8
655 __be64 mmioErrorStatus; // nD00
656 __be64 mmioFirstErrorStatus; // nD08
657 __be64 mmioErrorLog0; // nD40
658 __be64 mmioErrorLog1; // nD48
659 __be64 dma0ErrorStatus; // nD80
660 __be64 dma0FirstErrorStatus; // nD88
661 __be64 dma0ErrorLog0; // nDC0
662 __be64 dma0ErrorLog1; // nDC8
663 __be64 dma1ErrorStatus; // nE00
664 __be64 dma1FirstErrorStatus; // nE08
665 __be64 dma1ErrorLog0; // nE40
666 __be64 dma1ErrorLog1; // nE48
667 __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
668 __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
669};
670
671struct OpalIoPhb3ErrorData {
672 struct OpalIoPhbErrorCommon common;
673
674 __be32 brdgCtl;
675
676 /* PHB3 UTL regs */
677 __be32 portStatusReg;
678 __be32 rootCmplxStatus;
679 __be32 busAgentStatus;
680
681 /* PHB3 cfg regs */
682 __be32 deviceStatus;
683 __be32 slotStatus;
684 __be32 linkStatus;
685 __be32 devCmdStatus;
686 __be32 devSecStatus;
687
688 /* cfg AER regs */
689 __be32 rootErrorStatus;
690 __be32 uncorrErrorStatus;
691 __be32 corrErrorStatus;
692 __be32 tlpHdr1;
693 __be32 tlpHdr2;
694 __be32 tlpHdr3;
695 __be32 tlpHdr4;
696 __be32 sourceId;
697
698 __be32 rsv3;
699
700 /* Record data about the call to allocate a buffer */
701 __be64 errorClass;
702 __be64 correlator;
703
704 __be64 nFir; /* 000 */
705 __be64 nFirMask; /* 003 */
706 __be64 nFirWOF; /* 008 */
707
708 /* PHB3 MMIO Error Regs */
709 __be64 phbPlssr; /* 120 */
710 __be64 phbCsr; /* 110 */
711 __be64 lemFir; /* C00 */
712 __be64 lemErrorMask; /* C18 */
713 __be64 lemWOF; /* C40 */
714 __be64 phbErrorStatus; /* C80 */
715 __be64 phbFirstErrorStatus; /* C88 */
716 __be64 phbErrorLog0; /* CC0 */
717 __be64 phbErrorLog1; /* CC8 */
718 __be64 mmioErrorStatus; /* D00 */
719 __be64 mmioFirstErrorStatus; /* D08 */
720 __be64 mmioErrorLog0; /* D40 */
721 __be64 mmioErrorLog1; /* D48 */
722 __be64 dma0ErrorStatus; /* D80 */
723 __be64 dma0FirstErrorStatus; /* D88 */
724 __be64 dma0ErrorLog0; /* DC0 */
725 __be64 dma0ErrorLog1; /* DC8 */
726 __be64 dma1ErrorStatus; /* E00 */
727 __be64 dma1FirstErrorStatus; /* E08 */
728 __be64 dma1ErrorLog0; /* E40 */
729 __be64 dma1ErrorLog1; /* E48 */
730 __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
731 __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
732};
733
734enum {
735 OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
736 OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
737};
738
739typedef struct oppanel_line {
740 const char * line;
741 uint64_t line_len;
742} oppanel_line_t;
743
744/* OPAL I2C request */
745struct opal_i2c_request {
746 uint8_t type;
747#define OPAL_I2C_RAW_READ 0
748#define OPAL_I2C_RAW_WRITE 1
749#define OPAL_I2C_SM_READ 2
750#define OPAL_I2C_SM_WRITE 3
751 uint8_t flags;
752#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
753 uint8_t subaddr_sz; /* Max 4 */
754 uint8_t reserved;
755 __be16 addr; /* 7 or 10 bit address */
756 __be16 reserved2;
757 __be32 subaddr; /* Sub-address if any */
758 __be32 size; /* Data size */
759 __be64 buffer_ra; /* Buffer real address */
760};
761 23
762/* /sys/firmware/opal */ 24/* /sys/firmware/opal */
763extern struct kobject *opal_kobj; 25extern struct kobject *opal_kobj;
@@ -932,6 +194,13 @@ int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
932int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id, 194int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
933 struct opal_i2c_request *oreq); 195 struct opal_i2c_request *oreq);
934 196
197int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
198 uint64_t size, uint64_t token);
199int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
200 uint64_t size, uint64_t token);
201int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
202 uint64_t token);
203
935/* Internal functions */ 204/* Internal functions */
936extern int early_init_dt_scan_opal(unsigned long node, const char *uname, 205extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
937 int depth, void *data); 206 int depth, void *data);
@@ -946,8 +215,10 @@ extern void hvc_opal_init_early(void);
946extern int opal_notifier_register(struct notifier_block *nb); 215extern int opal_notifier_register(struct notifier_block *nb);
947extern int opal_notifier_unregister(struct notifier_block *nb); 216extern int opal_notifier_unregister(struct notifier_block *nb);
948 217
949extern int opal_message_notifier_register(enum OpalMessageType msg_type, 218extern int opal_message_notifier_register(enum opal_msg_type msg_type,
950 struct notifier_block *nb); 219 struct notifier_block *nb);
220extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
221 struct notifier_block *nb);
951extern void opal_notifier_enable(void); 222extern void opal_notifier_enable(void);
952extern void opal_notifier_disable(void); 223extern void opal_notifier_disable(void);
953extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); 224extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
@@ -962,7 +233,7 @@ extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
962struct rtc_time; 233struct rtc_time;
963extern unsigned long opal_get_boot_time(void); 234extern unsigned long opal_get_boot_time(void);
964extern void opal_nvram_init(void); 235extern void opal_nvram_init(void);
965extern void opal_flash_init(void); 236extern void opal_flash_update_init(void);
966extern void opal_flash_term_callback(void); 237extern void opal_flash_term_callback(void);
967extern int opal_elog_init(void); 238extern int opal_elog_init(void);
968extern void opal_platform_dump_init(void); 239extern void opal_platform_dump_init(void);
@@ -983,13 +254,8 @@ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
983 unsigned long vmalloc_size); 254 unsigned long vmalloc_size);
984void opal_free_sg_list(struct opal_sg_list *sg); 255void opal_free_sg_list(struct opal_sg_list *sg);
985 256
986/* 257extern int opal_error_code(int rc);
987 * Dump region ID range usable by the OS
988 */
989#define OPAL_DUMP_REGION_HOST_START 0x80
990#define OPAL_DUMP_REGION_LOG_BUF 0x80
991#define OPAL_DUMP_REGION_HOST_END 0xFF
992 258
993#endif /* __ASSEMBLY__ */ 259#endif /* __ASSEMBLY__ */
994 260
995#endif /* __OPAL_H */ 261#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e5f22c6c4bf9..70bd4381f8e6 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -106,9 +106,9 @@ struct paca_struct {
106#endif /* CONFIG_PPC_STD_MMU_64 */ 106#endif /* CONFIG_PPC_STD_MMU_64 */
107 107
108#ifdef CONFIG_PPC_BOOK3E 108#ifdef CONFIG_PPC_BOOK3E
109 u64 exgen[8] __attribute__((aligned(0x80))); 109 u64 exgen[8] __aligned(0x40);
110 /* Keep pgd in the same cacheline as the start of extlb */ 110 /* Keep pgd in the same cacheline as the start of extlb */
111 pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ 111 pgd_t *pgd __aligned(0x40); /* Current PGD */
112 pgd_t *kernel_pgd; /* Kernel PGD */ 112 pgd_t *kernel_pgd; /* Kernel PGD */
113 113
114 /* Shared by all threads of a core -- points to tcd of first thread */ 114 /* Shared by all threads of a core -- points to tcd of first thread */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 546d036fe925..1811c44bf34b 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -15,6 +15,24 @@
15struct device_node; 15struct device_node;
16 16
17/* 17/*
18 * PCI controller operations
19 */
20struct pci_controller_ops {
21 void (*dma_dev_setup)(struct pci_dev *dev);
22 void (*dma_bus_setup)(struct pci_bus *bus);
23
24 int (*probe_mode)(struct pci_bus *);
25
26 /* Called when pci_enable_device() is called. Returns true to
27 * allow assignment/enabling of the device. */
28 bool (*enable_device_hook)(struct pci_dev *);
29
30 /* Called during PCI resource reassignment */
31 resource_size_t (*window_alignment)(struct pci_bus *, unsigned long type);
32 void (*reset_secondary_bus)(struct pci_dev *dev);
33};
34
35/*
18 * Structure of a PCI controller (host bridge) 36 * Structure of a PCI controller (host bridge)
19 */ 37 */
20struct pci_controller { 38struct pci_controller {
@@ -46,6 +64,7 @@ struct pci_controller {
46 resource_size_t isa_mem_phys; 64 resource_size_t isa_mem_phys;
47 resource_size_t isa_mem_size; 65 resource_size_t isa_mem_size;
48 66
67 struct pci_controller_ops controller_ops;
49 struct pci_ops *ops; 68 struct pci_ops *ops;
50 unsigned int __iomem *cfg_addr; 69 unsigned int __iomem *cfg_addr;
51 void __iomem *cfg_data; 70 void __iomem *cfg_data;
@@ -89,6 +108,7 @@ struct pci_controller {
89 108
90#ifdef CONFIG_PPC64 109#ifdef CONFIG_PPC64
91 unsigned long buid; 110 unsigned long buid;
111 struct pci_dn *pci_data;
92#endif /* CONFIG_PPC64 */ 112#endif /* CONFIG_PPC64 */
93 113
94 void *private_data; 114 void *private_data;
@@ -154,31 +174,51 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
154struct iommu_table; 174struct iommu_table;
155 175
156struct pci_dn { 176struct pci_dn {
177 int flags;
178#define PCI_DN_FLAG_IOV_VF 0x01
179
157 int busno; /* pci bus number */ 180 int busno; /* pci bus number */
158 int devfn; /* pci device and function number */ 181 int devfn; /* pci device and function number */
182 int vendor_id; /* Vendor ID */
183 int device_id; /* Device ID */
184 int class_code; /* Device class code */
159 185
186 struct pci_dn *parent;
160 struct pci_controller *phb; /* for pci devices */ 187 struct pci_controller *phb; /* for pci devices */
161 struct iommu_table *iommu_table; /* for phb's or bridges */ 188 struct iommu_table *iommu_table; /* for phb's or bridges */
162 struct device_node *node; /* back-pointer to the device_node */ 189 struct device_node *node; /* back-pointer to the device_node */
163 190
164 int pci_ext_config_space; /* for pci devices */ 191 int pci_ext_config_space; /* for pci devices */
165 192
166 struct pci_dev *pcidev; /* back-pointer to the pci device */
167#ifdef CONFIG_EEH 193#ifdef CONFIG_EEH
168 struct eeh_dev *edev; /* eeh device */ 194 struct eeh_dev *edev; /* eeh device */
169#endif 195#endif
170#define IODA_INVALID_PE (-1) 196#define IODA_INVALID_PE (-1)
171#ifdef CONFIG_PPC_POWERNV 197#ifdef CONFIG_PPC_POWERNV
172 int pe_number; 198 int pe_number;
199#ifdef CONFIG_PCI_IOV
200 u16 vfs_expanded; /* number of VFs IOV BAR expanded */
201 u16 num_vfs; /* number of VFs enabled*/
202 int offset; /* PE# for the first VF PE */
203#define M64_PER_IOV 4
204 int m64_per_iov;
205#define IODA_INVALID_M64 (-1)
206 int m64_wins[PCI_SRIOV_NUM_BARS][M64_PER_IOV];
207#endif /* CONFIG_PCI_IOV */
173#endif 208#endif
209 struct list_head child_list;
210 struct list_head list;
174}; 211};
175 212
176/* Get the pointer to a device_node's pci_dn */ 213/* Get the pointer to a device_node's pci_dn */
177#define PCI_DN(dn) ((struct pci_dn *) (dn)->data) 214#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
178 215
216extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
217 int devfn);
179extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); 218extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
180 219extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
181extern void * update_dn_pci_info(struct device_node *dn, void *data); 220extern void remove_dev_pci_data(struct pci_dev *pdev);
221extern void *update_dn_pci_info(struct device_node *dn, void *data);
182 222
183static inline int pci_device_from_OF_node(struct device_node *np, 223static inline int pci_device_from_OF_node(struct device_node *np,
184 u8 *bus, u8 *devfn) 224 u8 *bus, u8 *devfn)
@@ -191,20 +231,12 @@ static inline int pci_device_from_OF_node(struct device_node *np,
191} 231}
192 232
193#if defined(CONFIG_EEH) 233#if defined(CONFIG_EEH)
194static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) 234static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
195{ 235{
196 /* 236 return pdn ? pdn->edev : NULL;
197 * For those OF nodes whose parent isn't PCI bridge, they
198 * don't have PCI_DN actually. So we have to skip them for
199 * any EEH operations.
200 */
201 if (!dn || !PCI_DN(dn))
202 return NULL;
203
204 return PCI_DN(dn)->edev;
205} 237}
206#else 238#else
207#define of_node_to_eeh_dev(x) (NULL) 239#define pdn_to_eeh_dev(x) (NULL)
208#endif 240#endif
209 241
210/** Find the bus corresponding to the indicated device node */ 242/** Find the bus corresponding to the indicated device node */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 1b0739bc14b5..4aef8d660999 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -22,7 +22,7 @@
22 22
23#include <asm-generic/pci-dma-compat.h> 23#include <asm-generic/pci-dma-compat.h>
24 24
25/* Return values for ppc_md.pci_probe_mode function */ 25/* Return values for pci_controller_ops.probe_mode function */
26#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */ 26#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
27#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */ 27#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
28#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */ 28#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index db1e2b8eff3c..4122a86d6858 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -23,8 +23,6 @@ extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
23 23
24extern struct list_head hose_list; 24extern struct list_head hose_list;
25 25
26extern void find_and_init_phbs(void);
27
28extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */ 26extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
29 27
30/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */ 28/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
@@ -33,9 +31,14 @@ extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
33 31
34/* PCI device_node operations */ 32/* PCI device_node operations */
35struct device_node; 33struct device_node;
34struct pci_dn;
35
36typedef void *(*traverse_func)(struct device_node *me, void *data); 36typedef void *(*traverse_func)(struct device_node *me, void *data);
37void *traverse_pci_devices(struct device_node *start, traverse_func pre, 37void *traverse_pci_devices(struct device_node *start, traverse_func pre,
38 void *data); 38 void *data);
39void *traverse_pci_dn(struct pci_dn *root,
40 void *(*fn)(struct pci_dn *, void *),
41 void *data);
39 42
40extern void pci_devs_phb_init(void); 43extern void pci_devs_phb_init(void);
41extern void pci_devs_phb_init_dynamic(struct pci_controller *phb); 44extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
@@ -76,7 +79,6 @@ static inline const char *eeh_driver_name(struct pci_dev *pdev)
76#endif /* CONFIG_EEH */ 79#endif /* CONFIG_EEH */
77 80
78#else /* CONFIG_PCI */ 81#else /* CONFIG_PCI */
79static inline void find_and_init_phbs(void) { }
80static inline void init_pci_config_tokens(void) { } 82static inline void init_pci_config_tokens(void) { }
81#endif /* !CONFIG_PCI */ 83#endif /* !CONFIG_PCI */
82 84
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 7e4612528546..dd0fc18d8103 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -637,105 +637,105 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
637 637
638/* AltiVec Registers (VPRs) */ 638/* AltiVec Registers (VPRs) */
639 639
640#define vr0 0 640#define v0 0
641#define vr1 1 641#define v1 1
642#define vr2 2 642#define v2 2
643#define vr3 3 643#define v3 3
644#define vr4 4 644#define v4 4
645#define vr5 5 645#define v5 5
646#define vr6 6 646#define v6 6
647#define vr7 7 647#define v7 7
648#define vr8 8 648#define v8 8
649#define vr9 9 649#define v9 9
650#define vr10 10 650#define v10 10
651#define vr11 11 651#define v11 11
652#define vr12 12 652#define v12 12
653#define vr13 13 653#define v13 13
654#define vr14 14 654#define v14 14
655#define vr15 15 655#define v15 15
656#define vr16 16 656#define v16 16
657#define vr17 17 657#define v17 17
658#define vr18 18 658#define v18 18
659#define vr19 19 659#define v19 19
660#define vr20 20 660#define v20 20
661#define vr21 21 661#define v21 21
662#define vr22 22 662#define v22 22
663#define vr23 23 663#define v23 23
664#define vr24 24 664#define v24 24
665#define vr25 25 665#define v25 25
666#define vr26 26 666#define v26 26
667#define vr27 27 667#define v27 27
668#define vr28 28 668#define v28 28
669#define vr29 29 669#define v29 29
670#define vr30 30 670#define v30 30
671#define vr31 31 671#define v31 31
672 672
673/* VSX Registers (VSRs) */ 673/* VSX Registers (VSRs) */
674 674
675#define vsr0 0 675#define vs0 0
676#define vsr1 1 676#define vs1 1
677#define vsr2 2 677#define vs2 2
678#define vsr3 3 678#define vs3 3
679#define vsr4 4 679#define vs4 4
680#define vsr5 5 680#define vs5 5
681#define vsr6 6 681#define vs6 6
682#define vsr7 7 682#define vs7 7
683#define vsr8 8 683#define vs8 8
684#define vsr9 9 684#define vs9 9
685#define vsr10 10 685#define vs10 10
686#define vsr11 11 686#define vs11 11
687#define vsr12 12 687#define vs12 12
688#define vsr13 13 688#define vs13 13
689#define vsr14 14 689#define vs14 14
690#define vsr15 15 690#define vs15 15
691#define vsr16 16 691#define vs16 16
692#define vsr17 17 692#define vs17 17
693#define vsr18 18 693#define vs18 18
694#define vsr19 19 694#define vs19 19
695#define vsr20 20 695#define vs20 20
696#define vsr21 21 696#define vs21 21
697#define vsr22 22 697#define vs22 22
698#define vsr23 23 698#define vs23 23
699#define vsr24 24 699#define vs24 24
700#define vsr25 25 700#define vs25 25
701#define vsr26 26 701#define vs26 26
702#define vsr27 27 702#define vs27 27
703#define vsr28 28 703#define vs28 28
704#define vsr29 29 704#define vs29 29
705#define vsr30 30 705#define vs30 30
706#define vsr31 31 706#define vs31 31
707#define vsr32 32 707#define vs32 32
708#define vsr33 33 708#define vs33 33
709#define vsr34 34 709#define vs34 34
710#define vsr35 35 710#define vs35 35
711#define vsr36 36 711#define vs36 36
712#define vsr37 37 712#define vs37 37
713#define vsr38 38 713#define vs38 38
714#define vsr39 39 714#define vs39 39
715#define vsr40 40 715#define vs40 40
716#define vsr41 41 716#define vs41 41
717#define vsr42 42 717#define vs42 42
718#define vsr43 43 718#define vs43 43
719#define vsr44 44 719#define vs44 44
720#define vsr45 45 720#define vs45 45
721#define vsr46 46 721#define vs46 46
722#define vsr47 47 722#define vs47 47
723#define vsr48 48 723#define vs48 48
724#define vsr49 49 724#define vs49 49
725#define vsr50 50 725#define vs50 50
726#define vsr51 51 726#define vs51 51
727#define vsr52 52 727#define vs52 52
728#define vsr53 53 728#define vs53 53
729#define vsr54 54 729#define vs54 54
730#define vsr55 55 730#define vs55 55
731#define vsr56 56 731#define vs56 56
732#define vsr57 57 732#define vs57 57
733#define vsr58 58 733#define vs58 58
734#define vsr59 59 734#define vs59 59
735#define vsr60 60 735#define vs60 60
736#define vsr61 61 736#define vs61 61
737#define vsr62 62 737#define vs62 62
738#define vsr63 63 738#define vs63 63
739 739
740/* SPE Registers (EVPRs) */ 740/* SPE Registers (EVPRs) */
741 741
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 2e23e92a4372..7a4ede16b283 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <asm/page.h> 6#include <asm/page.h>
7#include <linux/time.h>
7 8
8/* 9/*
9 * Definitions for talking to the RTAS on CHRP machines. 10 * Definitions for talking to the RTAS on CHRP machines.
@@ -273,6 +274,7 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
273#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') 274#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
274#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') 275#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
275#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') 276#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
277#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
276 278
277/* Vendor specific Platform Event Log Format, Version 6, section header */ 279/* Vendor specific Platform Event Log Format, Version 6, section header */
278struct pseries_errorlog { 280struct pseries_errorlog {
@@ -296,6 +298,31 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
296 return be16_to_cpu(sect->length); 298 return be16_to_cpu(sect->length);
297} 299}
298 300
301/* RTAS pseries hotplug errorlog section */
302struct pseries_hp_errorlog {
303 u8 resource;
304 u8 action;
305 u8 id_type;
306 u8 reserved;
307 union {
308 __be32 drc_index;
309 __be32 drc_count;
310 char drc_name[1];
311 } _drc_u;
312};
313
314#define PSERIES_HP_ELOG_RESOURCE_CPU 1
315#define PSERIES_HP_ELOG_RESOURCE_MEM 2
316#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
317#define PSERIES_HP_ELOG_RESOURCE_PHB 4
318
319#define PSERIES_HP_ELOG_ACTION_ADD 1
320#define PSERIES_HP_ELOG_ACTION_REMOVE 2
321
322#define PSERIES_HP_ELOG_ID_DRC_NAME 1
323#define PSERIES_HP_ELOG_ID_DRC_INDEX 2
324#define PSERIES_HP_ELOG_ID_DRC_COUNT 3
325
299struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, 326struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
300 uint16_t section_id); 327 uint16_t section_id);
301 328
@@ -327,7 +354,7 @@ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
327extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); 354extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
328extern int rtas_online_cpus_mask(cpumask_var_t cpus); 355extern int rtas_online_cpus_mask(cpumask_var_t cpus);
329extern int rtas_offline_cpus_mask(cpumask_var_t cpus); 356extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
330extern int rtas_ibm_suspend_me(u64 handle, int *vasi_return); 357extern int rtas_ibm_suspend_me(u64 handle);
331 358
332struct rtc_time; 359struct rtc_time;
333extern unsigned long rtas_get_boot_time(void); 360extern unsigned long rtas_get_boot_time(void);
@@ -343,8 +370,12 @@ extern int early_init_dt_scan_rtas(unsigned long node,
343extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal); 370extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
344 371
345#ifdef CONFIG_PPC_PSERIES 372#ifdef CONFIG_PPC_PSERIES
373extern time64_t last_rtas_event;
374extern int clobbering_unread_rtas_event(void);
346extern int pseries_devicetree_update(s32 scope); 375extern int pseries_devicetree_update(s32 scope);
347extern void post_mobility_fixup(void); 376extern void post_mobility_fixup(void);
377#else
378static inline int clobbering_unread_rtas_event(void) { return 0; }
348#endif 379#endif
349 380
350#ifdef CONFIG_PPC_RTAS_DAEMON 381#ifdef CONFIG_PPC_RTAS_DAEMON
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index fbdf18cf954c..e9d384cbd021 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -7,7 +7,6 @@
7extern void ppc_printk_progress(char *s, unsigned short hex); 7extern void ppc_printk_progress(char *s, unsigned short hex);
8 8
9extern unsigned int rtas_data; 9extern unsigned int rtas_data;
10extern int mem_init_done; /* set on boot once kmalloc can be called */
11extern unsigned long long memory_limit; 10extern unsigned long long memory_limit;
12extern unsigned long klimit; 11extern unsigned long klimit;
13extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 12extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d607df5081a7..825663c30945 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -42,7 +42,7 @@ struct smp_ops_t {
42#ifdef CONFIG_PPC_SMP_MUXED_IPI 42#ifdef CONFIG_PPC_SMP_MUXED_IPI
43 void (*cause_ipi)(int cpu, unsigned long data); 43 void (*cause_ipi)(int cpu, unsigned long data);
44#endif 44#endif
45 int (*probe)(void); 45 void (*probe)(void);
46 int (*kick_cpu)(int nr); 46 int (*kick_cpu)(int nr);
47 void (*setup_cpu)(int nr); 47 void (*setup_cpu)(int nr);
48 void (*bringup_done)(void); 48 void (*bringup_done)(void);
@@ -125,7 +125,6 @@ extern irqreturn_t smp_ipi_demux(void);
125 125
126void smp_init_pSeries(void); 126void smp_init_pSeries(void);
127void smp_init_cell(void); 127void smp_init_cell(void);
128void smp_init_celleb(void);
129void smp_setup_cpu_maps(void); 128void smp_setup_cpu_maps(void);
130 129
131extern int __cpu_disable(void); 130extern int __cpu_disable(void);
@@ -175,7 +174,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
175 174
176extern int smt_enabled_at_boot; 175extern int smt_enabled_at_boot;
177 176
178extern int smp_mpic_probe(void); 177extern void smp_mpic_probe(void);
179extern void smp_mpic_setup_cpu(int cpu); 178extern void smp_mpic_setup_cpu(int cpu);
180extern int smp_generic_kick_cpu(int nr); 179extern int smp_generic_kick_cpu(int nr);
181extern int smp_generic_cpu_bootable(unsigned int nr); 180extern int smp_generic_cpu_bootable(unsigned int nr);
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
index 96f59de61855..487e09077a3e 100644
--- a/arch/powerpc/include/asm/swab.h
+++ b/arch/powerpc/include/asm/swab.h
@@ -9,30 +9,4 @@
9 9
10#include <uapi/asm/swab.h> 10#include <uapi/asm/swab.h>
11 11
12static __inline__ __u16 ld_le16(const volatile __u16 *addr)
13{
14 __u16 val;
15
16 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
17 return val;
18}
19
20static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
21{
22 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
23}
24
25static __inline__ __u32 ld_le32(const volatile __u32 *addr)
26{
27 __u32 val;
28
29 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
30 return val;
31}
32
33static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
34{
35 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
36}
37
38#endif /* _ASM_POWERPC_SWAB_H */ 12#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 91062eef582f..f1863a138b4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -367,3 +367,4 @@ SYSCALL_SPU(getrandom)
367SYSCALL_SPU(memfd_create) 367SYSCALL_SPU(memfd_create)
368SYSCALL_SPU(bpf) 368SYSCALL_SPU(bpf)
369COMPAT_SYS(execveat) 369COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian)
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
index c44131e68e11..233ef5fe5fde 100644
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -251,19 +251,6 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
251 */ 251 */
252void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode); 252void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
253 253
254/* ucc_slow_poll_transmitter_now
255 * Immediately forces a poll of the transmitter for data to be sent.
256 * Typically, the hardware performs a periodic poll for data that the
257 * transmit routine has set up to be transmitted. In cases where
258 * this polling cycle is not soon enough, this optional routine can
259 * be invoked to force a poll right away, instead. Proper use for
260 * each transmission for which this functionality is desired is to
261 * call the transmit routine and then this routine right after.
262 *
263 * uccs - (In) pointer to the slow UCC structure.
264 */
265void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs);
266
267/* ucc_slow_graceful_stop_tx 254/* ucc_slow_graceful_stop_tx
268 * Smoothly stops transmission on a specified slow UCC. 255 * Smoothly stops transmission on a specified slow UCC.
269 * 256 *
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 36b79c31eedd..f4f8b667d75b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 363 15#define __NR_syscalls 364
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
index e5f8dd366212..ab3acd2f2786 100644
--- a/arch/powerpc/include/asm/vga.h
+++ b/arch/powerpc/include/asm/vga.h
@@ -25,12 +25,12 @@
25 25
26static inline void scr_writew(u16 val, volatile u16 *addr) 26static inline void scr_writew(u16 val, volatile u16 *addr)
27{ 27{
28 st_le16(addr, val); 28 *addr = cpu_to_le16(val);
29} 29}
30 30
31static inline u16 scr_readw(volatile const u16 *addr) 31static inline u16 scr_readw(volatile const u16 *addr)
32{ 32{
33 return ld_le16(addr); 33 return le16_to_cpu(*addr);
34} 34}
35 35
36#define VT_BUF_HAVE_MEMCPYW 36#define VT_BUF_HAVE_MEMCPYW
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 6997f4a271df..0e25bdb190bb 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -146,7 +146,7 @@ extern void xics_update_irq_servers(void);
146extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join); 146extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
147extern void xics_mask_unknown_vec(unsigned int vec); 147extern void xics_mask_unknown_vec(unsigned int vec);
148extern irqreturn_t xics_ipi_dispatch(int cpu); 148extern irqreturn_t xics_ipi_dispatch(int cpu);
149extern int xics_smp_probe(void); 149extern void xics_smp_probe(void);
150extern void xics_register_ics(struct ics *ics); 150extern void xics_register_ics(struct ics *ics);
151extern void xics_teardown_cpu(void); 151extern void xics_teardown_cpu(void);
152extern void xics_kexec_teardown_cpu(int secondary); 152extern void xics_kexec_teardown_cpu(int secondary);
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index 77d2ed35b111..8036b385417d 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -136,7 +136,7 @@ struct pt_regs {
136#endif /* __powerpc64__ */ 136#endif /* __powerpc64__ */
137 137
138/* 138/*
139 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. 139 * Get/set all the altivec registers v0..v31, vscr, vrsave, in one go.
140 * The transfer totals 34 quadword. Quadwords 0-31 contain the 140 * The transfer totals 34 quadword. Quadwords 0-31 contain the
141 * corresponding vector registers. Quadword 32 contains the vscr as the 141 * corresponding vector registers. Quadword 32 contains the vscr as the
142 * last word (offset 12) within that quadword. Quadword 33 contains the 142 * last word (offset 12) within that quadword. Quadword 33 contains the
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 5d836b7c1176..5047659815a5 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -11,7 +11,7 @@
11#define TM_CAUSE_RESCHED 0xde 11#define TM_CAUSE_RESCHED 0xde
12#define TM_CAUSE_TLBI 0xdc 12#define TM_CAUSE_TLBI 0xdc
13#define TM_CAUSE_FAC_UNAV 0xda 13#define TM_CAUSE_FAC_UNAV 0xda
14#define TM_CAUSE_SYSCALL 0xd8 /* future use */ 14#define TM_CAUSE_SYSCALL 0xd8
15#define TM_CAUSE_MISC 0xd6 /* future use */ 15#define TM_CAUSE_MISC 0xd6 /* future use */
16#define TM_CAUSE_SIGNAL 0xd4 16#define TM_CAUSE_SIGNAL 0xd4
17#define TM_CAUSE_ALIGNMENT 0xd2 17#define TM_CAUSE_ALIGNMENT 0xd2
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index ef5b5b1f3123..e4aa173dae62 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -385,5 +385,6 @@
385#define __NR_memfd_create 360 385#define __NR_memfd_create 360
386#define __NR_bpf 361 386#define __NR_bpf 361
387#define __NR_execveat 362 387#define __NR_execveat 362
388#define __NR_switch_endian 363
388 389
389#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 390#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 502cf69b6c89..c1ebbdaac28f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,7 +33,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
33 signal.o sysfs.o cacheinfo.o time.o \ 33 signal.o sysfs.o cacheinfo.o time.o \
34 prom.o traps.o setup-common.o \ 34 prom.o traps.o setup-common.o \
35 udbg.o misc.o io.o dma.o \ 35 udbg.o misc.o io.o dma.o \
36 misc_$(CONFIG_WORD_SIZE).o vdso32/ 36 misc_$(CONFIG_WORD_SIZE).o vdso32/ \
37 of_platform.o prom_parse.o
37obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 38obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
38 signal_64.o ptrace32.o \ 39 signal_64.o ptrace32.o \
39 paca.o nvram_64.o firmware.o 40 paca.o nvram_64.o firmware.o
@@ -47,7 +48,6 @@ obj-$(CONFIG_PPC64) += vdso64/
47obj-$(CONFIG_ALTIVEC) += vecemu.o 48obj-$(CONFIG_ALTIVEC) += vecemu.o
48obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 49obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
49obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o 50obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
50obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
51procfs-y := proc_powerpc.o 51procfs-y := proc_powerpc.o
52obj-$(CONFIG_PROC_FS) += $(procfs-y) 52obj-$(CONFIG_PROC_FS) += $(procfs-y)
53rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o 53rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index ae77b7e59889..c641983bbdd6 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -61,12 +61,22 @@ struct cache_type_info {
61}; 61};
62 62
63/* These are used to index the cache_type_info array. */ 63/* These are used to index the cache_type_info array. */
64#define CACHE_TYPE_UNIFIED 0 64#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
65#define CACHE_TYPE_INSTRUCTION 1 65#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
66#define CACHE_TYPE_DATA 2 66#define CACHE_TYPE_INSTRUCTION 2
67#define CACHE_TYPE_DATA 3
67 68
68static const struct cache_type_info cache_type_info[] = { 69static const struct cache_type_info cache_type_info[] = {
69 { 70 {
71 /* Embedded systems that use cache-size, cache-block-size,
72 * etc. for the Unified (typically L2) cache. */
73 .name = "Unified",
74 .size_prop = "cache-size",
75 .line_size_props = { "cache-line-size",
76 "cache-block-size", },
77 .nr_sets_prop = "cache-sets",
78 },
79 {
70 /* PowerPC Processor binding says the [di]-cache-* 80 /* PowerPC Processor binding says the [di]-cache-*
71 * must be equal on unified caches, so just use 81 * must be equal on unified caches, so just use
72 * d-cache properties. */ 82 * d-cache properties. */
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
293{ 303{
294 struct cache *iter; 304 struct cache *iter;
295 305
296 if (cache->type == CACHE_TYPE_UNIFIED) 306 if (cache->type == CACHE_TYPE_UNIFIED ||
307 cache->type == CACHE_TYPE_UNIFIED_D)
297 return cache; 308 return cache;
298 309
299 list_for_each_entry(iter, &cache_list, list) 310 list_for_each_entry(iter, &cache_list, list)
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
324 return of_get_property(np, "cache-unified", NULL); 335 return of_get_property(np, "cache-unified", NULL);
325} 336}
326 337
327static struct cache *cache_do_one_devnode_unified(struct device_node *node, 338/*
328 int level) 339 * Unified caches can have two different sets of tags. Most embedded
340 * use cache-size, etc. for the unified cache size, but open firmware systems
341 * use d-cache-size, etc. Check on initialization for which type we have, and
342 * return the appropriate structure type. Assume it's embedded if it isn't
343 * open firmware. If it's yet a 3rd type, then there will be missing entries
344 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
345 * to be extended further.
346 */
347static int cache_is_unified_d(const struct device_node *np)
329{ 348{
330 struct cache *cache; 349 return of_get_property(np,
350 cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
351 CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
352}
331 353
354/*
355 */
356static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
357{
332 pr_debug("creating L%d ucache for %s\n", level, node->full_name); 358 pr_debug("creating L%d ucache for %s\n", level, node->full_name);
333 359
334 cache = new_cache(CACHE_TYPE_UNIFIED, level, node); 360 return new_cache(cache_is_unified_d(node), level, node);
335
336 return cache;
337} 361}
338 362
339static struct cache *cache_do_one_devnode_split(struct device_node *node, 363static struct cache *cache_do_one_devnode_split(struct device_node *node,
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 46733535cc0b..9c9b7411b28b 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -137,15 +137,11 @@ __init_HFSCR:
137/* 137/*
138 * Clear the TLB using the specified IS form of tlbiel instruction 138 * Clear the TLB using the specified IS form of tlbiel instruction
139 * (invalidate by congruence class). P7 has 128 CCs., P8 has 512. 139 * (invalidate by congruence class). P7 has 128 CCs., P8 has 512.
140 *
141 * r3 = IS field
142 */ 140 */
143__init_tlb_power7: 141__init_tlb_power7:
144 li r3,0xc00 /* IS field = 0b11 */
145_GLOBAL(__flush_tlb_power7)
146 li r6,128 142 li r6,128
147 mtctr r6 143 mtctr r6
148 mr r7,r3 /* IS field */ 144 li r7,0xc00 /* IS field = 0b11 */
149 ptesync 145 ptesync
1502: tlbiel r7 1462: tlbiel r7
151 addi r7,r7,0x1000 147 addi r7,r7,0x1000
@@ -154,11 +150,9 @@ _GLOBAL(__flush_tlb_power7)
1541: blr 1501: blr
155 151
156__init_tlb_power8: 152__init_tlb_power8:
157 li r3,0xc00 /* IS field = 0b11 */
158_GLOBAL(__flush_tlb_power8)
159 li r6,512 153 li r6,512
160 mtctr r6 154 mtctr r6
161 mr r7,r3 /* IS field */ 155 li r7,0xc00 /* IS field = 0b11 */
162 ptesync 156 ptesync
1632: tlbiel r7 1572: tlbiel r7
164 addi r7,r7,0x1000 158 addi r7,r7,0x1000
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f83046878336..60262fdf35ba 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,8 +71,8 @@ extern void __restore_cpu_power7(void);
71extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); 71extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
72extern void __restore_cpu_power8(void); 72extern void __restore_cpu_power8(void);
73extern void __restore_cpu_a2(void); 73extern void __restore_cpu_a2(void);
74extern void __flush_tlb_power7(unsigned long inval_selector); 74extern void __flush_tlb_power7(unsigned int action);
75extern void __flush_tlb_power8(unsigned long inval_selector); 75extern void __flush_tlb_power8(unsigned int action);
76extern long __machine_check_early_realmode_p7(struct pt_regs *regs); 76extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
77extern long __machine_check_early_realmode_p8(struct pt_regs *regs); 77extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
78#endif /* CONFIG_PPC64 */ 78#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 735979764cd4..6e8d764ce47b 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -116,16 +116,13 @@ void __init swiotlb_detect_4g(void)
116 } 116 }
117} 117}
118 118
119static int __init swiotlb_late_init(void) 119static int __init check_swiotlb_enabled(void)
120{ 120{
121 if (ppc_swiotlb_enable) { 121 if (ppc_swiotlb_enable)
122 swiotlb_print_info(); 122 swiotlb_print_info();
123 set_pci_dma_ops(&swiotlb_dma_ops); 123 else
124 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
125 } else {
126 swiotlb_free(); 124 swiotlb_free();
127 }
128 125
129 return 0; 126 return 0;
130} 127}
131subsys_initcall(swiotlb_late_init); 128subsys_initcall(check_swiotlb_enabled);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 3b2252e7731b..a4c62eb0ee48 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -164,30 +164,34 @@ __setup("eeh=", eeh_setup);
164 */ 164 */
165static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) 165static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
166{ 166{
167 struct device_node *dn = eeh_dev_to_of_node(edev); 167 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
168 u32 cfg; 168 u32 cfg;
169 int cap, i; 169 int cap, i;
170 int n = 0, l = 0; 170 int n = 0, l = 0;
171 char buffer[128]; 171 char buffer[128];
172 172
173 n += scnprintf(buf+n, len-n, "%s\n", dn->full_name); 173 n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
174 pr_warn("EEH: of node=%s\n", dn->full_name); 174 edev->phb->global_number, pdn->busno,
175 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
176 pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
177 edev->phb->global_number, pdn->busno,
178 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
175 179
176 eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg); 180 eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
177 n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); 181 n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
178 pr_warn("EEH: PCI device/vendor: %08x\n", cfg); 182 pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
179 183
180 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg); 184 eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
181 n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); 185 n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
182 pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); 186 pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
183 187
184 /* Gather bridge-specific registers */ 188 /* Gather bridge-specific registers */
185 if (edev->mode & EEH_DEV_BRIDGE) { 189 if (edev->mode & EEH_DEV_BRIDGE) {
186 eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg); 190 eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
187 n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); 191 n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
188 pr_warn("EEH: Bridge secondary status: %04x\n", cfg); 192 pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
189 193
190 eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg); 194 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
191 n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); 195 n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
192 pr_warn("EEH: Bridge control: %04x\n", cfg); 196 pr_warn("EEH: Bridge control: %04x\n", cfg);
193 } 197 }
@@ -195,11 +199,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
195 /* Dump out the PCI-X command and status regs */ 199 /* Dump out the PCI-X command and status regs */
196 cap = edev->pcix_cap; 200 cap = edev->pcix_cap;
197 if (cap) { 201 if (cap) {
198 eeh_ops->read_config(dn, cap, 4, &cfg); 202 eeh_ops->read_config(pdn, cap, 4, &cfg);
199 n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); 203 n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
200 pr_warn("EEH: PCI-X cmd: %08x\n", cfg); 204 pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
201 205
202 eeh_ops->read_config(dn, cap+4, 4, &cfg); 206 eeh_ops->read_config(pdn, cap+4, 4, &cfg);
203 n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); 207 n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
204 pr_warn("EEH: PCI-X status: %08x\n", cfg); 208 pr_warn("EEH: PCI-X status: %08x\n", cfg);
205 } 209 }
@@ -211,7 +215,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
211 pr_warn("EEH: PCI-E capabilities and status follow:\n"); 215 pr_warn("EEH: PCI-E capabilities and status follow:\n");
212 216
213 for (i=0; i<=8; i++) { 217 for (i=0; i<=8; i++) {
214 eeh_ops->read_config(dn, cap+4*i, 4, &cfg); 218 eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
215 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); 219 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
216 220
217 if ((i % 4) == 0) { 221 if ((i % 4) == 0) {
@@ -238,7 +242,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
238 pr_warn("EEH: PCI-E AER capability register set follows:\n"); 242 pr_warn("EEH: PCI-E AER capability register set follows:\n");
239 243
240 for (i=0; i<=13; i++) { 244 for (i=0; i<=13; i++) {
241 eeh_ops->read_config(dn, cap+4*i, 4, &cfg); 245 eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
242 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); 246 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
243 247
244 if ((i % 4) == 0) { 248 if ((i % 4) == 0) {
@@ -414,11 +418,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
414 int ret; 418 int ret;
415 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); 419 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
416 unsigned long flags; 420 unsigned long flags;
417 struct device_node *dn; 421 struct pci_dn *pdn;
418 struct pci_dev *dev; 422 struct pci_dev *dev;
419 struct eeh_pe *pe, *parent_pe, *phb_pe; 423 struct eeh_pe *pe, *parent_pe, *phb_pe;
420 int rc = 0; 424 int rc = 0;
421 const char *location; 425 const char *location = NULL;
422 426
423 eeh_stats.total_mmio_ffs++; 427 eeh_stats.total_mmio_ffs++;
424 428
@@ -429,15 +433,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
429 eeh_stats.no_dn++; 433 eeh_stats.no_dn++;
430 return 0; 434 return 0;
431 } 435 }
432 dn = eeh_dev_to_of_node(edev);
433 dev = eeh_dev_to_pci_dev(edev); 436 dev = eeh_dev_to_pci_dev(edev);
434 pe = eeh_dev_to_pe(edev); 437 pe = eeh_dev_to_pe(edev);
435 438
436 /* Access to IO BARs might get this far and still not want checking. */ 439 /* Access to IO BARs might get this far and still not want checking. */
437 if (!pe) { 440 if (!pe) {
438 eeh_stats.ignored_check++; 441 eeh_stats.ignored_check++;
439 pr_debug("EEH: Ignored check for %s %s\n", 442 pr_debug("EEH: Ignored check for %s\n",
440 eeh_pci_name(dev), dn->full_name); 443 eeh_pci_name(dev));
441 return 0; 444 return 0;
442 } 445 }
443 446
@@ -473,10 +476,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
473 if (pe->state & EEH_PE_ISOLATED) { 476 if (pe->state & EEH_PE_ISOLATED) {
474 pe->check_count++; 477 pe->check_count++;
475 if (pe->check_count % EEH_MAX_FAILS == 0) { 478 if (pe->check_count % EEH_MAX_FAILS == 0) {
476 location = of_get_property(dn, "ibm,loc-code", NULL); 479 pdn = eeh_dev_to_pdn(edev);
480 if (pdn->node)
481 location = of_get_property(pdn->node, "ibm,loc-code", NULL);
477 printk(KERN_ERR "EEH: %d reads ignored for recovering device at " 482 printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
478 "location=%s driver=%s pci addr=%s\n", 483 "location=%s driver=%s pci addr=%s\n",
479 pe->check_count, location, 484 pe->check_count,
485 location ? location : "unknown",
480 eeh_driver_name(dev), eeh_pci_name(dev)); 486 eeh_driver_name(dev), eeh_pci_name(dev));
481 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", 487 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
482 eeh_driver_name(dev)); 488 eeh_driver_name(dev));
@@ -667,6 +673,55 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
667 return rc; 673 return rc;
668} 674}
669 675
676static void *eeh_disable_and_save_dev_state(void *data, void *userdata)
677{
678 struct eeh_dev *edev = data;
679 struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
680 struct pci_dev *dev = userdata;
681
682 /*
683 * The caller should have disabled and saved the
684 * state for the specified device
685 */
686 if (!pdev || pdev == dev)
687 return NULL;
688
689 /* Ensure we have D0 power state */
690 pci_set_power_state(pdev, PCI_D0);
691
692 /* Save device state */
693 pci_save_state(pdev);
694
695 /*
696 * Disable device to avoid any DMA traffic and
697 * interrupt from the device
698 */
699 pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
700
701 return NULL;
702}
703
704static void *eeh_restore_dev_state(void *data, void *userdata)
705{
706 struct eeh_dev *edev = data;
707 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
708 struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
709 struct pci_dev *dev = userdata;
710
711 if (!pdev)
712 return NULL;
713
714 /* Apply customization from firmware */
715 if (pdn && eeh_ops->restore_config)
716 eeh_ops->restore_config(pdn);
717
718 /* The caller should restore state for the specified device */
719 if (pdev != dev)
720 pci_save_state(pdev);
721
722 return NULL;
723}
724
670/** 725/**
671 * pcibios_set_pcie_slot_reset - Set PCI-E reset state 726 * pcibios_set_pcie_slot_reset - Set PCI-E reset state
672 * @dev: pci device struct 727 * @dev: pci device struct
@@ -689,13 +744,19 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
689 switch (state) { 744 switch (state) {
690 case pcie_deassert_reset: 745 case pcie_deassert_reset:
691 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); 746 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
747 eeh_unfreeze_pe(pe, false);
692 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); 748 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
749 eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
693 break; 750 break;
694 case pcie_hot_reset: 751 case pcie_hot_reset:
752 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
753 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
695 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 754 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
696 eeh_ops->reset(pe, EEH_RESET_HOT); 755 eeh_ops->reset(pe, EEH_RESET_HOT);
697 break; 756 break;
698 case pcie_warm_reset: 757 case pcie_warm_reset:
758 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
759 eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
699 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); 760 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
700 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); 761 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
701 break; 762 break;
@@ -815,15 +876,15 @@ out:
815 */ 876 */
816void eeh_save_bars(struct eeh_dev *edev) 877void eeh_save_bars(struct eeh_dev *edev)
817{ 878{
879 struct pci_dn *pdn;
818 int i; 880 int i;
819 struct device_node *dn;
820 881
821 if (!edev) 882 pdn = eeh_dev_to_pdn(edev);
883 if (!pdn)
822 return; 884 return;
823 dn = eeh_dev_to_of_node(edev);
824 885
825 for (i = 0; i < 16; i++) 886 for (i = 0; i < 16; i++)
826 eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); 887 eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]);
827 888
828 /* 889 /*
829 * For PCI bridges including root port, we need enable bus 890 * For PCI bridges including root port, we need enable bus
@@ -914,7 +975,7 @@ static struct notifier_block eeh_reboot_nb = {
914int eeh_init(void) 975int eeh_init(void)
915{ 976{
916 struct pci_controller *hose, *tmp; 977 struct pci_controller *hose, *tmp;
917 struct device_node *phb; 978 struct pci_dn *pdn;
918 static int cnt = 0; 979 static int cnt = 0;
919 int ret = 0; 980 int ret = 0;
920 981
@@ -949,20 +1010,9 @@ int eeh_init(void)
949 return ret; 1010 return ret;
950 1011
951 /* Enable EEH for all adapters */ 1012 /* Enable EEH for all adapters */
952 if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) { 1013 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
953 list_for_each_entry_safe(hose, tmp, 1014 pdn = hose->pci_data;
954 &hose_list, list_node) { 1015 traverse_pci_dn(pdn, eeh_ops->probe, NULL);
955 phb = hose->dn;
956 traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
957 }
958 } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) {
959 list_for_each_entry_safe(hose, tmp,
960 &hose_list, list_node)
961 pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
962 } else {
963 pr_warn("%s: Invalid probe mode %x",
964 __func__, eeh_subsystem_flags);
965 return -EINVAL;
966 } 1016 }
967 1017
968 /* 1018 /*
@@ -987,8 +1037,8 @@ int eeh_init(void)
987core_initcall_sync(eeh_init); 1037core_initcall_sync(eeh_init);
988 1038
989/** 1039/**
990 * eeh_add_device_early - Enable EEH for the indicated device_node 1040 * eeh_add_device_early - Enable EEH for the indicated device node
991 * @dn: device node for which to set up EEH 1041 * @pdn: PCI device node for which to set up EEH
992 * 1042 *
993 * This routine must be used to perform EEH initialization for PCI 1043 * This routine must be used to perform EEH initialization for PCI
994 * devices that were added after system boot (e.g. hotplug, dlpar). 1044 * devices that were added after system boot (e.g. hotplug, dlpar).
@@ -998,44 +1048,41 @@ core_initcall_sync(eeh_init);
998 * on the CEC architecture, type of the device, on earlier boot 1048 * on the CEC architecture, type of the device, on earlier boot
999 * command-line arguments & etc. 1049 * command-line arguments & etc.
1000 */ 1050 */
1001void eeh_add_device_early(struct device_node *dn) 1051void eeh_add_device_early(struct pci_dn *pdn)
1002{ 1052{
1003 struct pci_controller *phb; 1053 struct pci_controller *phb;
1054 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
1004 1055
1005 /* 1056 if (!edev || !eeh_enabled())
1006 * If we're doing EEH probe based on PCI device, we
1007 * would delay the probe until late stage because
1008 * the PCI device isn't available this moment.
1009 */
1010 if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
1011 return; 1057 return;
1012 1058
1013 if (!of_node_to_eeh_dev(dn))
1014 return;
1015 phb = of_node_to_eeh_dev(dn)->phb;
1016
1017 /* USB Bus children of PCI devices will not have BUID's */ 1059 /* USB Bus children of PCI devices will not have BUID's */
1018 if (NULL == phb || 0 == phb->buid) 1060 phb = edev->phb;
1061 if (NULL == phb ||
1062 (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
1019 return; 1063 return;
1020 1064
1021 eeh_ops->of_probe(dn, NULL); 1065 eeh_ops->probe(pdn, NULL);
1022} 1066}
1023 1067
1024/** 1068/**
1025 * eeh_add_device_tree_early - Enable EEH for the indicated device 1069 * eeh_add_device_tree_early - Enable EEH for the indicated device
1026 * @dn: device node 1070 * @pdn: PCI device node
1027 * 1071 *
1028 * This routine must be used to perform EEH initialization for the 1072 * This routine must be used to perform EEH initialization for the
1029 * indicated PCI device that was added after system boot (e.g. 1073 * indicated PCI device that was added after system boot (e.g.
1030 * hotplug, dlpar). 1074 * hotplug, dlpar).
1031 */ 1075 */
1032void eeh_add_device_tree_early(struct device_node *dn) 1076void eeh_add_device_tree_early(struct pci_dn *pdn)
1033{ 1077{
1034 struct device_node *sib; 1078 struct pci_dn *n;
1035 1079
1036 for_each_child_of_node(dn, sib) 1080 if (!pdn)
1037 eeh_add_device_tree_early(sib); 1081 return;
1038 eeh_add_device_early(dn); 1082
1083 list_for_each_entry(n, &pdn->child_list, list)
1084 eeh_add_device_tree_early(n);
1085 eeh_add_device_early(pdn);
1039} 1086}
1040EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); 1087EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
1041 1088
@@ -1048,7 +1095,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
1048 */ 1095 */
1049void eeh_add_device_late(struct pci_dev *dev) 1096void eeh_add_device_late(struct pci_dev *dev)
1050{ 1097{
1051 struct device_node *dn; 1098 struct pci_dn *pdn;
1052 struct eeh_dev *edev; 1099 struct eeh_dev *edev;
1053 1100
1054 if (!dev || !eeh_enabled()) 1101 if (!dev || !eeh_enabled())
@@ -1056,8 +1103,8 @@ void eeh_add_device_late(struct pci_dev *dev)
1056 1103
1057 pr_debug("EEH: Adding device %s\n", pci_name(dev)); 1104 pr_debug("EEH: Adding device %s\n", pci_name(dev));
1058 1105
1059 dn = pci_device_to_OF_node(dev); 1106 pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
1060 edev = of_node_to_eeh_dev(dn); 1107 edev = pdn_to_eeh_dev(pdn);
1061 if (edev->pdev == dev) { 1108 if (edev->pdev == dev) {
1062 pr_debug("EEH: Already referenced !\n"); 1109 pr_debug("EEH: Already referenced !\n");
1063 return; 1110 return;
@@ -1089,13 +1136,6 @@ void eeh_add_device_late(struct pci_dev *dev)
1089 edev->pdev = dev; 1136 edev->pdev = dev;
1090 dev->dev.archdata.edev = edev; 1137 dev->dev.archdata.edev = edev;
1091 1138
1092 /*
1093 * We have to do the EEH probe here because the PCI device
1094 * hasn't been created yet in the early stage.
1095 */
1096 if (eeh_has_flag(EEH_PROBE_MODE_DEV))
1097 eeh_ops->dev_probe(dev, NULL);
1098
1099 eeh_addr_cache_insert_dev(dev); 1139 eeh_addr_cache_insert_dev(dev);
1100} 1140}
1101 1141
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 07d8a2423a61..eeabeabea49c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -171,30 +171,27 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
171 171
172static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) 172static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
173{ 173{
174 struct device_node *dn; 174 struct pci_dn *pdn;
175 struct eeh_dev *edev; 175 struct eeh_dev *edev;
176 int i; 176 int i;
177 177
178 dn = pci_device_to_OF_node(dev); 178 pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
179 if (!dn) { 179 if (!pdn) {
180 pr_warn("PCI: no pci dn found for dev=%s\n", 180 pr_warn("PCI: no pci dn found for dev=%s\n",
181 pci_name(dev)); 181 pci_name(dev));
182 return; 182 return;
183 } 183 }
184 184
185 edev = of_node_to_eeh_dev(dn); 185 edev = pdn_to_eeh_dev(pdn);
186 if (!edev) { 186 if (!edev) {
187 pr_warn("PCI: no EEH dev found for dn=%s\n", 187 pr_warn("PCI: no EEH dev found for %s\n",
188 dn->full_name); 188 pci_name(dev));
189 return; 189 return;
190 } 190 }
191 191
192 /* Skip any devices for which EEH is not enabled. */ 192 /* Skip any devices for which EEH is not enabled. */
193 if (!edev->pe) { 193 if (!edev->pe) {
194#ifdef DEBUG 194 dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
195 pr_info("PCI: skip building address cache for=%s - %s\n",
196 pci_name(dev), dn->full_name);
197#endif
198 return; 195 return;
199 } 196 }
200 197
@@ -282,18 +279,18 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
282 */ 279 */
283void eeh_addr_cache_build(void) 280void eeh_addr_cache_build(void)
284{ 281{
285 struct device_node *dn; 282 struct pci_dn *pdn;
286 struct eeh_dev *edev; 283 struct eeh_dev *edev;
287 struct pci_dev *dev = NULL; 284 struct pci_dev *dev = NULL;
288 285
289 spin_lock_init(&pci_io_addr_cache_root.piar_lock); 286 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
290 287
291 for_each_pci_dev(dev) { 288 for_each_pci_dev(dev) {
292 dn = pci_device_to_OF_node(dev); 289 pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
293 if (!dn) 290 if (!pdn)
294 continue; 291 continue;
295 292
296 edev = of_node_to_eeh_dev(dn); 293 edev = pdn_to_eeh_dev(pdn);
297 if (!edev) 294 if (!edev)
298 continue; 295 continue;
299 296
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index e5274ee9a75f..aabba94ff9cb 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -43,13 +43,13 @@
43 43
44/** 44/**
45 * eeh_dev_init - Create EEH device according to OF node 45 * eeh_dev_init - Create EEH device according to OF node
46 * @dn: device node 46 * @pdn: PCI device node
47 * @data: PHB 47 * @data: PHB
48 * 48 *
49 * It will create EEH device according to the given OF node. The function 49 * It will create EEH device according to the given OF node. The function
50 * might be called by PCI emunation, DR, PHB hotplug. 50 * might be called by PCI emunation, DR, PHB hotplug.
51 */ 51 */
52void *eeh_dev_init(struct device_node *dn, void *data) 52void *eeh_dev_init(struct pci_dn *pdn, void *data)
53{ 53{
54 struct pci_controller *phb = data; 54 struct pci_controller *phb = data;
55 struct eeh_dev *edev; 55 struct eeh_dev *edev;
@@ -63,8 +63,8 @@ void *eeh_dev_init(struct device_node *dn, void *data)
63 } 63 }
64 64
65 /* Associate EEH device with OF node */ 65 /* Associate EEH device with OF node */
66 PCI_DN(dn)->edev = edev; 66 pdn->edev = edev;
67 edev->dn = dn; 67 edev->pdn = pdn;
68 edev->phb = phb; 68 edev->phb = phb;
69 INIT_LIST_HEAD(&edev->list); 69 INIT_LIST_HEAD(&edev->list);
70 70
@@ -80,16 +80,16 @@ void *eeh_dev_init(struct device_node *dn, void *data)
80 */ 80 */
81void eeh_dev_phb_init_dynamic(struct pci_controller *phb) 81void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
82{ 82{
83 struct device_node *dn = phb->dn; 83 struct pci_dn *root = phb->pci_data;
84 84
85 /* EEH PE for PHB */ 85 /* EEH PE for PHB */
86 eeh_phb_pe_create(phb); 86 eeh_phb_pe_create(phb);
87 87
88 /* EEH device for PHB */ 88 /* EEH device for PHB */
89 eeh_dev_init(dn, phb); 89 eeh_dev_init(root, phb);
90 90
91 /* EEH devices for children OF nodes */ 91 /* EEH devices for children OF nodes */
92 traverse_pci_devices(dn, eeh_dev_init, phb); 92 traverse_pci_dn(root, eeh_dev_init, phb);
93} 93}
94 94
95/** 95/**
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d099540c0f56..24768ff3cb73 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -83,28 +83,6 @@ static inline void eeh_pcid_put(struct pci_dev *pdev)
83 module_put(pdev->driver->driver.owner); 83 module_put(pdev->driver->driver.owner);
84} 84}
85 85
86#if 0
87static void print_device_node_tree(struct pci_dn *pdn, int dent)
88{
89 int i;
90 struct device_node *pc;
91
92 if (!pdn)
93 return;
94 for (i = 0; i < dent; i++)
95 printk(" ");
96 printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
97 pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
98 pdn->eeh_pe_config_addr, pdn->node->full_name);
99 dent += 3;
100 pc = pdn->node->child;
101 while (pc) {
102 print_device_node_tree(PCI_DN(pc), dent);
103 pc = pc->sibling;
104 }
105}
106#endif
107
108/** 86/**
109 * eeh_disable_irq - Disable interrupt for the recovering device 87 * eeh_disable_irq - Disable interrupt for the recovering device
110 * @dev: PCI device 88 * @dev: PCI device
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 1e4946c36f9e..35f0b62259bb 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -291,27 +291,25 @@ struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
291 */ 291 */
292static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) 292static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
293{ 293{
294 struct device_node *dn;
295 struct eeh_dev *parent; 294 struct eeh_dev *parent;
295 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
296 296
297 /* 297 /*
298 * It might have the case for the indirect parent 298 * It might have the case for the indirect parent
299 * EEH device already having associated PE, but 299 * EEH device already having associated PE, but
300 * the direct parent EEH device doesn't have yet. 300 * the direct parent EEH device doesn't have yet.
301 */ 301 */
302 dn = edev->dn->parent; 302 pdn = pdn ? pdn->parent : NULL;
303 while (dn) { 303 while (pdn) {
304 /* We're poking out of PCI territory */ 304 /* We're poking out of PCI territory */
305 if (!PCI_DN(dn)) return NULL; 305 parent = pdn_to_eeh_dev(pdn);
306 306 if (!parent)
307 parent = of_node_to_eeh_dev(dn); 307 return NULL;
308 /* We're poking out of PCI territory */
309 if (!parent) return NULL;
310 308
311 if (parent->pe) 309 if (parent->pe)
312 return parent->pe; 310 return parent->pe;
313 311
314 dn = dn->parent; 312 pdn = pdn->parent;
315 } 313 }
316 314
317 return NULL; 315 return NULL;
@@ -330,6 +328,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
330{ 328{
331 struct eeh_pe *pe, *parent; 329 struct eeh_pe *pe, *parent;
332 330
331 /* Check if the PE number is valid */
332 if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
333 pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
334 __func__, edev->config_addr, edev->phb->global_number);
335 return -EINVAL;
336 }
337
333 /* 338 /*
334 * Search the PE has been existing or not according 339 * Search the PE has been existing or not according
335 * to the PE address. If that has been existing, the 340 * to the PE address. If that has been existing, the
@@ -338,21 +343,18 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
338 */ 343 */
339 pe = eeh_pe_get(edev); 344 pe = eeh_pe_get(edev);
340 if (pe && !(pe->type & EEH_PE_INVALID)) { 345 if (pe && !(pe->type & EEH_PE_INVALID)) {
341 if (!edev->pe_config_addr) {
342 pr_err("%s: PE with addr 0x%x already exists\n",
343 __func__, edev->config_addr);
344 return -EEXIST;
345 }
346
347 /* Mark the PE as type of PCI bus */ 346 /* Mark the PE as type of PCI bus */
348 pe->type = EEH_PE_BUS; 347 pe->type = EEH_PE_BUS;
349 edev->pe = pe; 348 edev->pe = pe;
350 349
351 /* Put the edev to PE */ 350 /* Put the edev to PE */
352 list_add_tail(&edev->list, &pe->edevs); 351 list_add_tail(&edev->list, &pe->edevs);
353 pr_debug("EEH: Add %s to Bus PE#%x\n", 352 pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
354 edev->dn->full_name, pe->addr); 353 edev->phb->global_number,
355 354 edev->config_addr >> 8,
355 PCI_SLOT(edev->config_addr & 0xFF),
356 PCI_FUNC(edev->config_addr & 0xFF),
357 pe->addr);
356 return 0; 358 return 0;
357 } else if (pe && (pe->type & EEH_PE_INVALID)) { 359 } else if (pe && (pe->type & EEH_PE_INVALID)) {
358 list_add_tail(&edev->list, &pe->edevs); 360 list_add_tail(&edev->list, &pe->edevs);
@@ -368,9 +370,14 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
368 parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); 370 parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
369 parent = parent->parent; 371 parent = parent->parent;
370 } 372 }
371 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
372 edev->dn->full_name, pe->addr, pe->parent->addr);
373 373
374 pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device "
375 "PE#%x, Parent PE#%x\n",
376 edev->phb->global_number,
377 edev->config_addr >> 8,
378 PCI_SLOT(edev->config_addr & 0xFF),
379 PCI_FUNC(edev->config_addr & 0xFF),
380 pe->addr, pe->parent->addr);
374 return 0; 381 return 0;
375 } 382 }
376 383
@@ -409,8 +416,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
409 list_add_tail(&pe->child, &parent->child_list); 416 list_add_tail(&pe->child, &parent->child_list);
410 list_add_tail(&edev->list, &pe->edevs); 417 list_add_tail(&edev->list, &pe->edevs);
411 edev->pe = pe; 418 edev->pe = pe;
412 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", 419 pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
413 edev->dn->full_name, pe->addr, pe->parent->addr); 420 "Device PE#%x, Parent PE#%x\n",
421 edev->phb->global_number,
422 edev->config_addr >> 8,
423 PCI_SLOT(edev->config_addr & 0xFF),
424 PCI_FUNC(edev->config_addr & 0xFF),
425 pe->addr, pe->parent->addr);
414 426
415 return 0; 427 return 0;
416} 428}
@@ -430,8 +442,11 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
430 int cnt; 442 int cnt;
431 443
432 if (!edev->pe) { 444 if (!edev->pe) {
433 pr_debug("%s: No PE found for EEH device %s\n", 445 pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
434 __func__, edev->dn->full_name); 446 __func__, edev->phb->global_number,
447 edev->config_addr >> 8,
448 PCI_SLOT(edev->config_addr & 0xFF),
449 PCI_FUNC(edev->config_addr & 0xFF));
435 return -EEXIST; 450 return -EEXIST;
436 } 451 }
437 452
@@ -653,9 +668,9 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
653 * blocked on normal path during the stage. So we need utilize 668 * blocked on normal path during the stage. So we need utilize
654 * eeh operations, which is always permitted. 669 * eeh operations, which is always permitted.
655 */ 670 */
656static void eeh_bridge_check_link(struct eeh_dev *edev, 671static void eeh_bridge_check_link(struct eeh_dev *edev)
657 struct device_node *dn)
658{ 672{
673 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
659 int cap; 674 int cap;
660 uint32_t val; 675 uint32_t val;
661 int timeout = 0; 676 int timeout = 0;
@@ -675,32 +690,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
675 690
676 /* Check slot status */ 691 /* Check slot status */
677 cap = edev->pcie_cap; 692 cap = edev->pcie_cap;
678 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); 693 eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
679 if (!(val & PCI_EXP_SLTSTA_PDS)) { 694 if (!(val & PCI_EXP_SLTSTA_PDS)) {
680 pr_debug(" No card in the slot (0x%04x) !\n", val); 695 pr_debug(" No card in the slot (0x%04x) !\n", val);
681 return; 696 return;
682 } 697 }
683 698
684 /* Check power status if we have the capability */ 699 /* Check power status if we have the capability */
685 eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val); 700 eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
686 if (val & PCI_EXP_SLTCAP_PCP) { 701 if (val & PCI_EXP_SLTCAP_PCP) {
687 eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val); 702 eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
688 if (val & PCI_EXP_SLTCTL_PCC) { 703 if (val & PCI_EXP_SLTCTL_PCC) {
689 pr_debug(" In power-off state, power it on ...\n"); 704 pr_debug(" In power-off state, power it on ...\n");
690 val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); 705 val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
691 val |= (0x0100 & PCI_EXP_SLTCTL_PIC); 706 val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
692 eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val); 707 eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
693 msleep(2 * 1000); 708 msleep(2 * 1000);
694 } 709 }
695 } 710 }
696 711
697 /* Enable link */ 712 /* Enable link */
698 eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val); 713 eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
699 val &= ~PCI_EXP_LNKCTL_LD; 714 val &= ~PCI_EXP_LNKCTL_LD;
700 eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val); 715 eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);
701 716
702 /* Check link */ 717 /* Check link */
703 eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val); 718 eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
704 if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { 719 if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
705 pr_debug(" No link reporting capability (0x%08x) \n", val); 720 pr_debug(" No link reporting capability (0x%08x) \n", val);
706 msleep(1000); 721 msleep(1000);
@@ -713,7 +728,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
713 msleep(20); 728 msleep(20);
714 timeout += 20; 729 timeout += 20;
715 730
716 eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val); 731 eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
717 if (val & PCI_EXP_LNKSTA_DLLLA) 732 if (val & PCI_EXP_LNKSTA_DLLLA)
718 break; 733 break;
719 } 734 }
@@ -728,9 +743,9 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
728#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) 743#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
729#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) 744#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
730 745
731static void eeh_restore_bridge_bars(struct eeh_dev *edev, 746static void eeh_restore_bridge_bars(struct eeh_dev *edev)
732 struct device_node *dn)
733{ 747{
748 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
734 int i; 749 int i;
735 750
736 /* 751 /*
@@ -738,49 +753,49 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev,
738 * Bus numbers and windows: 0x18 - 0x30 753 * Bus numbers and windows: 0x18 - 0x30
739 */ 754 */
740 for (i = 4; i < 13; i++) 755 for (i = 4; i < 13; i++)
741 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); 756 eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
742 /* Rom: 0x38 */ 757 /* Rom: 0x38 */
743 eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]); 758 eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);
744 759
745 /* Cache line & Latency timer: 0xC 0xD */ 760 /* Cache line & Latency timer: 0xC 0xD */
746 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, 761 eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
747 SAVED_BYTE(PCI_CACHE_LINE_SIZE)); 762 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
748 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, 763 eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
749 SAVED_BYTE(PCI_LATENCY_TIMER)); 764 SAVED_BYTE(PCI_LATENCY_TIMER));
750 /* Max latency, min grant, interrupt ping and line: 0x3C */ 765 /* Max latency, min grant, interrupt ping and line: 0x3C */
751 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); 766 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
752 767
753 /* PCI Command: 0x4 */ 768 /* PCI Command: 0x4 */
754 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); 769 eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
755 770
756 /* Check the PCIe link is ready */ 771 /* Check the PCIe link is ready */
757 eeh_bridge_check_link(edev, dn); 772 eeh_bridge_check_link(edev);
758} 773}
759 774
760static void eeh_restore_device_bars(struct eeh_dev *edev, 775static void eeh_restore_device_bars(struct eeh_dev *edev)
761 struct device_node *dn)
762{ 776{
777 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
763 int i; 778 int i;
764 u32 cmd; 779 u32 cmd;
765 780
766 for (i = 4; i < 10; i++) 781 for (i = 4; i < 10; i++)
767 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); 782 eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
768 /* 12 == Expansion ROM Address */ 783 /* 12 == Expansion ROM Address */
769 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); 784 eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);
770 785
771 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, 786 eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
772 SAVED_BYTE(PCI_CACHE_LINE_SIZE)); 787 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
773 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, 788 eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
774 SAVED_BYTE(PCI_LATENCY_TIMER)); 789 SAVED_BYTE(PCI_LATENCY_TIMER));
775 790
776 /* max latency, min grant, interrupt pin and line */ 791 /* max latency, min grant, interrupt pin and line */
777 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); 792 eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
778 793
779 /* 794 /*
780 * Restore PERR & SERR bits, some devices require it, 795 * Restore PERR & SERR bits, some devices require it,
781 * don't touch the other command bits 796 * don't touch the other command bits
782 */ 797 */
783 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); 798 eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
784 if (edev->config_space[1] & PCI_COMMAND_PARITY) 799 if (edev->config_space[1] & PCI_COMMAND_PARITY)
785 cmd |= PCI_COMMAND_PARITY; 800 cmd |= PCI_COMMAND_PARITY;
786 else 801 else
@@ -789,7 +804,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
789 cmd |= PCI_COMMAND_SERR; 804 cmd |= PCI_COMMAND_SERR;
790 else 805 else
791 cmd &= ~PCI_COMMAND_SERR; 806 cmd &= ~PCI_COMMAND_SERR;
792 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); 807 eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
793} 808}
794 809
795/** 810/**
@@ -804,16 +819,16 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
804static void *eeh_restore_one_device_bars(void *data, void *flag) 819static void *eeh_restore_one_device_bars(void *data, void *flag)
805{ 820{
806 struct eeh_dev *edev = (struct eeh_dev *)data; 821 struct eeh_dev *edev = (struct eeh_dev *)data;
807 struct device_node *dn = eeh_dev_to_of_node(edev); 822 struct pci_dn *pdn = eeh_dev_to_pdn(edev);
808 823
809 /* Do special restore for bridges */ 824 /* Do special restore for bridges */
810 if (edev->mode & EEH_DEV_BRIDGE) 825 if (edev->mode & EEH_DEV_BRIDGE)
811 eeh_restore_bridge_bars(edev, dn); 826 eeh_restore_bridge_bars(edev);
812 else 827 else
813 eeh_restore_device_bars(edev, dn); 828 eeh_restore_device_bars(edev);
814 829
815 if (eeh_ops->restore_config) 830 if (eeh_ops->restore_config && pdn)
816 eeh_ops->restore_config(dn); 831 eeh_ops->restore_config(pdn);
817 832
818 return NULL; 833 return NULL;
819} 834}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d180caf2d6de..8ca9434c40e6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -34,6 +34,7 @@
34#include <asm/ftrace.h> 34#include <asm/ftrace.h>
35#include <asm/hw_irq.h> 35#include <asm/hw_irq.h>
36#include <asm/context_tracking.h> 36#include <asm/context_tracking.h>
37#include <asm/tm.h>
37 38
38/* 39/*
39 * System calls. 40 * System calls.
@@ -145,6 +146,24 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
145 andi. r11,r10,_TIF_SYSCALL_DOTRACE 146 andi. r11,r10,_TIF_SYSCALL_DOTRACE
146 bne syscall_dotrace 147 bne syscall_dotrace
147.Lsyscall_dotrace_cont: 148.Lsyscall_dotrace_cont:
149#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
150BEGIN_FTR_SECTION
151 b 1f
152END_FTR_SECTION_IFCLR(CPU_FTR_TM)
153 extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
154 beq+ 1f
155
156 /* Doom the transaction and don't perform the syscall: */
157 mfmsr r11
158 li r12, 1
159 rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG
160 mtmsrd r11, 0
161 li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
162 TABORT(R11)
163
164 b .Lsyscall_exit
1651:
166#endif
148 cmpldi 0,r0,NR_syscalls 167 cmpldi 0,r0,NR_syscalls
149 bge- syscall_enosys 168 bge- syscall_enosys
150 169
@@ -356,6 +375,11 @@ _GLOBAL(ppc64_swapcontext)
356 bl sys_swapcontext 375 bl sys_swapcontext
357 b .Lsyscall_exit 376 b .Lsyscall_exit
358 377
378_GLOBAL(ppc_switch_endian)
379 bl save_nvgprs
380 bl sys_switch_endian
381 b .Lsyscall_exit
382
359_GLOBAL(ret_from_fork) 383_GLOBAL(ret_from_fork)
360 bl schedule_tail 384 bl schedule_tail
361 REST_NVGPRS(r1) 385 REST_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 05adc8bbdef8..eeaa0d5f69d5 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -94,6 +94,7 @@ _GLOBAL(power7_powersave_common)
94 beq 1f 94 beq 1f
95 addi r1,r1,INT_FRAME_SIZE 95 addi r1,r1,INT_FRAME_SIZE
96 ld r0,16(r1) 96 ld r0,16(r1)
97 li r3,0 /* Return 0 (no nap) */
97 mtlr r0 98 mtlr r0
98 blr 99 blr
99 100
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index b6f123ab90ed..2c647b1e62e4 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -28,6 +28,55 @@
28#include <asm/mce.h> 28#include <asm/mce.h>
29#include <asm/machdep.h> 29#include <asm/machdep.h>
30 30
31static void flush_tlb_206(unsigned int num_sets, unsigned int action)
32{
33 unsigned long rb;
34 unsigned int i;
35
36 switch (action) {
37 case TLB_INVAL_SCOPE_GLOBAL:
38 rb = TLBIEL_INVAL_SET;
39 break;
40 case TLB_INVAL_SCOPE_LPID:
41 rb = TLBIEL_INVAL_SET_LPID;
42 break;
43 default:
44 BUG();
45 break;
46 }
47
48 asm volatile("ptesync" : : : "memory");
49 for (i = 0; i < num_sets; i++) {
50 asm volatile("tlbiel %0" : : "r" (rb));
51 rb += 1 << TLBIEL_INVAL_SET_SHIFT;
52 }
53 asm volatile("ptesync" : : : "memory");
54}
55
56/*
57 * Generic routine to flush TLB on power7. This routine is used as
58 * flush_tlb hook in cpu_spec for Power7 processor.
59 *
60 * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
61 * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
62 */
63void __flush_tlb_power7(unsigned int action)
64{
65 flush_tlb_206(POWER7_TLB_SETS, action);
66}
67
68/*
69 * Generic routine to flush TLB on power8. This routine is used as
70 * flush_tlb hook in cpu_spec for power8 processor.
71 *
72 * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
73 * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
74 */
75void __flush_tlb_power8(unsigned int action)
76{
77 flush_tlb_206(POWER8_TLB_SETS, action);
78}
79
31/* flush SLBs and reload */ 80/* flush SLBs and reload */
32static void flush_and_reload_slb(void) 81static void flush_and_reload_slb(void)
33{ 82{
@@ -79,7 +128,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
79 } 128 }
80 if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { 129 if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
81 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 130 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
82 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); 131 cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
83 /* reset error bits */ 132 /* reset error bits */
84 dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; 133 dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
85 } 134 }
@@ -110,7 +159,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
110 break; 159 break;
111 case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: 160 case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
112 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { 161 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
113 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); 162 cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
114 handled = 1; 163 handled = 1;
115 } 164 }
116 break; 165 break;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 34f7c9b7cd96..1e703f8ebad4 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -26,6 +26,9 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/kmsg_dump.h>
30#include <linux/pstore.h>
31#include <linux/zlib.h>
29#include <asm/uaccess.h> 32#include <asm/uaccess.h>
30#include <asm/nvram.h> 33#include <asm/nvram.h>
31#include <asm/rtas.h> 34#include <asm/rtas.h>
@@ -54,6 +57,680 @@ struct nvram_partition {
54 57
55static LIST_HEAD(nvram_partitions); 58static LIST_HEAD(nvram_partitions);
56 59
60#ifdef CONFIG_PPC_PSERIES
61struct nvram_os_partition rtas_log_partition = {
62 .name = "ibm,rtas-log",
63 .req_size = 2079,
64 .min_size = 1055,
65 .index = -1,
66 .os_partition = true
67};
68#endif
69
70struct nvram_os_partition oops_log_partition = {
71 .name = "lnx,oops-log",
72 .req_size = 4000,
73 .min_size = 2000,
74 .index = -1,
75 .os_partition = true
76};
77
78static const char *nvram_os_partitions[] = {
79#ifdef CONFIG_PPC_PSERIES
80 "ibm,rtas-log",
81#endif
82 "lnx,oops-log",
83 NULL
84};
85
86static void oops_to_nvram(struct kmsg_dumper *dumper,
87 enum kmsg_dump_reason reason);
88
89static struct kmsg_dumper nvram_kmsg_dumper = {
90 .dump = oops_to_nvram
91};
92
93/*
94 * For capturing and compressing an oops or panic report...
95
96 * big_oops_buf[] holds the uncompressed text we're capturing.
97 *
98 * oops_buf[] holds the compressed text, preceded by a oops header.
99 * oops header has u16 holding the version of oops header (to differentiate
100 * between old and new format header) followed by u16 holding the length of
101 * the compressed* text (*Or uncompressed, if compression fails.) and u64
102 * holding the timestamp. oops_buf[] gets written to NVRAM.
103 *
104 * oops_log_info points to the header. oops_data points to the compressed text.
105 *
106 * +- oops_buf
107 * | +- oops_data
108 * v v
109 * +-----------+-----------+-----------+------------------------+
110 * | version | length | timestamp | text |
111 * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
112 * +-----------+-----------+-----------+------------------------+
113 * ^
114 * +- oops_log_info
115 *
116 * We preallocate these buffers during init to avoid kmalloc during oops/panic.
117 */
118static size_t big_oops_buf_sz;
119static char *big_oops_buf, *oops_buf;
120static char *oops_data;
121static size_t oops_data_sz;
122
123/* Compression parameters */
124#define COMPR_LEVEL 6
125#define WINDOW_BITS 12
126#define MEM_LEVEL 4
127static struct z_stream_s stream;
128
129#ifdef CONFIG_PSTORE
130#ifdef CONFIG_PPC_POWERNV
131static struct nvram_os_partition skiboot_partition = {
132 .name = "ibm,skiboot",
133 .index = -1,
134 .os_partition = false
135};
136#endif
137
138#ifdef CONFIG_PPC_PSERIES
139static struct nvram_os_partition of_config_partition = {
140 .name = "of-config",
141 .index = -1,
142 .os_partition = false
143};
144#endif
145
146static struct nvram_os_partition common_partition = {
147 .name = "common",
148 .index = -1,
149 .os_partition = false
150};
151
152static enum pstore_type_id nvram_type_ids[] = {
153 PSTORE_TYPE_DMESG,
154 PSTORE_TYPE_PPC_COMMON,
155 -1,
156 -1,
157 -1
158};
159static int read_type;
160#endif
161
162/* nvram_write_os_partition
163 *
164 * We need to buffer the error logs into nvram to ensure that we have
165 * the failure information to decode. If we have a severe error there
166 * is no way to guarantee that the OS or the machine is in a state to
167 * get back to user land and write the error to disk. For example if
168 * the SCSI device driver causes a Machine Check by writing to a bad
169 * IO address, there is no way of guaranteeing that the device driver
170 * is in any state that is would also be able to write the error data
171 * captured to disk, thus we buffer it in NVRAM for analysis on the
172 * next boot.
173 *
174 * In NVRAM the partition containing the error log buffer will looks like:
175 * Header (in bytes):
176 * +-----------+----------+--------+------------+------------------+
177 * | signature | checksum | length | name | data |
178 * |0 |1 |2 3|4 15|16 length-1|
179 * +-----------+----------+--------+------------+------------------+
180 *
181 * The 'data' section would look like (in bytes):
182 * +--------------+------------+-----------------------------------+
183 * | event_logged | sequence # | error log |
184 * |0 3|4 7|8 error_log_size-1|
185 * +--------------+------------+-----------------------------------+
186 *
187 * event_logged: 0 if event has not been logged to syslog, 1 if it has
188 * sequence #: The unique sequence # for each event. (until it wraps)
189 * error log: The error log from event_scan
190 */
191int nvram_write_os_partition(struct nvram_os_partition *part,
192 char *buff, int length,
193 unsigned int err_type,
194 unsigned int error_log_cnt)
195{
196 int rc;
197 loff_t tmp_index;
198 struct err_log_info info;
199
200 if (part->index == -1)
201 return -ESPIPE;
202
203 if (length > part->size)
204 length = part->size;
205
206 info.error_type = cpu_to_be32(err_type);
207 info.seq_num = cpu_to_be32(error_log_cnt);
208
209 tmp_index = part->index;
210
211 rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info),
212 &tmp_index);
213 if (rc <= 0) {
214 pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
215 return rc;
216 }
217
218 rc = ppc_md.nvram_write(buff, length, &tmp_index);
219 if (rc <= 0) {
220 pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
221 return rc;
222 }
223
224 return 0;
225}
226
227/* nvram_read_partition
228 *
229 * Reads nvram partition for at most 'length'
230 */
231int nvram_read_partition(struct nvram_os_partition *part, char *buff,
232 int length, unsigned int *err_type,
233 unsigned int *error_log_cnt)
234{
235 int rc;
236 loff_t tmp_index;
237 struct err_log_info info;
238
239 if (part->index == -1)
240 return -1;
241
242 if (length > part->size)
243 length = part->size;
244
245 tmp_index = part->index;
246
247 if (part->os_partition) {
248 rc = ppc_md.nvram_read((char *)&info,
249 sizeof(struct err_log_info),
250 &tmp_index);
251 if (rc <= 0) {
252 pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
253 return rc;
254 }
255 }
256
257 rc = ppc_md.nvram_read(buff, length, &tmp_index);
258 if (rc <= 0) {
259 pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
260 return rc;
261 }
262
263 if (part->os_partition) {
264 *error_log_cnt = be32_to_cpu(info.seq_num);
265 *err_type = be32_to_cpu(info.error_type);
266 }
267
268 return 0;
269}
270
271/* nvram_init_os_partition
272 *
273 * This sets up a partition with an "OS" signature.
274 *
275 * The general strategy is the following:
276 * 1.) If a partition with the indicated name already exists...
277 * - If it's large enough, use it.
278 * - Otherwise, recycle it and keep going.
279 * 2.) Search for a free partition that is large enough.
280 * 3.) If there's not a free partition large enough, recycle any obsolete
281 * OS partitions and try again.
282 * 4.) Will first try getting a chunk that will satisfy the requested size.
283 * 5.) If a chunk of the requested size cannot be allocated, then try finding
284 * a chunk that will satisfy the minum needed.
285 *
286 * Returns 0 on success, else -1.
287 */
288int __init nvram_init_os_partition(struct nvram_os_partition *part)
289{
290 loff_t p;
291 int size;
292
293 /* Look for ours */
294 p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
295
296 /* Found one but too small, remove it */
297 if (p && size < part->min_size) {
298 pr_info("nvram: Found too small %s partition,"
299 " removing it...\n", part->name);
300 nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
301 p = 0;
302 }
303
304 /* Create one if we didn't find */
305 if (!p) {
306 p = nvram_create_partition(part->name, NVRAM_SIG_OS,
307 part->req_size, part->min_size);
308 if (p == -ENOSPC) {
309 pr_info("nvram: No room to create %s partition, "
310 "deleting any obsolete OS partitions...\n",
311 part->name);
312 nvram_remove_partition(NULL, NVRAM_SIG_OS,
313 nvram_os_partitions);
314 p = nvram_create_partition(part->name, NVRAM_SIG_OS,
315 part->req_size, part->min_size);
316 }
317 }
318
319 if (p <= 0) {
320 pr_err("nvram: Failed to find or create %s"
321 " partition, err %d\n", part->name, (int)p);
322 return -1;
323 }
324
325 part->index = p;
326 part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
327
328 return 0;
329}
330
331/* Derived from logfs_compress() */
332static int nvram_compress(const void *in, void *out, size_t inlen,
333 size_t outlen)
334{
335 int err, ret;
336
337 ret = -EIO;
338 err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
339 MEM_LEVEL, Z_DEFAULT_STRATEGY);
340 if (err != Z_OK)
341 goto error;
342
343 stream.next_in = in;
344 stream.avail_in = inlen;
345 stream.total_in = 0;
346 stream.next_out = out;
347 stream.avail_out = outlen;
348 stream.total_out = 0;
349
350 err = zlib_deflate(&stream, Z_FINISH);
351 if (err != Z_STREAM_END)
352 goto error;
353
354 err = zlib_deflateEnd(&stream);
355 if (err != Z_OK)
356 goto error;
357
358 if (stream.total_out >= stream.total_in)
359 goto error;
360
361 ret = stream.total_out;
362error:
363 return ret;
364}
365
366/* Compress the text from big_oops_buf into oops_buf. */
367static int zip_oops(size_t text_len)
368{
369 struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
370 int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
371 oops_data_sz);
372 if (zipped_len < 0) {
373 pr_err("nvram: compression failed; returned %d\n", zipped_len);
374 pr_err("nvram: logging uncompressed oops/panic report\n");
375 return -1;
376 }
377 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
378 oops_hdr->report_length = cpu_to_be16(zipped_len);
379 oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
380 return 0;
381}
382
383#ifdef CONFIG_PSTORE
/*
 * pstore 'open' callback: rewind the partition-type iterator so the
 * next nvram_pstore_read() starts again from the first entry.
 */
static int nvram_pstore_open(struct pstore_info *psi)
{
	/* Reset the iterator to start reading partitions again */
	read_type = -1;
	return 0;
}
390
391/**
392 * nvram_pstore_write - pstore write callback for nvram
393 * @type: Type of message logged
394 * @reason: reason behind dump (oops/panic)
395 * @id: identifier to indicate the write performed
396 * @part: pstore writes data to registered buffer in parts,
397 * part number will indicate the same.
398 * @count: Indicates oops count
399 * @compressed: Flag to indicate the log is compressed
400 * @size: number of bytes written to the registered buffer
401 * @psi: registered pstore_info structure
402 *
403 * Called by pstore_dump() when an oops or panic report is logged in the
404 * printk buffer.
405 * Returns 0 on successful write.
406 */
407static int nvram_pstore_write(enum pstore_type_id type,
408 enum kmsg_dump_reason reason,
409 u64 *id, unsigned int part, int count,
410 bool compressed, size_t size,
411 struct pstore_info *psi)
412{
413 int rc;
414 unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
415 struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
416
417 /* part 1 has the recent messages from printk buffer */
418 if (part > 1 || (type != PSTORE_TYPE_DMESG))
419 return -1;
420
421 if (clobbering_unread_rtas_event())
422 return -1;
423
424 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
425 oops_hdr->report_length = cpu_to_be16(size);
426 oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
427
428 if (compressed)
429 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
430
431 rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
432 (int) (sizeof(*oops_hdr) + size), err_type, count);
433
434 if (rc != 0)
435 return rc;
436
437 *id = part;
438 return 0;
439}
440
441/*
442 * Reads the oops/panic report, rtas, of-config and common partition.
443 * Returns the length of the data we read from each partition.
444 * Returns 0 if we've been called before.
445 */
446static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
447 int *count, struct timespec *time, char **buf,
448 bool *compressed, struct pstore_info *psi)
449{
450 struct oops_log_info *oops_hdr;
451 unsigned int err_type, id_no, size = 0;
452 struct nvram_os_partition *part = NULL;
453 char *buff = NULL;
454 int sig = 0;
455 loff_t p;
456
457 read_type++;
458
459 switch (nvram_type_ids[read_type]) {
460 case PSTORE_TYPE_DMESG:
461 part = &oops_log_partition;
462 *type = PSTORE_TYPE_DMESG;
463 break;
464 case PSTORE_TYPE_PPC_COMMON:
465 sig = NVRAM_SIG_SYS;
466 part = &common_partition;
467 *type = PSTORE_TYPE_PPC_COMMON;
468 *id = PSTORE_TYPE_PPC_COMMON;
469 time->tv_sec = 0;
470 time->tv_nsec = 0;
471 break;
472#ifdef CONFIG_PPC_PSERIES
473 case PSTORE_TYPE_PPC_RTAS:
474 part = &rtas_log_partition;
475 *type = PSTORE_TYPE_PPC_RTAS;
476 time->tv_sec = last_rtas_event;
477 time->tv_nsec = 0;
478 break;
479 case PSTORE_TYPE_PPC_OF:
480 sig = NVRAM_SIG_OF;
481 part = &of_config_partition;
482 *type = PSTORE_TYPE_PPC_OF;
483 *id = PSTORE_TYPE_PPC_OF;
484 time->tv_sec = 0;
485 time->tv_nsec = 0;
486 break;
487#endif
488#ifdef CONFIG_PPC_POWERNV
489 case PSTORE_TYPE_PPC_OPAL:
490 sig = NVRAM_SIG_FW;
491 part = &skiboot_partition;
492 *type = PSTORE_TYPE_PPC_OPAL;
493 *id = PSTORE_TYPE_PPC_OPAL;
494 time->tv_sec = 0;
495 time->tv_nsec = 0;
496 break;
497#endif
498 default:
499 return 0;
500 }
501
502 if (!part->os_partition) {
503 p = nvram_find_partition(part->name, sig, &size);
504 if (p <= 0) {
505 pr_err("nvram: Failed to find partition %s, "
506 "err %d\n", part->name, (int)p);
507 return 0;
508 }
509 part->index = p;
510 part->size = size;
511 }
512
513 buff = kmalloc(part->size, GFP_KERNEL);
514
515 if (!buff)
516 return -ENOMEM;
517
518 if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
519 kfree(buff);
520 return 0;
521 }
522
523 *count = 0;
524
525 if (part->os_partition)
526 *id = id_no;
527
528 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
529 size_t length, hdr_size;
530
531 oops_hdr = (struct oops_log_info *)buff;
532 if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
533 /* Old format oops header had 2-byte record size */
534 hdr_size = sizeof(u16);
535 length = be16_to_cpu(oops_hdr->version);
536 time->tv_sec = 0;
537 time->tv_nsec = 0;
538 } else {
539 hdr_size = sizeof(*oops_hdr);
540 length = be16_to_cpu(oops_hdr->report_length);
541 time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
542 time->tv_nsec = 0;
543 }
544 *buf = kmalloc(length, GFP_KERNEL);
545 if (*buf == NULL)
546 return -ENOMEM;
547 memcpy(*buf, buff + hdr_size, length);
548 kfree(buff);
549
550 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
551 *compressed = true;
552 else
553 *compressed = false;
554 return length;
555 }
556
557 *buf = buff;
558 return part->size;
559}
560
/*
 * pstore backend descriptor for NVRAM.  The .buf/.bufsize/.buf_lock
 * fields are filled in by nvram_pstore_init() before registration.
 */
static struct pstore_info nvram_pstore_info = {
	.owner = THIS_MODULE,
	.name = "nvram",
	.open = nvram_pstore_open,
	.read = nvram_pstore_read,
	.write = nvram_pstore_write,
};
568
569static int nvram_pstore_init(void)
570{
571 int rc = 0;
572
573 if (machine_is(pseries)) {
574 nvram_type_ids[2] = PSTORE_TYPE_PPC_RTAS;
575 nvram_type_ids[3] = PSTORE_TYPE_PPC_OF;
576 } else
577 nvram_type_ids[2] = PSTORE_TYPE_PPC_OPAL;
578
579 nvram_pstore_info.buf = oops_data;
580 nvram_pstore_info.bufsize = oops_data_sz;
581
582 spin_lock_init(&nvram_pstore_info.buf_lock);
583
584 rc = pstore_register(&nvram_pstore_info);
585 if (rc != 0)
586 pr_err("nvram: pstore_register() failed, defaults to "
587 "kmsg_dump; returned %d\n", rc);
588
589 return rc;
590}
591#else
/*
 * CONFIG_PSTORE disabled: report failure so the caller falls back to
 * registering a kmsg dumper instead.
 */
static int nvram_pstore_init(void)
{
	return -1;
}
596#endif
597
/*
 * Set up the oops-log NVRAM partition and the buffers used to capture
 * oops/panic reports.  Prefers pstore; if pstore registration fails,
 * falls back to a kmsg dumper with optional zlib compression.
 *
 * @rtas_partition_exists: non-zero when an RTAS log partition exists
 * and may be shared if a dedicated oops partition cannot be created
 * (pseries only).
 */
void __init nvram_init_oops_partition(int rtas_partition_exists)
{
	int rc;

	rc = nvram_init_os_partition(&oops_log_partition);
	if (rc != 0) {
#ifdef CONFIG_PPC_PSERIES
		if (!rtas_partition_exists) {
			pr_err("nvram: Failed to initialize oops partition!");
			return;
		}
		/* Fall back to sharing the RTAS partition for oops reports. */
		pr_notice("nvram: Using %s partition to log both"
			  " RTAS errors and oops/panic reports\n",
			  rtas_log_partition.name);
		memcpy(&oops_log_partition, &rtas_log_partition,
		       sizeof(rtas_log_partition));
#else
		pr_err("nvram: Failed to initialize oops partition!");
		return;
#endif
	}
	/* oops_buf holds header + report data for the whole partition. */
	oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
	if (!oops_buf) {
		pr_err("nvram: No memory for %s partition\n",
			oops_log_partition.name);
		return;
	}
	oops_data = oops_buf + sizeof(struct oops_log_info);
	oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);

	/* If pstore registration succeeds, it owns oops logging; done. */
	rc = nvram_pstore_init();

	if (!rc)
		return;

	/*
	 * Figure compression (preceded by elimination of each line's <n>
	 * severity prefix) will reduce the oops/panic report to at most
	 * 45% of its original size.
	 */
	big_oops_buf_sz = (oops_data_sz * 100) / 45;
	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
	if (big_oops_buf) {
		stream.workspace =  kmalloc(zlib_deflate_workspacesize(
					WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
		if (!stream.workspace) {
			pr_err("nvram: No memory for compression workspace; "
				"skipping compression of %s partition data\n",
				oops_log_partition.name);
			kfree(big_oops_buf);
			big_oops_buf = NULL;
		}
	} else {
		pr_err("No memory for uncompressed %s data; "
			"skipping compression\n", oops_log_partition.name);
		stream.workspace = NULL;
	}

	rc = kmsg_dump_register(&nvram_kmsg_dumper);
	if (rc != 0) {
		pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
		/* NOTE(review): oops_buf/big_oops_buf are freed but the
		 * global pointers are left dangling — confirm no later
		 * path can dereference them after this failure. */
		kfree(oops_buf);
		kfree(big_oops_buf);
		kfree(stream.workspace);
	}
}
664
/*
 * This is our kmsg_dump callback, called after an oops or panic report
 * has been written to the printk buffer. We want to capture as much
 * of the printk buffer as possible. First, capture as much as we can
 * that we think will compress sufficiently to fit in the lnx,oops-log
 * partition. If that's too much, go back and capture uncompressed text.
 */
static void oops_to_nvram(struct kmsg_dumper *dumper,
			  enum kmsg_dump_reason reason)
{
	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
	/* Per-boot counter stored with each report written to NVRAM. */
	static unsigned int oops_count = 0;
	static bool panicking = false;
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	size_t text_len;
	unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
	int rc = -1;

	switch (reason) {
	case KMSG_DUMP_RESTART:
	case KMSG_DUMP_HALT:
	case KMSG_DUMP_POWEROFF:
		/* These are almost always orderly shutdowns. */
		return;
	case KMSG_DUMP_OOPS:
		break;
	case KMSG_DUMP_PANIC:
		panicking = true;
		break;
	case KMSG_DUMP_EMERG:
		if (panicking)
			/* Panic report already captured. */
			return;
		break;
	default:
		pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
		       __func__, (int) reason);
		return;
	}

	/* Don't clobber an RTAS error that hasn't been collected yet. */
	if (clobbering_unread_rtas_event())
		return;

	/* trylock: never block in a crash path; just drop the report. */
	if (!spin_trylock_irqsave(&lock, flags))
		return;

	/* Preferred path: grab extra text and compress it into oops_buf. */
	if (big_oops_buf) {
		kmsg_dump_get_buffer(dumper, false,
				     big_oops_buf, big_oops_buf_sz, &text_len);
		rc = zip_oops(text_len);
	}
	if (rc != 0) {
		/* Compression unavailable/failed: capture uncompressed. */
		kmsg_dump_rewind(dumper);
		kmsg_dump_get_buffer(dumper, false,
				     oops_data, oops_data_sz, &text_len);
		err_type = ERR_TYPE_KERNEL_PANIC;
		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
		oops_hdr->report_length = cpu_to_be16(text_len);
		oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
	}

	/* NOTE(review): on the compressed path text_len is the uncompressed
	 * length, so this writes more than the zipped payload; presumably
	 * nvram_write_os_partition clamps to the partition size — confirm. */
	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
		(int) (sizeof(*oops_hdr) + text_len), err_type,
		++oops_count);

	spin_unlock_irqrestore(&lock, flags);
}
733
57static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) 734static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
58{ 735{
59 int size; 736 int size;
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 2f35a72642c6..b60a67d92ebd 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -72,7 +72,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
72 72
73 /* Register devices with EEH */ 73 /* Register devices with EEH */
74 if (dev->dev.of_node->child) 74 if (dev->dev.of_node->child)
75 eeh_add_device_tree_early(dev->dev.of_node); 75 eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
76 76
77 /* Scan the bus */ 77 /* Scan the bus */
78 pcibios_scan_phb(phb); 78 pcibios_scan_phb(phb);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2a525c938158..0d054068a21d 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -76,7 +76,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
76 list_add_tail(&phb->list_node, &hose_list); 76 list_add_tail(&phb->list_node, &hose_list);
77 spin_unlock(&hose_spinlock); 77 spin_unlock(&hose_spinlock);
78 phb->dn = dev; 78 phb->dn = dev;
79 phb->is_dynamic = mem_init_done; 79 phb->is_dynamic = slab_is_available();
80#ifdef CONFIG_PPC64 80#ifdef CONFIG_PPC64
81 if (dev) { 81 if (dev) {
82 int nid = of_node_to_nid(dev); 82 int nid = of_node_to_nid(dev);
@@ -109,8 +109,10 @@ void pcibios_free_controller(struct pci_controller *phb)
109resource_size_t pcibios_window_alignment(struct pci_bus *bus, 109resource_size_t pcibios_window_alignment(struct pci_bus *bus,
110 unsigned long type) 110 unsigned long type)
111{ 111{
112 if (ppc_md.pcibios_window_alignment) 112 struct pci_controller *phb = pci_bus_to_host(bus);
113 return ppc_md.pcibios_window_alignment(bus, type); 113
114 if (phb->controller_ops.window_alignment)
115 return phb->controller_ops.window_alignment(bus, type);
114 116
115 /* 117 /*
116 * PCI core will figure out the default 118 * PCI core will figure out the default
@@ -122,14 +124,26 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
122 124
123void pcibios_reset_secondary_bus(struct pci_dev *dev) 125void pcibios_reset_secondary_bus(struct pci_dev *dev)
124{ 126{
125 if (ppc_md.pcibios_reset_secondary_bus) { 127 struct pci_controller *phb = pci_bus_to_host(dev->bus);
126 ppc_md.pcibios_reset_secondary_bus(dev); 128
129 if (phb->controller_ops.reset_secondary_bus) {
130 phb->controller_ops.reset_secondary_bus(dev);
127 return; 131 return;
128 } 132 }
129 133
130 pci_reset_secondary_bus(dev); 134 pci_reset_secondary_bus(dev);
131} 135}
132 136
137#ifdef CONFIG_PCI_IOV
138resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
139{
140 if (ppc_md.pcibios_iov_resource_alignment)
141 return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
142
143 return pci_iov_resource_size(pdev, resno);
144}
145#endif /* CONFIG_PCI_IOV */
146
133static resource_size_t pcibios_io_size(const struct pci_controller *hose) 147static resource_size_t pcibios_io_size(const struct pci_controller *hose)
134{ 148{
135#ifdef CONFIG_PPC64 149#ifdef CONFIG_PPC64
@@ -788,6 +802,10 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
788 pci_name(dev)); 802 pci_name(dev));
789 return; 803 return;
790 } 804 }
805
806 if (dev->is_virtfn)
807 return;
808
791 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 809 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
792 struct resource *res = dev->resource + i; 810 struct resource *res = dev->resource + i;
793 struct pci_bus_region reg; 811 struct pci_bus_region reg;
@@ -942,6 +960,8 @@ static void pcibios_fixup_bridge(struct pci_bus *bus)
942 960
943void pcibios_setup_bus_self(struct pci_bus *bus) 961void pcibios_setup_bus_self(struct pci_bus *bus)
944{ 962{
963 struct pci_controller *phb;
964
945 /* Fix up the bus resources for P2P bridges */ 965 /* Fix up the bus resources for P2P bridges */
946 if (bus->self != NULL) 966 if (bus->self != NULL)
947 pcibios_fixup_bridge(bus); 967 pcibios_fixup_bridge(bus);
@@ -953,12 +973,14 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
953 ppc_md.pcibios_fixup_bus(bus); 973 ppc_md.pcibios_fixup_bus(bus);
954 974
955 /* Setup bus DMA mappings */ 975 /* Setup bus DMA mappings */
956 if (ppc_md.pci_dma_bus_setup) 976 phb = pci_bus_to_host(bus);
957 ppc_md.pci_dma_bus_setup(bus); 977 if (phb->controller_ops.dma_bus_setup)
978 phb->controller_ops.dma_bus_setup(bus);
958} 979}
959 980
960static void pcibios_setup_device(struct pci_dev *dev) 981static void pcibios_setup_device(struct pci_dev *dev)
961{ 982{
983 struct pci_controller *phb;
962 /* Fixup NUMA node as it may not be setup yet by the generic 984 /* Fixup NUMA node as it may not be setup yet by the generic
963 * code and is needed by the DMA init 985 * code and is needed by the DMA init
964 */ 986 */
@@ -969,8 +991,9 @@ static void pcibios_setup_device(struct pci_dev *dev)
969 set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); 991 set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
970 992
971 /* Additional platform DMA/iommu setup */ 993 /* Additional platform DMA/iommu setup */
972 if (ppc_md.pci_dma_dev_setup) 994 phb = pci_bus_to_host(dev->bus);
973 ppc_md.pci_dma_dev_setup(dev); 995 if (phb->controller_ops.dma_dev_setup)
996 phb->controller_ops.dma_dev_setup(dev);
974 997
975 /* Read default IRQs and fixup if necessary */ 998 /* Read default IRQs and fixup if necessary */
976 pci_read_irq_line(dev); 999 pci_read_irq_line(dev);
@@ -986,6 +1009,12 @@ int pcibios_add_device(struct pci_dev *dev)
986 */ 1009 */
987 if (dev->bus->is_added) 1010 if (dev->bus->is_added)
988 pcibios_setup_device(dev); 1011 pcibios_setup_device(dev);
1012
1013#ifdef CONFIG_PCI_IOV
1014 if (ppc_md.pcibios_fixup_sriov)
1015 ppc_md.pcibios_fixup_sriov(dev);
1016#endif /* CONFIG_PCI_IOV */
1017
989 return 0; 1018 return 0;
990} 1019}
991 1020
@@ -1450,8 +1479,10 @@ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1450 1479
1451int pcibios_enable_device(struct pci_dev *dev, int mask) 1480int pcibios_enable_device(struct pci_dev *dev, int mask)
1452{ 1481{
1453 if (ppc_md.pcibios_enable_device_hook) 1482 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1454 if (ppc_md.pcibios_enable_device_hook(dev)) 1483
1484 if (phb->controller_ops.enable_device_hook)
1485 if (!phb->controller_ops.enable_device_hook(dev))
1455 return -EINVAL; 1486 return -EINVAL;
1456 1487
1457 return pci_enable_resources(dev, mask); 1488 return pci_enable_resources(dev, mask);
@@ -1624,8 +1655,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
1624 1655
1625 /* Get probe mode and perform scan */ 1656 /* Get probe mode and perform scan */
1626 mode = PCI_PROBE_NORMAL; 1657 mode = PCI_PROBE_NORMAL;
1627 if (node && ppc_md.pci_probe_mode) 1658 if (node && hose->controller_ops.probe_mode)
1628 mode = ppc_md.pci_probe_mode(bus); 1659 mode = hose->controller_ops.probe_mode(bus);
1629 pr_debug(" probe mode: %d\n", mode); 1660 pr_debug(" probe mode: %d\n", mode);
1630 if (mode == PCI_PROBE_DEVTREE) 1661 if (mode == PCI_PROBE_DEVTREE)
1631 of_scan_bus(node, bus); 1662 of_scan_bus(node, bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 5b789177aa29..7ed85a69a9c2 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -73,13 +73,16 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
73{ 73{
74 int slotno, mode, pass, max; 74 int slotno, mode, pass, max;
75 struct pci_dev *dev; 75 struct pci_dev *dev;
76 struct pci_controller *phb;
76 struct device_node *dn = pci_bus_to_OF_node(bus); 77 struct device_node *dn = pci_bus_to_OF_node(bus);
77 78
78 eeh_add_device_tree_early(dn); 79 eeh_add_device_tree_early(PCI_DN(dn));
80
81 phb = pci_bus_to_host(bus);
79 82
80 mode = PCI_PROBE_NORMAL; 83 mode = PCI_PROBE_NORMAL;
81 if (ppc_md.pci_probe_mode) 84 if (phb->controller_ops.probe_mode)
82 mode = ppc_md.pci_probe_mode(bus); 85 mode = phb->controller_ops.probe_mode(bus);
83 86
84 if (mode == PCI_PROBE_DEVTREE) { 87 if (mode == PCI_PROBE_DEVTREE) {
85 /* use ofdt-based probe */ 88 /* use ofdt-based probe */
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 83df3075d3df..b3b4df91b792 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -32,12 +32,237 @@
32#include <asm/ppc-pci.h> 32#include <asm/ppc-pci.h>
33#include <asm/firmware.h> 33#include <asm/firmware.h>
34 34
35/*
36 * The function is used to find the firmware data of one
37 * specific PCI device, which is attached to the indicated
38 * PCI bus. For VFs, their firmware data is linked to that
39 * one of PF's bridge. For other devices, their firmware
40 * data is linked to that of their bridge.
41 */
42static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
43{
44 struct pci_bus *pbus;
45 struct device_node *dn;
46 struct pci_dn *pdn;
47
48 /*
49 * We probably have virtual bus which doesn't
50 * have associated bridge.
51 */
52 pbus = bus;
53 while (pbus) {
54 if (pci_is_root_bus(pbus) || pbus->self)
55 break;
56
57 pbus = pbus->parent;
58 }
59
60 /*
61 * Except virtual bus, all PCI buses should
62 * have device nodes.
63 */
64 dn = pci_bus_to_OF_node(pbus);
65 pdn = dn ? PCI_DN(dn) : NULL;
66
67 return pdn;
68}
69
70struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
71 int devfn)
72{
73 struct device_node *dn = NULL;
74 struct pci_dn *parent, *pdn;
75 struct pci_dev *pdev = NULL;
76
77 /* Fast path: fetch from PCI device */
78 list_for_each_entry(pdev, &bus->devices, bus_list) {
79 if (pdev->devfn == devfn) {
80 if (pdev->dev.archdata.pci_data)
81 return pdev->dev.archdata.pci_data;
82
83 dn = pci_device_to_OF_node(pdev);
84 break;
85 }
86 }
87
88 /* Fast path: fetch from device node */
89 pdn = dn ? PCI_DN(dn) : NULL;
90 if (pdn)
91 return pdn;
92
93 /* Slow path: fetch from firmware data hierarchy */
94 parent = pci_bus_to_pdn(bus);
95 if (!parent)
96 return NULL;
97
98 list_for_each_entry(pdn, &parent->child_list, list) {
99 if (pdn->busno == bus->number &&
100 pdn->devfn == devfn)
101 return pdn;
102 }
103
104 return NULL;
105}
106
35struct pci_dn *pci_get_pdn(struct pci_dev *pdev) 107struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
36{ 108{
37 struct device_node *dn = pci_device_to_OF_node(pdev); 109 struct device_node *dn;
38 if (!dn) 110 struct pci_dn *parent, *pdn;
111
112 /* Search device directly */
113 if (pdev->dev.archdata.pci_data)
114 return pdev->dev.archdata.pci_data;
115
116 /* Check device node */
117 dn = pci_device_to_OF_node(pdev);
118 pdn = dn ? PCI_DN(dn) : NULL;
119 if (pdn)
120 return pdn;
121
122 /*
123 * VFs don't have device nodes. We hook their
124 * firmware data to PF's bridge.
125 */
126 parent = pci_bus_to_pdn(pdev->bus);
127 if (!parent)
128 return NULL;
129
130 list_for_each_entry(pdn, &parent->child_list, list) {
131 if (pdn->busno == pdev->bus->number &&
132 pdn->devfn == pdev->devfn)
133 return pdn;
134 }
135
136 return NULL;
137}
138
139#ifdef CONFIG_PCI_IOV
140static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
141 struct pci_dev *pdev,
142 int busno, int devfn)
143{
144 struct pci_dn *pdn;
145
146 /* Except PHB, we always have the parent */
147 if (!parent)
148 return NULL;
149
150 pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
151 if (!pdn) {
152 dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__);
39 return NULL; 153 return NULL;
40 return PCI_DN(dn); 154 }
155
156 pdn->phb = parent->phb;
157 pdn->parent = parent;
158 pdn->busno = busno;
159 pdn->devfn = devfn;
160#ifdef CONFIG_PPC_POWERNV
161 pdn->pe_number = IODA_INVALID_PE;
162#endif
163 INIT_LIST_HEAD(&pdn->child_list);
164 INIT_LIST_HEAD(&pdn->list);
165 list_add_tail(&pdn->list, &parent->child_list);
166
167 /*
168 * If we already have PCI device instance, lets
169 * bind them.
170 */
171 if (pdev)
172 pdev->dev.archdata.pci_data = pdn;
173
174 return pdn;
175}
176#endif
177
178struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
179{
180#ifdef CONFIG_PCI_IOV
181 struct pci_dn *parent, *pdn;
182 int i;
183
184 /* Only support IOV for now */
185 if (!pdev->is_physfn)
186 return pci_get_pdn(pdev);
187
188 /* Check if VFs have been populated */
189 pdn = pci_get_pdn(pdev);
190 if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
191 return NULL;
192
193 pdn->flags |= PCI_DN_FLAG_IOV_VF;
194 parent = pci_bus_to_pdn(pdev->bus);
195 if (!parent)
196 return NULL;
197
198 for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
199 pdn = add_one_dev_pci_data(parent, NULL,
200 pci_iov_virtfn_bus(pdev, i),
201 pci_iov_virtfn_devfn(pdev, i));
202 if (!pdn) {
203 dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
204 __func__, i);
205 return NULL;
206 }
207 }
208#endif /* CONFIG_PCI_IOV */
209
210 return pci_get_pdn(pdev);
211}
212
213void remove_dev_pci_data(struct pci_dev *pdev)
214{
215#ifdef CONFIG_PCI_IOV
216 struct pci_dn *parent;
217 struct pci_dn *pdn, *tmp;
218 int i;
219
220 /*
221 * VF and VF PE are created/released dynamically, so we need to
222 * bind/unbind them. Otherwise the VF and VF PE would be mismatched
223 * when re-enabling SR-IOV.
224 */
225 if (pdev->is_virtfn) {
226 pdn = pci_get_pdn(pdev);
227#ifdef CONFIG_PPC_POWERNV
228 pdn->pe_number = IODA_INVALID_PE;
229#endif
230 return;
231 }
232
233 /* Only support IOV PF for now */
234 if (!pdev->is_physfn)
235 return;
236
237 /* Check if VFs have been populated */
238 pdn = pci_get_pdn(pdev);
239 if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
240 return;
241
242 pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
243 parent = pci_bus_to_pdn(pdev->bus);
244 if (!parent)
245 return;
246
247 /*
248 * We might introduce flag to pci_dn in future
249 * so that we can release VF's firmware data in
250 * a batch mode.
251 */
252 for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
253 list_for_each_entry_safe(pdn, tmp,
254 &parent->child_list, list) {
255 if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
256 pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
257 continue;
258
259 if (!list_empty(&pdn->list))
260 list_del(&pdn->list);
261
262 kfree(pdn);
263 }
264 }
265#endif /* CONFIG_PCI_IOV */
41} 266}
42 267
43/* 268/*
@@ -49,6 +274,7 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
49 struct pci_controller *phb = data; 274 struct pci_controller *phb = data;
50 const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL); 275 const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
51 const __be32 *regs; 276 const __be32 *regs;
277 struct device_node *parent;
52 struct pci_dn *pdn; 278 struct pci_dn *pdn;
53 279
54 pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); 280 pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
@@ -69,7 +295,25 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
69 pdn->devfn = (addr >> 8) & 0xff; 295 pdn->devfn = (addr >> 8) & 0xff;
70 } 296 }
71 297
298 /* vendor/device IDs and class code */
299 regs = of_get_property(dn, "vendor-id", NULL);
300 pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
301 regs = of_get_property(dn, "device-id", NULL);
302 pdn->device_id = regs ? of_read_number(regs, 1) : 0;
303 regs = of_get_property(dn, "class-code", NULL);
304 pdn->class_code = regs ? of_read_number(regs, 1) : 0;
305
306 /* Extended config space */
72 pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1); 307 pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
308
309 /* Attach to parent node */
310 INIT_LIST_HEAD(&pdn->child_list);
311 INIT_LIST_HEAD(&pdn->list);
312 parent = of_get_parent(dn);
313 pdn->parent = parent ? PCI_DN(parent) : NULL;
314 if (pdn->parent)
315 list_add_tail(&pdn->list, &pdn->parent->child_list);
316
73 return NULL; 317 return NULL;
74} 318}
75 319
@@ -131,6 +375,46 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
131 return NULL; 375 return NULL;
132} 376}
133 377
378static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
379 struct pci_dn *pdn)
380{
381 struct list_head *next = pdn->child_list.next;
382
383 if (next != &pdn->child_list)
384 return list_entry(next, struct pci_dn, list);
385
386 while (1) {
387 if (pdn == root)
388 return NULL;
389
390 next = pdn->list.next;
391 if (next != &pdn->parent->child_list)
392 break;
393
394 pdn = pdn->parent;
395 }
396
397 return list_entry(next, struct pci_dn, list);
398}
399
400void *traverse_pci_dn(struct pci_dn *root,
401 void *(*fn)(struct pci_dn *, void *),
402 void *data)
403{
404 struct pci_dn *pdn = root;
405 void *ret;
406
407 /* Only scan the child nodes */
408 for (pdn = pci_dn_next_one(root, pdn); pdn;
409 pdn = pci_dn_next_one(root, pdn)) {
410 ret = fn(pdn, data);
411 if (ret)
412 return ret;
413 }
414
415 return NULL;
416}
417
134/** 418/**
135 * pci_devs_phb_init_dynamic - setup pci devices under this PHB 419 * pci_devs_phb_init_dynamic - setup pci devices under this PHB
136 * phb: pci-to-host bridge (top-level bridge connecting to cpu) 420 * phb: pci-to-host bridge (top-level bridge connecting to cpu)
@@ -147,8 +431,12 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
147 /* PHB nodes themselves must not match */ 431 /* PHB nodes themselves must not match */
148 update_dn_pci_info(dn, phb); 432 update_dn_pci_info(dn, phb);
149 pdn = dn->data; 433 pdn = dn->data;
150 if (pdn) 434 if (pdn) {
151 pdn->devfn = pdn->busno = -1; 435 pdn->devfn = pdn->busno = -1;
436 pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
437 pdn->phb = phb;
438 phb->pci_data = pdn;
439 }
152 440
153 /* Update dn->phb ptrs for new phb and children devices */ 441 /* Update dn->phb ptrs for new phb and children devices */
154 traverse_pci_devices(dn, update_dn_pci_info, phb); 442 traverse_pci_devices(dn, update_dn_pci_info, phb);
@@ -171,3 +459,16 @@ void __init pci_devs_phb_init(void)
171 list_for_each_entry_safe(phb, tmp, &hose_list, list_node) 459 list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
172 pci_devs_phb_init_dynamic(phb); 460 pci_devs_phb_init_dynamic(phb);
173} 461}
462
463static void pci_dev_pdn_setup(struct pci_dev *pdev)
464{
465 struct pci_dn *pdn;
466
467 if (pdev->dev.archdata.pci_data)
468 return;
469
470 /* Setup the fast path */
471 pdn = pci_get_pdn(pdev);
472 pdev->dev.archdata.pci_data = pdn;
473}
474DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index e6245e9c7d8d..42e02a2d570b 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -207,6 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
207{ 207{
208 struct device_node *node = dev->dev.of_node; 208 struct device_node *node = dev->dev.of_node;
209 struct pci_bus *bus; 209 struct pci_bus *bus;
210 struct pci_controller *phb;
210 const __be32 *busrange, *ranges; 211 const __be32 *busrange, *ranges;
211 int len, i, mode; 212 int len, i, mode;
212 struct pci_bus_region region; 213 struct pci_bus_region region;
@@ -286,9 +287,11 @@ void of_scan_pci_bridge(struct pci_dev *dev)
286 bus->number); 287 bus->number);
287 pr_debug(" bus name: %s\n", bus->name); 288 pr_debug(" bus name: %s\n", bus->name);
288 289
290 phb = pci_bus_to_host(bus);
291
289 mode = PCI_PROBE_NORMAL; 292 mode = PCI_PROBE_NORMAL;
290 if (ppc_md.pci_probe_mode) 293 if (phb->controller_ops.probe_mode)
291 mode = ppc_md.pci_probe_mode(bus); 294 mode = phb->controller_ops.probe_mode(bus);
292 pr_debug(" probe mode: %d\n", mode); 295 pr_debug(" probe mode: %d\n", mode);
293 296
294 if (mode == PCI_PROBE_DEVTREE) 297 if (mode == PCI_PROBE_DEVTREE)
@@ -305,7 +308,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
305 const __be32 *reg; 308 const __be32 *reg;
306 int reglen, devfn; 309 int reglen, devfn;
307#ifdef CONFIG_EEH 310#ifdef CONFIG_EEH
308 struct eeh_dev *edev = of_node_to_eeh_dev(dn); 311 struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn));
309#endif 312#endif
310 313
311 pr_debug(" * %s\n", dn->full_name); 314 pr_debug(" * %s\n", dn->full_name);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b4cc7bef6b16..febb50dd5328 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1114,8 +1114,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1114 */ 1114 */
1115extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ 1115extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
1116 1116
1117/*
1118 * Copy architecture-specific thread state
1119 */
1117int copy_thread(unsigned long clone_flags, unsigned long usp, 1120int copy_thread(unsigned long clone_flags, unsigned long usp,
1118 unsigned long arg, struct task_struct *p) 1121 unsigned long kthread_arg, struct task_struct *p)
1119{ 1122{
1120 struct pt_regs *childregs, *kregs; 1123 struct pt_regs *childregs, *kregs;
1121 extern void ret_from_fork(void); 1124 extern void ret_from_fork(void);
@@ -1127,6 +1130,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
1127 sp -= sizeof(struct pt_regs); 1130 sp -= sizeof(struct pt_regs);
1128 childregs = (struct pt_regs *) sp; 1131 childregs = (struct pt_regs *) sp;
1129 if (unlikely(p->flags & PF_KTHREAD)) { 1132 if (unlikely(p->flags & PF_KTHREAD)) {
1133 /* kernel thread */
1130 struct thread_info *ti = (void *)task_stack_page(p); 1134 struct thread_info *ti = (void *)task_stack_page(p);
1131 memset(childregs, 0, sizeof(struct pt_regs)); 1135 memset(childregs, 0, sizeof(struct pt_regs));
1132 childregs->gpr[1] = sp + sizeof(struct pt_regs); 1136 childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -1137,11 +1141,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
1137 clear_tsk_thread_flag(p, TIF_32BIT); 1141 clear_tsk_thread_flag(p, TIF_32BIT);
1138 childregs->softe = 1; 1142 childregs->softe = 1;
1139#endif 1143#endif
1140 childregs->gpr[15] = arg; 1144 childregs->gpr[15] = kthread_arg;
1141 p->thread.regs = NULL; /* no user register state */ 1145 p->thread.regs = NULL; /* no user register state */
1142 ti->flags |= _TIF_RESTOREALL; 1146 ti->flags |= _TIF_RESTOREALL;
1143 f = ret_from_kernel_thread; 1147 f = ret_from_kernel_thread;
1144 } else { 1148 } else {
1149 /* user thread */
1145 struct pt_regs *regs = current_pt_regs(); 1150 struct pt_regs *regs = current_pt_regs();
1146 CHECK_FULL_REGS(regs); 1151 CHECK_FULL_REGS(regs);
1147 *childregs = *regs; 1152 *childregs = *regs;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1a85d8f96739..fd1fe4c37599 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2898,7 +2898,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2898 * Call OF "quiesce" method to shut down pending DMA's from 2898 * Call OF "quiesce" method to shut down pending DMA's from
2899 * devices etc... 2899 * devices etc...
2900 */ 2900 */
2901 prom_printf("Calling quiesce...\n"); 2901 prom_printf("Quiescing Open Firmware ...\n");
2902 call_prom("quiesce", 0, 0); 2902 call_prom("quiesce", 0, 0);
2903 2903
2904 /* 2904 /*
@@ -2910,7 +2910,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2910 2910
2911 /* Don't print anything after quiesce under OPAL, it crashes OFW */ 2911 /* Don't print anything after quiesce under OPAL, it crashes OFW */
2912 if (of_platform != PLATFORM_OPAL) { 2912 if (of_platform != PLATFORM_OPAL) {
2913 prom_printf("returning from prom_init\n"); 2913 prom_printf("Booting Linux via __start() ...\n");
2914 prom_debug("->dt_header_start=0x%x\n", hdr); 2914 prom_debug("->dt_header_start=0x%x\n", hdr);
2915 } 2915 }
2916 2916
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 21c45a2d0706..7a488c108410 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -401,7 +401,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
401 buf = altbuf; 401 buf = altbuf;
402 } else { 402 } else {
403 buf = rtas_err_buf; 403 buf = rtas_err_buf;
404 if (mem_init_done) 404 if (slab_is_available())
405 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC); 405 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
406 } 406 }
407 if (buf) 407 if (buf)
@@ -461,7 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
461 461
462 if (buff_copy) { 462 if (buff_copy) {
463 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); 463 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
464 if (mem_init_done) 464 if (slab_is_available())
465 kfree(buff_copy); 465 kfree(buff_copy);
466 } 466 }
467 return ret; 467 return ret;
@@ -897,7 +897,7 @@ int rtas_offline_cpus_mask(cpumask_var_t cpus)
897} 897}
898EXPORT_SYMBOL(rtas_offline_cpus_mask); 898EXPORT_SYMBOL(rtas_offline_cpus_mask);
899 899
900int rtas_ibm_suspend_me(u64 handle, int *vasi_return) 900int rtas_ibm_suspend_me(u64 handle)
901{ 901{
902 long state; 902 long state;
903 long rc; 903 long rc;
@@ -919,13 +919,11 @@ int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
919 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc); 919 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
920 return rc; 920 return rc;
921 } else if (state == H_VASI_ENABLED) { 921 } else if (state == H_VASI_ENABLED) {
922 *vasi_return = RTAS_NOT_SUSPENDABLE; 922 return -EAGAIN;
923 return 0;
924 } else if (state != H_VASI_SUSPENDING) { 923 } else if (state != H_VASI_SUSPENDING) {
925 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n", 924 printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
926 state); 925 state);
927 *vasi_return = -1; 926 return -EIO;
928 return 0;
929 } 927 }
930 928
931 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) 929 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
@@ -972,7 +970,7 @@ out:
972 return atomic_read(&data.error); 970 return atomic_read(&data.error);
973} 971}
974#else /* CONFIG_PPC_PSERIES */ 972#else /* CONFIG_PPC_PSERIES */
975int rtas_ibm_suspend_me(u64 handle, int *vasi_return) 973int rtas_ibm_suspend_me(u64 handle)
976{ 974{
977 return -ENOSYS; 975 return -ENOSYS;
978} 976}
@@ -1022,7 +1020,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
1022 unsigned long flags; 1020 unsigned long flags;
1023 char *buff_copy, *errbuf = NULL; 1021 char *buff_copy, *errbuf = NULL;
1024 int nargs, nret, token; 1022 int nargs, nret, token;
1025 int rc;
1026 1023
1027 if (!capable(CAP_SYS_ADMIN)) 1024 if (!capable(CAP_SYS_ADMIN))
1028 return -EPERM; 1025 return -EPERM;
@@ -1054,15 +1051,18 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
1054 if (token == ibm_suspend_me_token) { 1051 if (token == ibm_suspend_me_token) {
1055 1052
1056 /* 1053 /*
1057 * rtas_ibm_suspend_me assumes args are in cpu endian, or at least the 1054 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
1058 * hcall within it requires it. 1055 * endian, or at least the hcall within it requires it.
1059 */ 1056 */
1060 int vasi_rc = 0; 1057 int rc = 0;
1061 u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32) 1058 u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
1062 | be32_to_cpu(args.args[1]); 1059 | be32_to_cpu(args.args[1]);
1063 rc = rtas_ibm_suspend_me(handle, &vasi_rc); 1060 rc = rtas_ibm_suspend_me(handle);
1064 args.rets[0] = cpu_to_be32(vasi_rc); 1061 if (rc == -EAGAIN)
1065 if (rc) 1062 args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
1063 else if (rc == -EIO)
1064 args.rets[0] = cpu_to_be32(-1);
1065 else if (rc)
1066 return rc; 1066 return rc;
1067 goto copy_return; 1067 goto copy_return;
1068 } 1068 }
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ce230da2c015..73f1934582c2 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -113,7 +113,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
113 113
114 ret = rtas_read_config(pdn, where, size, val); 114 ret = rtas_read_config(pdn, where, size, val);
115 if (*val == EEH_IO_ERROR_VALUE(size) && 115 if (*val == EEH_IO_ERROR_VALUE(size) &&
116 eeh_dev_check_failure(of_node_to_eeh_dev(dn))) 116 eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
117 return PCIBIOS_DEVICE_NOT_FOUND; 117 return PCIBIOS_DEVICE_NOT_FOUND;
118 118
119 return ret; 119 return ret;
@@ -277,50 +277,3 @@ int rtas_setup_phb(struct pci_controller *phb)
277 277
278 return 0; 278 return 0;
279} 279}
280
281void __init find_and_init_phbs(void)
282{
283 struct device_node *node;
284 struct pci_controller *phb;
285 struct device_node *root = of_find_node_by_path("/");
286
287 for_each_child_of_node(root, node) {
288 if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
289 strcmp(node->type, "pciex") != 0))
290 continue;
291
292 phb = pcibios_alloc_controller(node);
293 if (!phb)
294 continue;
295 rtas_setup_phb(phb);
296 pci_process_bridge_OF_ranges(phb, node, 0);
297 isa_bridge_find_early(phb);
298 }
299
300 of_node_put(root);
301 pci_devs_phb_init();
302
303 /*
304 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
305 * in chosen.
306 */
307 if (of_chosen) {
308 const int *prop;
309
310 prop = of_get_property(of_chosen,
311 "linux,pci-probe-only", NULL);
312 if (prop) {
313 if (*prop)
314 pci_add_flags(PCI_PROBE_ONLY);
315 else
316 pci_clear_flags(PCI_PROBE_ONLY);
317 }
318
319#ifdef CONFIG_PPC32 /* Will be made generic soon */
320 prop = of_get_property(of_chosen,
321 "linux,pci-assign-all-buses", NULL);
322 if (prop && *prop)
323 pci_add_flags(PCI_REASSIGN_ALL_BUS);
324#endif /* CONFIG_PPC32 */
325 }
326}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 49f553bbb360..c69671c03c3b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -37,6 +37,7 @@
37#include <linux/memblock.h> 37#include <linux/memblock.h>
38#include <linux/hugetlb.h> 38#include <linux/hugetlb.h>
39#include <linux/memory.h> 39#include <linux/memory.h>
40#include <linux/nmi.h>
40 41
41#include <asm/io.h> 42#include <asm/io.h>
42#include <asm/kdump.h> 43#include <asm/kdump.h>
@@ -779,3 +780,22 @@ unsigned long memory_block_size_bytes(void)
779struct ppc_pci_io ppc_pci_io; 780struct ppc_pci_io ppc_pci_io;
780EXPORT_SYMBOL(ppc_pci_io); 781EXPORT_SYMBOL(ppc_pci_io);
781#endif 782#endif
783
784#ifdef CONFIG_HARDLOCKUP_DETECTOR
785u64 hw_nmi_get_sample_period(int watchdog_thresh)
786{
787 return ppc_proc_freq * watchdog_thresh;
788}
789
790/*
791 * The hardlockup detector breaks PMU event based branches and is likely
792 * to get false positives in KVM guests, so disable it by default.
793 */
794static int __init disable_hardlockup_detector(void)
795{
796 hardlockup_detector_disable();
797
798 return 0;
799}
800early_initcall(disable_hardlockup_detector);
801#endif
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index b2702e87db0d..5fa92706444b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
121 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, 121 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
122 (u64)len_high << 32 | len_low, advice); 122 (u64)len_high << 32 | len_low, advice);
123} 123}
124
125long sys_switch_endian(void)
126{
127 struct thread_info *ti;
128
129 current->thread.regs->msr ^= MSR_LE;
130
131 /*
132 * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
133 * userspace. That also has the effect of restoring the non-volatile
134 * GPRs, so we saved them on the way in here.
135 */
136 ti = current_thread_info();
137 ti->flags |= _TIF_RESTOREALL;
138
139 return 0;
140}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 7ab5d434e2ee..4d6b1d3a747f 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -22,6 +22,7 @@
22#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) 22#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
23#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) 23#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
24#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) 24#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
25#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
25#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) 26#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
26#else 27#else
27#define SYSCALL(func) .long sys_##func 28#define SYSCALL(func) .long sys_##func
@@ -29,6 +30,7 @@
29#define PPC_SYS(func) .long ppc_##func 30#define PPC_SYS(func) .long ppc_##func
30#define OLDSYS(func) .long sys_##func 31#define OLDSYS(func) .long sys_##func
31#define SYS32ONLY(func) .long sys_##func 32#define SYS32ONLY(func) .long sys_##func
33#define PPC64ONLY(func) .long sys_ni_syscall
32#define SYSX(f, f3264, f32) .long f32 34#define SYSX(f, f3264, f32) .long f32
33#endif 35#endif
34#define SYSCALL_SPU(func) SYSCALL(func) 36#define SYSCALL_SPU(func) SYSCALL(func)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 238aa63ced8f..2384129f5893 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -21,9 +21,11 @@
21#ifdef CONFIG_PPC64 21#ifdef CONFIG_PPC64
22#define OLDSYS(func) -1 22#define OLDSYS(func) -1
23#define SYS32ONLY(func) -1 23#define SYS32ONLY(func) -1
24#define PPC64ONLY(func) __NR_##func
24#else 25#else
25#define OLDSYS(func) __NR_old##func 26#define OLDSYS(func) __NR_old##func
26#define SYS32ONLY(func) __NR_##func 27#define SYS32ONLY(func) __NR_##func
28#define PPC64ONLY(func) -1
27#endif 29#endif
28#define SYSX(f, f3264, f32) -1 30#define SYSX(f, f3264, f32) -1
29 31
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2a324f4cb1b9..5754b226da7e 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -152,9 +152,9 @@ _GLOBAL(tm_reclaim)
152 152
153 addi r7, r3, THREAD_TRANSACT_VRSTATE 153 addi r7, r3, THREAD_TRANSACT_VRSTATE
154 SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */ 154 SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
155 mfvscr vr0 155 mfvscr v0
156 li r6, VRSTATE_VSCR 156 li r6, VRSTATE_VSCR
157 stvx vr0, r7, r6 157 stvx v0, r7, r6
158dont_backup_vec: 158dont_backup_vec:
159 mfspr r0, SPRN_VRSAVE 159 mfspr r0, SPRN_VRSAVE
160 std r0, THREAD_TRANSACT_VRSAVE(r3) 160 std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -359,8 +359,8 @@ _GLOBAL(__tm_recheckpoint)
359 359
360 addi r8, r3, THREAD_VRSTATE 360 addi r8, r3, THREAD_VRSTATE
361 li r5, VRSTATE_VSCR 361 li r5, VRSTATE_VSCR
362 lvx vr0, r8, r5 362 lvx v0, r8, r5
363 mtvscr vr0 363 mtvscr v0
364 REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */ 364 REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
365dont_restore_vec: 365dont_restore_vec:
366 ld r5, THREAD_VRSAVE(r3) 366 ld r5, THREAD_VRSAVE(r3)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index b7aa07279a63..7cc38b5b58bc 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -46,8 +46,6 @@ void __init udbg_early_init(void)
46#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) 46#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
47 /* Maple real mode debug */ 47 /* Maple real mode debug */
48 udbg_init_maple_realmode(); 48 udbg_init_maple_realmode();
49#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT)
50 udbg_init_debug_beat();
51#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) 49#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
52 udbg_init_pas_realmode(); 50 udbg_init_pas_realmode();
53#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) 51#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 74f8050518d6..f5c80d567d8d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -24,8 +24,8 @@ _GLOBAL(do_load_up_transact_altivec)
24 stw r4,THREAD_USED_VR(r3) 24 stw r4,THREAD_USED_VR(r3)
25 25
26 li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR 26 li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
27 lvx vr0,r10,r3 27 lvx v0,r10,r3
28 mtvscr vr0 28 mtvscr v0
29 addi r10,r3,THREAD_TRANSACT_VRSTATE 29 addi r10,r3,THREAD_TRANSACT_VRSTATE
30 REST_32VRS(0,r4,r10) 30 REST_32VRS(0,r4,r10)
31 31
@@ -52,8 +52,8 @@ _GLOBAL(vec_enable)
52 */ 52 */
53_GLOBAL(load_vr_state) 53_GLOBAL(load_vr_state)
54 li r4,VRSTATE_VSCR 54 li r4,VRSTATE_VSCR
55 lvx vr0,r4,r3 55 lvx v0,r4,r3
56 mtvscr vr0 56 mtvscr v0
57 REST_32VRS(0,r4,r3) 57 REST_32VRS(0,r4,r3)
58 blr 58 blr
59 59
@@ -63,9 +63,9 @@ _GLOBAL(load_vr_state)
63 */ 63 */
64_GLOBAL(store_vr_state) 64_GLOBAL(store_vr_state)
65 SAVE_32VRS(0, r4, r3) 65 SAVE_32VRS(0, r4, r3)
66 mfvscr vr0 66 mfvscr v0
67 li r4, VRSTATE_VSCR 67 li r4, VRSTATE_VSCR
68 stvx vr0, r4, r3 68 stvx v0, r4, r3
69 blr 69 blr
70 70
71/* 71/*
@@ -104,9 +104,9 @@ _GLOBAL(load_up_altivec)
104 addi r4,r4,THREAD 104 addi r4,r4,THREAD
105 addi r6,r4,THREAD_VRSTATE 105 addi r6,r4,THREAD_VRSTATE
106 SAVE_32VRS(0,r5,r6) 106 SAVE_32VRS(0,r5,r6)
107 mfvscr vr0 107 mfvscr v0
108 li r10,VRSTATE_VSCR 108 li r10,VRSTATE_VSCR
109 stvx vr0,r10,r6 109 stvx v0,r10,r6
110 /* Disable VMX for last_task_used_altivec */ 110 /* Disable VMX for last_task_used_altivec */
111 PPC_LL r5,PT_REGS(r4) 111 PPC_LL r5,PT_REGS(r4)
112 toreal(r5) 112 toreal(r5)
@@ -142,8 +142,8 @@ _GLOBAL(load_up_altivec)
142 li r4,1 142 li r4,1
143 li r10,VRSTATE_VSCR 143 li r10,VRSTATE_VSCR
144 stw r4,THREAD_USED_VR(r5) 144 stw r4,THREAD_USED_VR(r5)
145 lvx vr0,r10,r6 145 lvx v0,r10,r6
146 mtvscr vr0 146 mtvscr v0
147 REST_32VRS(0,r4,r6) 147 REST_32VRS(0,r4,r6)
148#ifndef CONFIG_SMP 148#ifndef CONFIG_SMP
149 /* Update last_task_used_altivec to 'current' */ 149 /* Update last_task_used_altivec to 'current' */
@@ -186,9 +186,9 @@ _GLOBAL(giveup_altivec)
186 addi r7,r3,THREAD_VRSTATE 186 addi r7,r3,THREAD_VRSTATE
1872: PPC_LCMPI 0,r5,0 1872: PPC_LCMPI 0,r5,0
188 SAVE_32VRS(0,r4,r7) 188 SAVE_32VRS(0,r4,r7)
189 mfvscr vr0 189 mfvscr v0
190 li r4,VRSTATE_VSCR 190 li r4,VRSTATE_VSCR
191 stvx vr0,r4,r7 191 stvx v0,r4,r7
192 beq 1f 192 beq 1f
193 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) 193 PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
194#ifdef CONFIG_VSX 194#ifdef CONFIG_VSX
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 60081bd75847..93b5f5c9b445 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -84,7 +84,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
84 } 84 }
85 if (dsisr & DSISR_MC_TLB_MULTI) { 85 if (dsisr & DSISR_MC_TLB_MULTI) {
86 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 86 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
87 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID); 87 cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
88 dsisr &= ~DSISR_MC_TLB_MULTI; 88 dsisr &= ~DSISR_MC_TLB_MULTI;
89 } 89 }
90 /* Any other errors we don't understand? */ 90 /* Any other errors we don't understand? */
@@ -102,7 +102,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
102 break; 102 break;
103 case SRR1_MC_IFETCH_TLBMULTI: 103 case SRR1_MC_IFETCH_TLBMULTI:
104 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 104 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
105 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID); 105 cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
106 break; 106 break;
107 default: 107 default:
108 handled = 0; 108 handled = 0;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 24bfe401373e..91bbc845ac66 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -720,7 +720,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
720 return; 720 return;
721 } 721 }
722 722
723 if (vcpu->arch.mmio_is_bigendian) { 723 if (!vcpu->arch.mmio_host_swabbed) {
724 switch (run->mmio.len) { 724 switch (run->mmio.len) {
725 case 8: gpr = *(u64 *)run->mmio.data; break; 725 case 8: gpr = *(u64 *)run->mmio.data; break;
726 case 4: gpr = *(u32 *)run->mmio.data; break; 726 case 4: gpr = *(u32 *)run->mmio.data; break;
@@ -728,10 +728,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
728 case 1: gpr = *(u8 *)run->mmio.data; break; 728 case 1: gpr = *(u8 *)run->mmio.data; break;
729 } 729 }
730 } else { 730 } else {
731 /* Convert BE data from userland back to LE. */
732 switch (run->mmio.len) { 731 switch (run->mmio.len) {
733 case 4: gpr = ld_le32((u32 *)run->mmio.data); break; 732 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
734 case 2: gpr = ld_le16((u16 *)run->mmio.data); break; 733 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
734 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
735 case 1: gpr = *(u8 *)run->mmio.data; break; 735 case 1: gpr = *(u8 *)run->mmio.data; break;
736 } 736 }
737 } 737 }
@@ -780,14 +780,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
780 int is_default_endian) 780 int is_default_endian)
781{ 781{
782 int idx, ret; 782 int idx, ret;
783 int is_bigendian; 783 bool host_swabbed;
784 784
785 /* Pity C doesn't have a logical XOR operator */
785 if (kvmppc_need_byteswap(vcpu)) { 786 if (kvmppc_need_byteswap(vcpu)) {
786 /* Default endianness is "little endian". */ 787 host_swabbed = is_default_endian;
787 is_bigendian = !is_default_endian;
788 } else { 788 } else {
789 /* Default endianness is "big endian". */ 789 host_swabbed = !is_default_endian;
790 is_bigendian = is_default_endian;
791 } 790 }
792 791
793 if (bytes > sizeof(run->mmio.data)) { 792 if (bytes > sizeof(run->mmio.data)) {
@@ -800,7 +799,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
800 run->mmio.is_write = 0; 799 run->mmio.is_write = 0;
801 800
802 vcpu->arch.io_gpr = rt; 801 vcpu->arch.io_gpr = rt;
803 vcpu->arch.mmio_is_bigendian = is_bigendian; 802 vcpu->arch.mmio_host_swabbed = host_swabbed;
804 vcpu->mmio_needed = 1; 803 vcpu->mmio_needed = 1;
805 vcpu->mmio_is_write = 0; 804 vcpu->mmio_is_write = 0;
806 vcpu->arch.mmio_sign_extend = 0; 805 vcpu->arch.mmio_sign_extend = 0;
@@ -840,14 +839,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
840{ 839{
841 void *data = run->mmio.data; 840 void *data = run->mmio.data;
842 int idx, ret; 841 int idx, ret;
843 int is_bigendian; 842 bool host_swabbed;
844 843
844 /* Pity C doesn't have a logical XOR operator */
845 if (kvmppc_need_byteswap(vcpu)) { 845 if (kvmppc_need_byteswap(vcpu)) {
846 /* Default endianness is "little endian". */ 846 host_swabbed = is_default_endian;
847 is_bigendian = !is_default_endian;
848 } else { 847 } else {
849 /* Default endianness is "big endian". */ 848 host_swabbed = !is_default_endian;
850 is_bigendian = is_default_endian;
851 } 849 }
852 850
853 if (bytes > sizeof(run->mmio.data)) { 851 if (bytes > sizeof(run->mmio.data)) {
@@ -862,7 +860,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
862 vcpu->mmio_is_write = 1; 860 vcpu->mmio_is_write = 1;
863 861
864 /* Store the value at the lowest bytes in 'data'. */ 862 /* Store the value at the lowest bytes in 'data'. */
865 if (is_bigendian) { 863 if (!host_swabbed) {
866 switch (bytes) { 864 switch (bytes) {
867 case 8: *(u64 *)data = val; break; 865 case 8: *(u64 *)data = val; break;
868 case 4: *(u32 *)data = val; break; 866 case 4: *(u32 *)data = val; break;
@@ -870,11 +868,11 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
870 case 1: *(u8 *)data = val; break; 868 case 1: *(u8 *)data = val; break;
871 } 869 }
872 } else { 870 } else {
873 /* Store LE value into 'data'. */
874 switch (bytes) { 871 switch (bytes) {
875 case 4: st_le32(data, val); break; 872 case 8: *(u64 *)data = swab64(val); break;
876 case 2: st_le16(data, val); break; 873 case 4: *(u32 *)data = swab32(val); break;
877 case 1: *(u8 *)data = val; break; 874 case 2: *(u16 *)data = swab16(val); break;
875 case 1: *(u8 *)data = val; break;
878 } 876 }
879 } 877 }
880 878
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 4a6c2cf890d9..60b0b3fc8fc1 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -10,7 +10,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
10{ 10{
11 void *p; 11 void *p;
12 12
13 if (mem_init_done) 13 if (slab_is_available())
14 p = kzalloc(size, mask); 14 p = kzalloc(size, mask);
15 else { 15 else {
16 p = memblock_virt_alloc(size, 0); 16 p = memblock_virt_alloc(size, 0);
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 55f19f9fd708..6813f80d1eec 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -69,54 +69,6 @@ CACHELINE_BYTES = L1_CACHE_BYTES
69LG_CACHELINE_BYTES = L1_CACHE_SHIFT 69LG_CACHELINE_BYTES = L1_CACHE_SHIFT
70CACHELINE_MASK = (L1_CACHE_BYTES-1) 70CACHELINE_MASK = (L1_CACHE_BYTES-1)
71 71
72/*
73 * Use dcbz on the complete cache lines in the destination
74 * to set them to zero. This requires that the destination
75 * area is cacheable. -- paulus
76 */
77_GLOBAL(cacheable_memzero)
78 mr r5,r4
79 li r4,0
80 addi r6,r3,-4
81 cmplwi 0,r5,4
82 blt 7f
83 stwu r4,4(r6)
84 beqlr
85 andi. r0,r6,3
86 add r5,r0,r5
87 subf r6,r0,r6
88 clrlwi r7,r6,32-LG_CACHELINE_BYTES
89 add r8,r7,r5
90 srwi r9,r8,LG_CACHELINE_BYTES
91 addic. r9,r9,-1 /* total number of complete cachelines */
92 ble 2f
93 xori r0,r7,CACHELINE_MASK & ~3
94 srwi. r0,r0,2
95 beq 3f
96 mtctr r0
974: stwu r4,4(r6)
98 bdnz 4b
993: mtctr r9
100 li r7,4
10110: dcbz r7,r6
102 addi r6,r6,CACHELINE_BYTES
103 bdnz 10b
104 clrlwi r5,r8,32-LG_CACHELINE_BYTES
105 addi r5,r5,4
1062: srwi r0,r5,2
107 mtctr r0
108 bdz 6f
1091: stwu r4,4(r6)
110 bdnz 1b
1116: andi. r5,r5,3
1127: cmpwi 0,r5,0
113 beqlr
114 mtctr r5
115 addi r6,r6,3
1168: stbu r4,1(r6)
117 bdnz 8b
118 blr
119
120_GLOBAL(memset) 72_GLOBAL(memset)
121 rlwimi r4,r4,8,16,23 73 rlwimi r4,r4,8,16,23
122 rlwimi r4,r4,16,0,15 74 rlwimi r4,r4,16,0,15
@@ -142,85 +94,6 @@ _GLOBAL(memset)
142 bdnz 8b 94 bdnz 8b
143 blr 95 blr
144 96
145/*
146 * This version uses dcbz on the complete cache lines in the
147 * destination area to reduce memory traffic. This requires that
148 * the destination area is cacheable.
149 * We only use this version if the source and dest don't overlap.
150 * -- paulus.
151 */
152_GLOBAL(cacheable_memcpy)
153 add r7,r3,r5 /* test if the src & dst overlap */
154 add r8,r4,r5
155 cmplw 0,r4,r7
156 cmplw 1,r3,r8
157 crand 0,0,4 /* cr0.lt &= cr1.lt */
158 blt memcpy /* if regions overlap */
159
160 addi r4,r4,-4
161 addi r6,r3,-4
162 neg r0,r3
163 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
164 beq 58f
165
166 cmplw 0,r5,r0 /* is this more than total to do? */
167 blt 63f /* if not much to do */
168 andi. r8,r0,3 /* get it word-aligned first */
169 subf r5,r0,r5
170 mtctr r8
171 beq+ 61f
17270: lbz r9,4(r4) /* do some bytes */
173 stb r9,4(r6)
174 addi r4,r4,1
175 addi r6,r6,1
176 bdnz 70b
17761: srwi. r0,r0,2
178 mtctr r0
179 beq 58f
18072: lwzu r9,4(r4) /* do some words */
181 stwu r9,4(r6)
182 bdnz 72b
183
18458: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
185 clrlwi r5,r5,32-LG_CACHELINE_BYTES
186 li r11,4
187 mtctr r0
188 beq 63f
18953:
190 dcbz r11,r6
191 COPY_16_BYTES
192#if L1_CACHE_BYTES >= 32
193 COPY_16_BYTES
194#if L1_CACHE_BYTES >= 64
195 COPY_16_BYTES
196 COPY_16_BYTES
197#if L1_CACHE_BYTES >= 128
198 COPY_16_BYTES
199 COPY_16_BYTES
200 COPY_16_BYTES
201 COPY_16_BYTES
202#endif
203#endif
204#endif
205 bdnz 53b
206
20763: srwi. r0,r5,2
208 mtctr r0
209 beq 64f
21030: lwzu r0,4(r4)
211 stwu r0,4(r6)
212 bdnz 30b
213
21464: andi. r0,r5,3
215 mtctr r0
216 beq+ 65f
21740: lbz r0,4(r4)
218 stb r0,4(r6)
219 addi r4,r4,1
220 addi r6,r6,1
221 bdnz 40b
22265: blr
223
224_GLOBAL(memmove) 97_GLOBAL(memmove)
225 cmplw 0,r3,r4 98 cmplw 0,r3,r4
226 bgt backwards_memcpy 99 bgt backwards_memcpy
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index d7dafb3777ac..a84d333ecb09 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -83,23 +83,23 @@ _GLOBAL(copypage_power7)
83 li r12,112 83 li r12,112
84 84
85 .align 5 85 .align 5
861: lvx vr7,r0,r4 861: lvx v7,r0,r4
87 lvx vr6,r4,r6 87 lvx v6,r4,r6
88 lvx vr5,r4,r7 88 lvx v5,r4,r7
89 lvx vr4,r4,r8 89 lvx v4,r4,r8
90 lvx vr3,r4,r9 90 lvx v3,r4,r9
91 lvx vr2,r4,r10 91 lvx v2,r4,r10
92 lvx vr1,r4,r11 92 lvx v1,r4,r11
93 lvx vr0,r4,r12 93 lvx v0,r4,r12
94 addi r4,r4,128 94 addi r4,r4,128
95 stvx vr7,r0,r3 95 stvx v7,r0,r3
96 stvx vr6,r3,r6 96 stvx v6,r3,r6
97 stvx vr5,r3,r7 97 stvx v5,r3,r7
98 stvx vr4,r3,r8 98 stvx v4,r3,r8
99 stvx vr3,r3,r9 99 stvx v3,r3,r9
100 stvx vr2,r3,r10 100 stvx v2,r3,r10
101 stvx vr1,r3,r11 101 stvx v1,r3,r11
102 stvx vr0,r3,r12 102 stvx v0,r3,r12
103 addi r3,r3,128 103 addi r3,r3,128
104 bdnz 1b 104 bdnz 1b
105 105
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 92ee840529bc..da0c568d18c4 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -388,29 +388,29 @@ err3; std r0,0(r3)
388 li r11,48 388 li r11,48
389 389
390 bf cr7*4+3,5f 390 bf cr7*4+3,5f
391err3; lvx vr1,r0,r4 391err3; lvx v1,r0,r4
392 addi r4,r4,16 392 addi r4,r4,16
393err3; stvx vr1,r0,r3 393err3; stvx v1,r0,r3
394 addi r3,r3,16 394 addi r3,r3,16
395 395
3965: bf cr7*4+2,6f 3965: bf cr7*4+2,6f
397err3; lvx vr1,r0,r4 397err3; lvx v1,r0,r4
398err3; lvx vr0,r4,r9 398err3; lvx v0,r4,r9
399 addi r4,r4,32 399 addi r4,r4,32
400err3; stvx vr1,r0,r3 400err3; stvx v1,r0,r3
401err3; stvx vr0,r3,r9 401err3; stvx v0,r3,r9
402 addi r3,r3,32 402 addi r3,r3,32
403 403
4046: bf cr7*4+1,7f 4046: bf cr7*4+1,7f
405err3; lvx vr3,r0,r4 405err3; lvx v3,r0,r4
406err3; lvx vr2,r4,r9 406err3; lvx v2,r4,r9
407err3; lvx vr1,r4,r10 407err3; lvx v1,r4,r10
408err3; lvx vr0,r4,r11 408err3; lvx v0,r4,r11
409 addi r4,r4,64 409 addi r4,r4,64
410err3; stvx vr3,r0,r3 410err3; stvx v3,r0,r3
411err3; stvx vr2,r3,r9 411err3; stvx v2,r3,r9
412err3; stvx vr1,r3,r10 412err3; stvx v1,r3,r10
413err3; stvx vr0,r3,r11 413err3; stvx v0,r3,r11
414 addi r3,r3,64 414 addi r3,r3,64
415 415
4167: sub r5,r5,r6 4167: sub r5,r5,r6
@@ -433,23 +433,23 @@ err3; stvx vr0,r3,r11
433 */ 433 */
434 .align 5 434 .align 5
4358: 4358:
436err4; lvx vr7,r0,r4 436err4; lvx v7,r0,r4
437err4; lvx vr6,r4,r9 437err4; lvx v6,r4,r9
438err4; lvx vr5,r4,r10 438err4; lvx v5,r4,r10
439err4; lvx vr4,r4,r11 439err4; lvx v4,r4,r11
440err4; lvx vr3,r4,r12 440err4; lvx v3,r4,r12
441err4; lvx vr2,r4,r14 441err4; lvx v2,r4,r14
442err4; lvx vr1,r4,r15 442err4; lvx v1,r4,r15
443err4; lvx vr0,r4,r16 443err4; lvx v0,r4,r16
444 addi r4,r4,128 444 addi r4,r4,128
445err4; stvx vr7,r0,r3 445err4; stvx v7,r0,r3
446err4; stvx vr6,r3,r9 446err4; stvx v6,r3,r9
447err4; stvx vr5,r3,r10 447err4; stvx v5,r3,r10
448err4; stvx vr4,r3,r11 448err4; stvx v4,r3,r11
449err4; stvx vr3,r3,r12 449err4; stvx v3,r3,r12
450err4; stvx vr2,r3,r14 450err4; stvx v2,r3,r14
451err4; stvx vr1,r3,r15 451err4; stvx v1,r3,r15
452err4; stvx vr0,r3,r16 452err4; stvx v0,r3,r16
453 addi r3,r3,128 453 addi r3,r3,128
454 bdnz 8b 454 bdnz 8b
455 455
@@ -463,29 +463,29 @@ err4; stvx vr0,r3,r16
463 mtocrf 0x01,r6 463 mtocrf 0x01,r6
464 464
465 bf cr7*4+1,9f 465 bf cr7*4+1,9f
466err3; lvx vr3,r0,r4 466err3; lvx v3,r0,r4
467err3; lvx vr2,r4,r9 467err3; lvx v2,r4,r9
468err3; lvx vr1,r4,r10 468err3; lvx v1,r4,r10
469err3; lvx vr0,r4,r11 469err3; lvx v0,r4,r11
470 addi r4,r4,64 470 addi r4,r4,64
471err3; stvx vr3,r0,r3 471err3; stvx v3,r0,r3
472err3; stvx vr2,r3,r9 472err3; stvx v2,r3,r9
473err3; stvx vr1,r3,r10 473err3; stvx v1,r3,r10
474err3; stvx vr0,r3,r11 474err3; stvx v0,r3,r11
475 addi r3,r3,64 475 addi r3,r3,64
476 476
4779: bf cr7*4+2,10f 4779: bf cr7*4+2,10f
478err3; lvx vr1,r0,r4 478err3; lvx v1,r0,r4
479err3; lvx vr0,r4,r9 479err3; lvx v0,r4,r9
480 addi r4,r4,32 480 addi r4,r4,32
481err3; stvx vr1,r0,r3 481err3; stvx v1,r0,r3
482err3; stvx vr0,r3,r9 482err3; stvx v0,r3,r9
483 addi r3,r3,32 483 addi r3,r3,32
484 484
48510: bf cr7*4+3,11f 48510: bf cr7*4+3,11f
486err3; lvx vr1,r0,r4 486err3; lvx v1,r0,r4
487 addi r4,r4,16 487 addi r4,r4,16
488err3; stvx vr1,r0,r3 488err3; stvx v1,r0,r3
489 addi r3,r3,16 489 addi r3,r3,16
490 490
491 /* Up to 15B to go */ 491 /* Up to 15B to go */
@@ -560,42 +560,42 @@ err3; stw r7,4(r3)
560 li r10,32 560 li r10,32
561 li r11,48 561 li r11,48
562 562
563 LVS(vr16,0,r4) /* Setup permute control vector */ 563 LVS(v16,0,r4) /* Setup permute control vector */
564err3; lvx vr0,0,r4 564err3; lvx v0,0,r4
565 addi r4,r4,16 565 addi r4,r4,16
566 566
567 bf cr7*4+3,5f 567 bf cr7*4+3,5f
568err3; lvx vr1,r0,r4 568err3; lvx v1,r0,r4
569 VPERM(vr8,vr0,vr1,vr16) 569 VPERM(v8,v0,v1,v16)
570 addi r4,r4,16 570 addi r4,r4,16
571err3; stvx vr8,r0,r3 571err3; stvx v8,r0,r3
572 addi r3,r3,16 572 addi r3,r3,16
573 vor vr0,vr1,vr1 573 vor v0,v1,v1
574 574
5755: bf cr7*4+2,6f 5755: bf cr7*4+2,6f
576err3; lvx vr1,r0,r4 576err3; lvx v1,r0,r4
577 VPERM(vr8,vr0,vr1,vr16) 577 VPERM(v8,v0,v1,v16)
578err3; lvx vr0,r4,r9 578err3; lvx v0,r4,r9
579 VPERM(vr9,vr1,vr0,vr16) 579 VPERM(v9,v1,v0,v16)
580 addi r4,r4,32 580 addi r4,r4,32
581err3; stvx vr8,r0,r3 581err3; stvx v8,r0,r3
582err3; stvx vr9,r3,r9 582err3; stvx v9,r3,r9
583 addi r3,r3,32 583 addi r3,r3,32
584 584
5856: bf cr7*4+1,7f 5856: bf cr7*4+1,7f
586err3; lvx vr3,r0,r4 586err3; lvx v3,r0,r4
587 VPERM(vr8,vr0,vr3,vr16) 587 VPERM(v8,v0,v3,v16)
588err3; lvx vr2,r4,r9 588err3; lvx v2,r4,r9
589 VPERM(vr9,vr3,vr2,vr16) 589 VPERM(v9,v3,v2,v16)
590err3; lvx vr1,r4,r10 590err3; lvx v1,r4,r10
591 VPERM(vr10,vr2,vr1,vr16) 591 VPERM(v10,v2,v1,v16)
592err3; lvx vr0,r4,r11 592err3; lvx v0,r4,r11
593 VPERM(vr11,vr1,vr0,vr16) 593 VPERM(v11,v1,v0,v16)
594 addi r4,r4,64 594 addi r4,r4,64
595err3; stvx vr8,r0,r3 595err3; stvx v8,r0,r3
596err3; stvx vr9,r3,r9 596err3; stvx v9,r3,r9
597err3; stvx vr10,r3,r10 597err3; stvx v10,r3,r10
598err3; stvx vr11,r3,r11 598err3; stvx v11,r3,r11
599 addi r3,r3,64 599 addi r3,r3,64
600 600
6017: sub r5,r5,r6 6017: sub r5,r5,r6
@@ -618,31 +618,31 @@ err3; stvx vr11,r3,r11
618 */ 618 */
619 .align 5 619 .align 5
6208: 6208:
621err4; lvx vr7,r0,r4 621err4; lvx v7,r0,r4
622 VPERM(vr8,vr0,vr7,vr16) 622 VPERM(v8,v0,v7,v16)
623err4; lvx vr6,r4,r9 623err4; lvx v6,r4,r9
624 VPERM(vr9,vr7,vr6,vr16) 624 VPERM(v9,v7,v6,v16)
625err4; lvx vr5,r4,r10 625err4; lvx v5,r4,r10
626 VPERM(vr10,vr6,vr5,vr16) 626 VPERM(v10,v6,v5,v16)
627err4; lvx vr4,r4,r11 627err4; lvx v4,r4,r11
628 VPERM(vr11,vr5,vr4,vr16) 628 VPERM(v11,v5,v4,v16)
629err4; lvx vr3,r4,r12 629err4; lvx v3,r4,r12
630 VPERM(vr12,vr4,vr3,vr16) 630 VPERM(v12,v4,v3,v16)
631err4; lvx vr2,r4,r14 631err4; lvx v2,r4,r14
632 VPERM(vr13,vr3,vr2,vr16) 632 VPERM(v13,v3,v2,v16)
633err4; lvx vr1,r4,r15 633err4; lvx v1,r4,r15
634 VPERM(vr14,vr2,vr1,vr16) 634 VPERM(v14,v2,v1,v16)
635err4; lvx vr0,r4,r16 635err4; lvx v0,r4,r16
636 VPERM(vr15,vr1,vr0,vr16) 636 VPERM(v15,v1,v0,v16)
637 addi r4,r4,128 637 addi r4,r4,128
638err4; stvx vr8,r0,r3 638err4; stvx v8,r0,r3
639err4; stvx vr9,r3,r9 639err4; stvx v9,r3,r9
640err4; stvx vr10,r3,r10 640err4; stvx v10,r3,r10
641err4; stvx vr11,r3,r11 641err4; stvx v11,r3,r11
642err4; stvx vr12,r3,r12 642err4; stvx v12,r3,r12
643err4; stvx vr13,r3,r14 643err4; stvx v13,r3,r14
644err4; stvx vr14,r3,r15 644err4; stvx v14,r3,r15
645err4; stvx vr15,r3,r16 645err4; stvx v15,r3,r16
646 addi r3,r3,128 646 addi r3,r3,128
647 bdnz 8b 647 bdnz 8b
648 648
@@ -656,36 +656,36 @@ err4; stvx vr15,r3,r16
656 mtocrf 0x01,r6 656 mtocrf 0x01,r6
657 657
658 bf cr7*4+1,9f 658 bf cr7*4+1,9f
659err3; lvx vr3,r0,r4 659err3; lvx v3,r0,r4
660 VPERM(vr8,vr0,vr3,vr16) 660 VPERM(v8,v0,v3,v16)
661err3; lvx vr2,r4,r9 661err3; lvx v2,r4,r9
662 VPERM(vr9,vr3,vr2,vr16) 662 VPERM(v9,v3,v2,v16)
663err3; lvx vr1,r4,r10 663err3; lvx v1,r4,r10
664 VPERM(vr10,vr2,vr1,vr16) 664 VPERM(v10,v2,v1,v16)
665err3; lvx vr0,r4,r11 665err3; lvx v0,r4,r11
666 VPERM(vr11,vr1,vr0,vr16) 666 VPERM(v11,v1,v0,v16)
667 addi r4,r4,64 667 addi r4,r4,64
668err3; stvx vr8,r0,r3 668err3; stvx v8,r0,r3
669err3; stvx vr9,r3,r9 669err3; stvx v9,r3,r9
670err3; stvx vr10,r3,r10 670err3; stvx v10,r3,r10
671err3; stvx vr11,r3,r11 671err3; stvx v11,r3,r11
672 addi r3,r3,64 672 addi r3,r3,64
673 673
6749: bf cr7*4+2,10f 6749: bf cr7*4+2,10f
675err3; lvx vr1,r0,r4 675err3; lvx v1,r0,r4
676 VPERM(vr8,vr0,vr1,vr16) 676 VPERM(v8,v0,v1,v16)
677err3; lvx vr0,r4,r9 677err3; lvx v0,r4,r9
678 VPERM(vr9,vr1,vr0,vr16) 678 VPERM(v9,v1,v0,v16)
679 addi r4,r4,32 679 addi r4,r4,32
680err3; stvx vr8,r0,r3 680err3; stvx v8,r0,r3
681err3; stvx vr9,r3,r9 681err3; stvx v9,r3,r9
682 addi r3,r3,32 682 addi r3,r3,32
683 683
68410: bf cr7*4+3,11f 68410: bf cr7*4+3,11f
685err3; lvx vr1,r0,r4 685err3; lvx v1,r0,r4
686 VPERM(vr8,vr0,vr1,vr16) 686 VPERM(v8,v0,v1,v16)
687 addi r4,r4,16 687 addi r4,r4,16
688err3; stvx vr8,r0,r3 688err3; stvx v8,r0,r3
689 addi r3,r3,16 689 addi r3,r3,16
690 690
691 /* Up to 15B to go */ 691 /* Up to 15B to go */
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index a5b30c71a8d3..18af0b3d3eb2 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -236,78 +236,78 @@ _GLOBAL(_rest32gpr_31_x)
236 236
237_GLOBAL(_savevr_20) 237_GLOBAL(_savevr_20)
238 li r11,-192 238 li r11,-192
239 stvx vr20,r11,r0 239 stvx v20,r11,r0
240_GLOBAL(_savevr_21) 240_GLOBAL(_savevr_21)
241 li r11,-176 241 li r11,-176
242 stvx vr21,r11,r0 242 stvx v21,r11,r0
243_GLOBAL(_savevr_22) 243_GLOBAL(_savevr_22)
244 li r11,-160 244 li r11,-160
245 stvx vr22,r11,r0 245 stvx v22,r11,r0
246_GLOBAL(_savevr_23) 246_GLOBAL(_savevr_23)
247 li r11,-144 247 li r11,-144
248 stvx vr23,r11,r0 248 stvx v23,r11,r0
249_GLOBAL(_savevr_24) 249_GLOBAL(_savevr_24)
250 li r11,-128 250 li r11,-128
251 stvx vr24,r11,r0 251 stvx v24,r11,r0
252_GLOBAL(_savevr_25) 252_GLOBAL(_savevr_25)
253 li r11,-112 253 li r11,-112
254 stvx vr25,r11,r0 254 stvx v25,r11,r0
255_GLOBAL(_savevr_26) 255_GLOBAL(_savevr_26)
256 li r11,-96 256 li r11,-96
257 stvx vr26,r11,r0 257 stvx v26,r11,r0
258_GLOBAL(_savevr_27) 258_GLOBAL(_savevr_27)
259 li r11,-80 259 li r11,-80
260 stvx vr27,r11,r0 260 stvx v27,r11,r0
261_GLOBAL(_savevr_28) 261_GLOBAL(_savevr_28)
262 li r11,-64 262 li r11,-64
263 stvx vr28,r11,r0 263 stvx v28,r11,r0
264_GLOBAL(_savevr_29) 264_GLOBAL(_savevr_29)
265 li r11,-48 265 li r11,-48
266 stvx vr29,r11,r0 266 stvx v29,r11,r0
267_GLOBAL(_savevr_30) 267_GLOBAL(_savevr_30)
268 li r11,-32 268 li r11,-32
269 stvx vr30,r11,r0 269 stvx v30,r11,r0
270_GLOBAL(_savevr_31) 270_GLOBAL(_savevr_31)
271 li r11,-16 271 li r11,-16
272 stvx vr31,r11,r0 272 stvx v31,r11,r0
273 blr 273 blr
274 274
275_GLOBAL(_restvr_20) 275_GLOBAL(_restvr_20)
276 li r11,-192 276 li r11,-192
277 lvx vr20,r11,r0 277 lvx v20,r11,r0
278_GLOBAL(_restvr_21) 278_GLOBAL(_restvr_21)
279 li r11,-176 279 li r11,-176
280 lvx vr21,r11,r0 280 lvx v21,r11,r0
281_GLOBAL(_restvr_22) 281_GLOBAL(_restvr_22)
282 li r11,-160 282 li r11,-160
283 lvx vr22,r11,r0 283 lvx v22,r11,r0
284_GLOBAL(_restvr_23) 284_GLOBAL(_restvr_23)
285 li r11,-144 285 li r11,-144
286 lvx vr23,r11,r0 286 lvx v23,r11,r0
287_GLOBAL(_restvr_24) 287_GLOBAL(_restvr_24)
288 li r11,-128 288 li r11,-128
289 lvx vr24,r11,r0 289 lvx v24,r11,r0
290_GLOBAL(_restvr_25) 290_GLOBAL(_restvr_25)
291 li r11,-112 291 li r11,-112
292 lvx vr25,r11,r0 292 lvx v25,r11,r0
293_GLOBAL(_restvr_26) 293_GLOBAL(_restvr_26)
294 li r11,-96 294 li r11,-96
295 lvx vr26,r11,r0 295 lvx v26,r11,r0
296_GLOBAL(_restvr_27) 296_GLOBAL(_restvr_27)
297 li r11,-80 297 li r11,-80
298 lvx vr27,r11,r0 298 lvx v27,r11,r0
299_GLOBAL(_restvr_28) 299_GLOBAL(_restvr_28)
300 li r11,-64 300 li r11,-64
301 lvx vr28,r11,r0 301 lvx v28,r11,r0
302_GLOBAL(_restvr_29) 302_GLOBAL(_restvr_29)
303 li r11,-48 303 li r11,-48
304 lvx vr29,r11,r0 304 lvx v29,r11,r0
305_GLOBAL(_restvr_30) 305_GLOBAL(_restvr_30)
306 li r11,-32 306 li r11,-32
307 lvx vr30,r11,r0 307 lvx v30,r11,r0
308_GLOBAL(_restvr_31) 308_GLOBAL(_restvr_31)
309 li r11,-16 309 li r11,-16
310 lvx vr31,r11,r0 310 lvx v31,r11,r0
311 blr 311 blr
312 312
313#endif /* CONFIG_ALTIVEC */ 313#endif /* CONFIG_ALTIVEC */
@@ -443,101 +443,101 @@ _restgpr0_31:
443.globl _savevr_20 443.globl _savevr_20
444_savevr_20: 444_savevr_20:
445 li r12,-192 445 li r12,-192
446 stvx vr20,r12,r0 446 stvx v20,r12,r0
447.globl _savevr_21 447.globl _savevr_21
448_savevr_21: 448_savevr_21:
449 li r12,-176 449 li r12,-176
450 stvx vr21,r12,r0 450 stvx v21,r12,r0
451.globl _savevr_22 451.globl _savevr_22
452_savevr_22: 452_savevr_22:
453 li r12,-160 453 li r12,-160
454 stvx vr22,r12,r0 454 stvx v22,r12,r0
455.globl _savevr_23 455.globl _savevr_23
456_savevr_23: 456_savevr_23:
457 li r12,-144 457 li r12,-144
458 stvx vr23,r12,r0 458 stvx v23,r12,r0
459.globl _savevr_24 459.globl _savevr_24
460_savevr_24: 460_savevr_24:
461 li r12,-128 461 li r12,-128
462 stvx vr24,r12,r0 462 stvx v24,r12,r0
463.globl _savevr_25 463.globl _savevr_25
464_savevr_25: 464_savevr_25:
465 li r12,-112 465 li r12,-112
466 stvx vr25,r12,r0 466 stvx v25,r12,r0
467.globl _savevr_26 467.globl _savevr_26
468_savevr_26: 468_savevr_26:
469 li r12,-96 469 li r12,-96
470 stvx vr26,r12,r0 470 stvx v26,r12,r0
471.globl _savevr_27 471.globl _savevr_27
472_savevr_27: 472_savevr_27:
473 li r12,-80 473 li r12,-80
474 stvx vr27,r12,r0 474 stvx v27,r12,r0
475.globl _savevr_28 475.globl _savevr_28
476_savevr_28: 476_savevr_28:
477 li r12,-64 477 li r12,-64
478 stvx vr28,r12,r0 478 stvx v28,r12,r0
479.globl _savevr_29 479.globl _savevr_29
480_savevr_29: 480_savevr_29:
481 li r12,-48 481 li r12,-48
482 stvx vr29,r12,r0 482 stvx v29,r12,r0
483.globl _savevr_30 483.globl _savevr_30
484_savevr_30: 484_savevr_30:
485 li r12,-32 485 li r12,-32
486 stvx vr30,r12,r0 486 stvx v30,r12,r0
487.globl _savevr_31 487.globl _savevr_31
488_savevr_31: 488_savevr_31:
489 li r12,-16 489 li r12,-16
490 stvx vr31,r12,r0 490 stvx v31,r12,r0
491 blr 491 blr
492 492
493.globl _restvr_20 493.globl _restvr_20
494_restvr_20: 494_restvr_20:
495 li r12,-192 495 li r12,-192
496 lvx vr20,r12,r0 496 lvx v20,r12,r0
497.globl _restvr_21 497.globl _restvr_21
498_restvr_21: 498_restvr_21:
499 li r12,-176 499 li r12,-176
500 lvx vr21,r12,r0 500 lvx v21,r12,r0
501.globl _restvr_22 501.globl _restvr_22
502_restvr_22: 502_restvr_22:
503 li r12,-160 503 li r12,-160
504 lvx vr22,r12,r0 504 lvx v22,r12,r0
505.globl _restvr_23 505.globl _restvr_23
506_restvr_23: 506_restvr_23:
507 li r12,-144 507 li r12,-144
508 lvx vr23,r12,r0 508 lvx v23,r12,r0
509.globl _restvr_24 509.globl _restvr_24
510_restvr_24: 510_restvr_24:
511 li r12,-128 511 li r12,-128
512 lvx vr24,r12,r0 512 lvx v24,r12,r0
513.globl _restvr_25 513.globl _restvr_25
514_restvr_25: 514_restvr_25:
515 li r12,-112 515 li r12,-112
516 lvx vr25,r12,r0 516 lvx v25,r12,r0
517.globl _restvr_26 517.globl _restvr_26
518_restvr_26: 518_restvr_26:
519 li r12,-96 519 li r12,-96
520 lvx vr26,r12,r0 520 lvx v26,r12,r0
521.globl _restvr_27 521.globl _restvr_27
522_restvr_27: 522_restvr_27:
523 li r12,-80 523 li r12,-80
524 lvx vr27,r12,r0 524 lvx v27,r12,r0
525.globl _restvr_28 525.globl _restvr_28
526_restvr_28: 526_restvr_28:
527 li r12,-64 527 li r12,-64
528 lvx vr28,r12,r0 528 lvx v28,r12,r0
529.globl _restvr_29 529.globl _restvr_29
530_restvr_29: 530_restvr_29:
531 li r12,-48 531 li r12,-48
532 lvx vr29,r12,r0 532 lvx v29,r12,r0
533.globl _restvr_30 533.globl _restvr_30
534_restvr_30: 534_restvr_30:
535 li r12,-32 535 li r12,-32
536 lvx vr30,r12,r0 536 lvx v30,r12,r0
537.globl _restvr_31 537.globl _restvr_31
538_restvr_31: 538_restvr_31:
539 li r12,-16 539 li r12,-16
540 lvx vr31,r12,r0 540 lvx v31,r12,r0
541 blr 541 blr
542 542
543#endif /* CONFIG_ALTIVEC */ 543#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index 85aec08ab234..5d0cdbfbe3f2 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -184,16 +184,16 @@ _GLOBAL(do_stfd)
184 extab 2b,3b 184 extab 2b,3b
185 185
186#ifdef CONFIG_ALTIVEC 186#ifdef CONFIG_ALTIVEC
187/* Get the contents of vrN into vr0; N is in r3. */ 187/* Get the contents of vrN into v0; N is in r3. */
188_GLOBAL(get_vr) 188_GLOBAL(get_vr)
189 mflr r0 189 mflr r0
190 rlwinm r3,r3,3,0xf8 190 rlwinm r3,r3,3,0xf8
191 bcl 20,31,1f 191 bcl 20,31,1f
192 blr /* vr0 is already in vr0 */ 192 blr /* v0 is already in v0 */
193 nop 193 nop
194reg = 1 194reg = 1
195 .rept 31 195 .rept 31
196 vor vr0,reg,reg /* assembler doesn't know vmr? */ 196 vor v0,reg,reg /* assembler doesn't know vmr? */
197 blr 197 blr
198reg = reg + 1 198reg = reg + 1
199 .endr 199 .endr
@@ -203,16 +203,16 @@ reg = reg + 1
203 mtlr r0 203 mtlr r0
204 bctr 204 bctr
205 205
206/* Put the contents of vr0 into vrN; N is in r3. */ 206/* Put the contents of v0 into vrN; N is in r3. */
207_GLOBAL(put_vr) 207_GLOBAL(put_vr)
208 mflr r0 208 mflr r0
209 rlwinm r3,r3,3,0xf8 209 rlwinm r3,r3,3,0xf8
210 bcl 20,31,1f 210 bcl 20,31,1f
211 blr /* vr0 is already in vr0 */ 211 blr /* v0 is already in v0 */
212 nop 212 nop
213reg = 1 213reg = 1
214 .rept 31 214 .rept 31
215 vor reg,vr0,vr0 215 vor reg,v0,v0
216 blr 216 blr
217reg = reg + 1 217reg = reg + 1
218 .endr 218 .endr
@@ -234,13 +234,13 @@ _GLOBAL(do_lvx)
234 MTMSRD(r7) 234 MTMSRD(r7)
235 isync 235 isync
236 beq cr7,1f 236 beq cr7,1f
237 stvx vr0,r1,r8 237 stvx v0,r1,r8
2381: li r9,-EFAULT 2381: li r9,-EFAULT
2392: lvx vr0,0,r4 2392: lvx v0,0,r4
240 li r9,0 240 li r9,0
2413: beq cr7,4f 2413: beq cr7,4f
242 bl put_vr 242 bl put_vr
243 lvx vr0,r1,r8 243 lvx v0,r1,r8
2444: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) 2444: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
245 mtlr r0 245 mtlr r0
246 MTMSRD(r6) 246 MTMSRD(r6)
@@ -262,13 +262,13 @@ _GLOBAL(do_stvx)
262 MTMSRD(r7) 262 MTMSRD(r7)
263 isync 263 isync
264 beq cr7,1f 264 beq cr7,1f
265 stvx vr0,r1,r8 265 stvx v0,r1,r8
266 bl get_vr 266 bl get_vr
2671: li r9,-EFAULT 2671: li r9,-EFAULT
2682: stvx vr0,0,r4 2682: stvx v0,0,r4
269 li r9,0 269 li r9,0
2703: beq cr7,4f 2703: beq cr7,4f
271 lvx vr0,r1,r8 271 lvx v0,r1,r8
2724: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) 2724: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
273 mtlr r0 273 mtlr r0
274 MTMSRD(r6) 274 MTMSRD(r6)
@@ -280,12 +280,12 @@ _GLOBAL(do_stvx)
280#endif /* CONFIG_ALTIVEC */ 280#endif /* CONFIG_ALTIVEC */
281 281
282#ifdef CONFIG_VSX 282#ifdef CONFIG_VSX
283/* Get the contents of vsrN into vsr0; N is in r3. */ 283/* Get the contents of vsN into vs0; N is in r3. */
284_GLOBAL(get_vsr) 284_GLOBAL(get_vsr)
285 mflr r0 285 mflr r0
286 rlwinm r3,r3,3,0x1f8 286 rlwinm r3,r3,3,0x1f8
287 bcl 20,31,1f 287 bcl 20,31,1f
288 blr /* vsr0 is already in vsr0 */ 288 blr /* vs0 is already in vs0 */
289 nop 289 nop
290reg = 1 290reg = 1
291 .rept 63 291 .rept 63
@@ -299,12 +299,12 @@ reg = reg + 1
299 mtlr r0 299 mtlr r0
300 bctr 300 bctr
301 301
302/* Put the contents of vsr0 into vsrN; N is in r3. */ 302/* Put the contents of vs0 into vsN; N is in r3. */
303_GLOBAL(put_vsr) 303_GLOBAL(put_vsr)
304 mflr r0 304 mflr r0
305 rlwinm r3,r3,3,0x1f8 305 rlwinm r3,r3,3,0x1f8
306 bcl 20,31,1f 306 bcl 20,31,1f
307 blr /* vr0 is already in vr0 */ 307 blr /* v0 is already in v0 */
308 nop 308 nop
309reg = 1 309reg = 1
310 .rept 63 310 .rept 63
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 170a0346f756..f7deebdf3365 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -41,6 +41,7 @@ void __spin_yield(arch_spinlock_t *lock)
41 plpar_hcall_norets(H_CONFER, 41 plpar_hcall_norets(H_CONFER,
42 get_hard_smp_processor_id(holder_cpu), yield_count); 42 get_hard_smp_processor_id(holder_cpu), yield_count);
43} 43}
44EXPORT_SYMBOL_GPL(__spin_yield);
44 45
45/* 46/*
46 * Waiting for a read lock or a write lock on a rwlock... 47 * Waiting for a read lock or a write lock on a rwlock...
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0830587df16e..786234fd4e91 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -321,29 +321,29 @@ _GLOBAL(memcpy_power7)
321 li r11,48 321 li r11,48
322 322
323 bf cr7*4+3,5f 323 bf cr7*4+3,5f
324 lvx vr1,r0,r4 324 lvx v1,r0,r4
325 addi r4,r4,16 325 addi r4,r4,16
326 stvx vr1,r0,r3 326 stvx v1,r0,r3
327 addi r3,r3,16 327 addi r3,r3,16
328 328
3295: bf cr7*4+2,6f 3295: bf cr7*4+2,6f
330 lvx vr1,r0,r4 330 lvx v1,r0,r4
331 lvx vr0,r4,r9 331 lvx v0,r4,r9
332 addi r4,r4,32 332 addi r4,r4,32
333 stvx vr1,r0,r3 333 stvx v1,r0,r3
334 stvx vr0,r3,r9 334 stvx v0,r3,r9
335 addi r3,r3,32 335 addi r3,r3,32
336 336
3376: bf cr7*4+1,7f 3376: bf cr7*4+1,7f
338 lvx vr3,r0,r4 338 lvx v3,r0,r4
339 lvx vr2,r4,r9 339 lvx v2,r4,r9
340 lvx vr1,r4,r10 340 lvx v1,r4,r10
341 lvx vr0,r4,r11 341 lvx v0,r4,r11
342 addi r4,r4,64 342 addi r4,r4,64
343 stvx vr3,r0,r3 343 stvx v3,r0,r3
344 stvx vr2,r3,r9 344 stvx v2,r3,r9
345 stvx vr1,r3,r10 345 stvx v1,r3,r10
346 stvx vr0,r3,r11 346 stvx v0,r3,r11
347 addi r3,r3,64 347 addi r3,r3,64
348 348
3497: sub r5,r5,r6 3497: sub r5,r5,r6
@@ -366,23 +366,23 @@ _GLOBAL(memcpy_power7)
366 */ 366 */
367 .align 5 367 .align 5
3688: 3688:
369 lvx vr7,r0,r4 369 lvx v7,r0,r4
370 lvx vr6,r4,r9 370 lvx v6,r4,r9
371 lvx vr5,r4,r10 371 lvx v5,r4,r10
372 lvx vr4,r4,r11 372 lvx v4,r4,r11
373 lvx vr3,r4,r12 373 lvx v3,r4,r12
374 lvx vr2,r4,r14 374 lvx v2,r4,r14
375 lvx vr1,r4,r15 375 lvx v1,r4,r15
376 lvx vr0,r4,r16 376 lvx v0,r4,r16
377 addi r4,r4,128 377 addi r4,r4,128
378 stvx vr7,r0,r3 378 stvx v7,r0,r3
379 stvx vr6,r3,r9 379 stvx v6,r3,r9
380 stvx vr5,r3,r10 380 stvx v5,r3,r10
381 stvx vr4,r3,r11 381 stvx v4,r3,r11
382 stvx vr3,r3,r12 382 stvx v3,r3,r12
383 stvx vr2,r3,r14 383 stvx v2,r3,r14
384 stvx vr1,r3,r15 384 stvx v1,r3,r15
385 stvx vr0,r3,r16 385 stvx v0,r3,r16
386 addi r3,r3,128 386 addi r3,r3,128
387 bdnz 8b 387 bdnz 8b
388 388
@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
396 mtocrf 0x01,r6 396 mtocrf 0x01,r6
397 397
398 bf cr7*4+1,9f 398 bf cr7*4+1,9f
399 lvx vr3,r0,r4 399 lvx v3,r0,r4
400 lvx vr2,r4,r9 400 lvx v2,r4,r9
401 lvx vr1,r4,r10 401 lvx v1,r4,r10
402 lvx vr0,r4,r11 402 lvx v0,r4,r11
403 addi r4,r4,64 403 addi r4,r4,64
404 stvx vr3,r0,r3 404 stvx v3,r0,r3
405 stvx vr2,r3,r9 405 stvx v2,r3,r9
406 stvx vr1,r3,r10 406 stvx v1,r3,r10
407 stvx vr0,r3,r11 407 stvx v0,r3,r11
408 addi r3,r3,64 408 addi r3,r3,64
409 409
4109: bf cr7*4+2,10f 4109: bf cr7*4+2,10f
411 lvx vr1,r0,r4 411 lvx v1,r0,r4
412 lvx vr0,r4,r9 412 lvx v0,r4,r9
413 addi r4,r4,32 413 addi r4,r4,32
414 stvx vr1,r0,r3 414 stvx v1,r0,r3
415 stvx vr0,r3,r9 415 stvx v0,r3,r9
416 addi r3,r3,32 416 addi r3,r3,32
417 417
41810: bf cr7*4+3,11f 41810: bf cr7*4+3,11f
419 lvx vr1,r0,r4 419 lvx v1,r0,r4
420 addi r4,r4,16 420 addi r4,r4,16
421 stvx vr1,r0,r3 421 stvx v1,r0,r3
422 addi r3,r3,16 422 addi r3,r3,16
423 423
424 /* Up to 15B to go */ 424 /* Up to 15B to go */
@@ -494,42 +494,42 @@ _GLOBAL(memcpy_power7)
494 li r10,32 494 li r10,32
495 li r11,48 495 li r11,48
496 496
497 LVS(vr16,0,r4) /* Setup permute control vector */ 497 LVS(v16,0,r4) /* Setup permute control vector */
498 lvx vr0,0,r4 498 lvx v0,0,r4
499 addi r4,r4,16 499 addi r4,r4,16
500 500
501 bf cr7*4+3,5f 501 bf cr7*4+3,5f
502 lvx vr1,r0,r4 502 lvx v1,r0,r4
503 VPERM(vr8,vr0,vr1,vr16) 503 VPERM(v8,v0,v1,v16)
504 addi r4,r4,16 504 addi r4,r4,16
505 stvx vr8,r0,r3 505 stvx v8,r0,r3
506 addi r3,r3,16 506 addi r3,r3,16
507 vor vr0,vr1,vr1 507 vor v0,v1,v1
508 508
5095: bf cr7*4+2,6f 5095: bf cr7*4+2,6f
510 lvx vr1,r0,r4 510 lvx v1,r0,r4
511 VPERM(vr8,vr0,vr1,vr16) 511 VPERM(v8,v0,v1,v16)
512 lvx vr0,r4,r9 512 lvx v0,r4,r9
513 VPERM(vr9,vr1,vr0,vr16) 513 VPERM(v9,v1,v0,v16)
514 addi r4,r4,32 514 addi r4,r4,32
515 stvx vr8,r0,r3 515 stvx v8,r0,r3
516 stvx vr9,r3,r9 516 stvx v9,r3,r9
517 addi r3,r3,32 517 addi r3,r3,32
518 518
5196: bf cr7*4+1,7f 5196: bf cr7*4+1,7f
520 lvx vr3,r0,r4 520 lvx v3,r0,r4
521 VPERM(vr8,vr0,vr3,vr16) 521 VPERM(v8,v0,v3,v16)
522 lvx vr2,r4,r9 522 lvx v2,r4,r9
523 VPERM(vr9,vr3,vr2,vr16) 523 VPERM(v9,v3,v2,v16)
524 lvx vr1,r4,r10 524 lvx v1,r4,r10
525 VPERM(vr10,vr2,vr1,vr16) 525 VPERM(v10,v2,v1,v16)
526 lvx vr0,r4,r11 526 lvx v0,r4,r11
527 VPERM(vr11,vr1,vr0,vr16) 527 VPERM(v11,v1,v0,v16)
528 addi r4,r4,64 528 addi r4,r4,64
529 stvx vr8,r0,r3 529 stvx v8,r0,r3
530 stvx vr9,r3,r9 530 stvx v9,r3,r9
531 stvx vr10,r3,r10 531 stvx v10,r3,r10
532 stvx vr11,r3,r11 532 stvx v11,r3,r11
533 addi r3,r3,64 533 addi r3,r3,64
534 534
5357: sub r5,r5,r6 5357: sub r5,r5,r6
@@ -552,31 +552,31 @@ _GLOBAL(memcpy_power7)
552 */ 552 */
553 .align 5 553 .align 5
5548: 5548:
555 lvx vr7,r0,r4 555 lvx v7,r0,r4
556 VPERM(vr8,vr0,vr7,vr16) 556 VPERM(v8,v0,v7,v16)
557 lvx vr6,r4,r9 557 lvx v6,r4,r9
558 VPERM(vr9,vr7,vr6,vr16) 558 VPERM(v9,v7,v6,v16)
559 lvx vr5,r4,r10 559 lvx v5,r4,r10
560 VPERM(vr10,vr6,vr5,vr16) 560 VPERM(v10,v6,v5,v16)
561 lvx vr4,r4,r11 561 lvx v4,r4,r11
562 VPERM(vr11,vr5,vr4,vr16) 562 VPERM(v11,v5,v4,v16)
563 lvx vr3,r4,r12 563 lvx v3,r4,r12
564 VPERM(vr12,vr4,vr3,vr16) 564 VPERM(v12,v4,v3,v16)
565 lvx vr2,r4,r14 565 lvx v2,r4,r14
566 VPERM(vr13,vr3,vr2,vr16) 566 VPERM(v13,v3,v2,v16)
567 lvx vr1,r4,r15 567 lvx v1,r4,r15
568 VPERM(vr14,vr2,vr1,vr16) 568 VPERM(v14,v2,v1,v16)
569 lvx vr0,r4,r16 569 lvx v0,r4,r16
570 VPERM(vr15,vr1,vr0,vr16) 570 VPERM(v15,v1,v0,v16)
571 addi r4,r4,128 571 addi r4,r4,128
572 stvx vr8,r0,r3 572 stvx v8,r0,r3
573 stvx vr9,r3,r9 573 stvx v9,r3,r9
574 stvx vr10,r3,r10 574 stvx v10,r3,r10
575 stvx vr11,r3,r11 575 stvx v11,r3,r11
576 stvx vr12,r3,r12 576 stvx v12,r3,r12
577 stvx vr13,r3,r14 577 stvx v13,r3,r14
578 stvx vr14,r3,r15 578 stvx v14,r3,r15
579 stvx vr15,r3,r16 579 stvx v15,r3,r16
580 addi r3,r3,128 580 addi r3,r3,128
581 bdnz 8b 581 bdnz 8b
582 582
@@ -590,36 +590,36 @@ _GLOBAL(memcpy_power7)
590 mtocrf 0x01,r6 590 mtocrf 0x01,r6
591 591
592 bf cr7*4+1,9f 592 bf cr7*4+1,9f
593 lvx vr3,r0,r4 593 lvx v3,r0,r4
594 VPERM(vr8,vr0,vr3,vr16) 594 VPERM(v8,v0,v3,v16)
595 lvx vr2,r4,r9 595 lvx v2,r4,r9
596 VPERM(vr9,vr3,vr2,vr16) 596 VPERM(v9,v3,v2,v16)
597 lvx vr1,r4,r10 597 lvx v1,r4,r10
598 VPERM(vr10,vr2,vr1,vr16) 598 VPERM(v10,v2,v1,v16)
599 lvx vr0,r4,r11 599 lvx v0,r4,r11
600 VPERM(vr11,vr1,vr0,vr16) 600 VPERM(v11,v1,v0,v16)
601 addi r4,r4,64 601 addi r4,r4,64
602 stvx vr8,r0,r3 602 stvx v8,r0,r3
603 stvx vr9,r3,r9 603 stvx v9,r3,r9
604 stvx vr10,r3,r10 604 stvx v10,r3,r10
605 stvx vr11,r3,r11 605 stvx v11,r3,r11
606 addi r3,r3,64 606 addi r3,r3,64
607 607
6089: bf cr7*4+2,10f 6089: bf cr7*4+2,10f
609 lvx vr1,r0,r4 609 lvx v1,r0,r4
610 VPERM(vr8,vr0,vr1,vr16) 610 VPERM(v8,v0,v1,v16)
611 lvx vr0,r4,r9 611 lvx v0,r4,r9
612 VPERM(vr9,vr1,vr0,vr16) 612 VPERM(v9,v1,v0,v16)
613 addi r4,r4,32 613 addi r4,r4,32
614 stvx vr8,r0,r3 614 stvx v8,r0,r3
615 stvx vr9,r3,r9 615 stvx v9,r3,r9
616 addi r3,r3,32 616 addi r3,r3,32
617 617
61810: bf cr7*4+3,11f 61810: bf cr7*4+3,11f
619 lvx vr1,r0,r4 619 lvx v1,r0,r4
620 VPERM(vr8,vr0,vr1,vr16) 620 VPERM(v8,v0,v1,v16)
621 addi r4,r4,16 621 addi r4,r4,16
622 stvx vr8,r0,r3 622 stvx v8,r0,r3
623 addi r3,r3,16 623 addi r3,r3,16
624 624
625 /* Up to 15B to go */ 625 /* Up to 15B to go */
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index f993959647b5..c7f8e9586316 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -8,10 +8,6 @@ EXPORT_SYMBOL(memset);
8EXPORT_SYMBOL(memmove); 8EXPORT_SYMBOL(memmove);
9EXPORT_SYMBOL(memcmp); 9EXPORT_SYMBOL(memcmp);
10EXPORT_SYMBOL(memchr); 10EXPORT_SYMBOL(memchr);
11#ifdef CONFIG_PPC32
12EXPORT_SYMBOL(cacheable_memcpy);
13EXPORT_SYMBOL(cacheable_memzero);
14#endif
15 11
16EXPORT_SYMBOL(strcpy); 12EXPORT_SYMBOL(strcpy);
17EXPORT_SYMBOL(strncpy); 13EXPORT_SYMBOL(strncpy);
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index a1060a868e69..69abf844c2c3 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(rh_create);
284 */ 284 */
285void rh_destroy(rh_info_t * info) 285void rh_destroy(rh_info_t * info)
286{ 286{
287 if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) 287 if ((info->flags & RHIF_STATIC_BLOCK) == 0)
288 kfree(info->block); 288 kfree(info->block);
289 289
290 if ((info->flags & RHIF_STATIC_INFO) == 0) 290 if ((info->flags & RHIF_STATIC_INFO) == 0)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 438dcd3fd0d1..9c8770b5f96f 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_40x) += 40x_mmu.o
24obj-$(CONFIG_44x) += 44x_mmu.o 24obj-$(CONFIG_44x) += 44x_mmu.o
25obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o 25obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
26obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o 26obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
27obj-$(CONFIG_PPC_SPLPAR) += vphn.o
27obj-$(CONFIG_PPC_MM_SLICES) += slice.o 28obj-$(CONFIG_PPC_MM_SLICES) += slice.o
28obj-y += hugetlbpage.o 29obj-y += hugetlbpage.o
29ifeq ($(CONFIG_HUGETLB_PAGE),y) 30ifeq ($(CONFIG_HUGETLB_PAGE),y)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index d85e86aac7fb..169aba446a74 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
228 do { 228 do {
229 SetPageReserved(page); 229 SetPageReserved(page);
230 map_page(vaddr, page_to_phys(page), 230 map_page(vaddr, page_to_phys(page),
231 pgprot_noncached(PAGE_KERNEL)); 231 pgprot_val(pgprot_noncached(PAGE_KERNEL)));
232 page++; 232 page++;
233 vaddr += PAGE_SIZE; 233 vaddr += PAGE_SIZE;
234 } while (size -= PAGE_SIZE); 234 } while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index b46912fee7cd..9c90e66cffb6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
181 unsigned long cam_sz; 181 unsigned long cam_sz;
182 182
183 cam_sz = calc_cam_sz(ram, virt, phys); 183 cam_sz = calc_cam_sz(ram, virt, phys);
184 settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0); 184 settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
185 185
186 ram -= cam_sz; 186 ram -= cam_sz;
187 amount_mapped += cam_sz; 187 amount_mapped += cam_sz;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 86686514ae13..43dafb9d6a46 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -33,7 +33,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
33 * atomically mark the linux large page PMD busy and dirty 33 * atomically mark the linux large page PMD busy and dirty
34 */ 34 */
35 do { 35 do {
36 pmd_t pmd = ACCESS_ONCE(*pmdp); 36 pmd_t pmd = READ_ONCE(*pmdp);
37 37
38 old_pmd = pmd_val(pmd); 38 old_pmd = pmd_val(pmd);
39 /* If PMD busy, retry the access */ 39 /* If PMD busy, retry the access */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..fa9d5c238d22 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -964,7 +964,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
964 *shift = 0; 964 *shift = 0;
965 965
966 pgdp = pgdir + pgd_index(ea); 966 pgdp = pgdir + pgd_index(ea);
967 pgd = ACCESS_ONCE(*pgdp); 967 pgd = READ_ONCE(*pgdp);
968 /* 968 /*
969 * Always operate on the local stack value. This make sure the 969 * Always operate on the local stack value. This make sure the
970 * value don't get updated by a parallel THP split/collapse, 970 * value don't get updated by a parallel THP split/collapse,
@@ -1045,7 +1045,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
1045 if (pte_end < end) 1045 if (pte_end < end)
1046 end = pte_end; 1046 end = pte_end;
1047 1047
1048 pte = ACCESS_ONCE(*ptep); 1048 pte = READ_ONCE(*ptep);
1049 mask = _PAGE_PRESENT | _PAGE_USER; 1049 mask = _PAGE_PRESENT | _PAGE_USER;
1050 if (write) 1050 if (write)
1051 mask |= _PAGE_RW; 1051 mask |= _PAGE_RW;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 10471f9bb63f..d747dd7bc90b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -132,6 +132,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
132 align = max_t(unsigned long, align, minalign); 132 align = max_t(unsigned long, align, minalign);
133 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 133 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
134 new = kmem_cache_create(name, table_size, align, 0, ctor); 134 new = kmem_cache_create(name, table_size, align, 0, ctor);
135 kfree(name);
135 pgtable_cache[shift - 1] = new; 136 pgtable_cache[shift - 1] = new;
136 pr_debug("Allocated pgtable cache for order %d\n", shift); 137 pr_debug("Allocated pgtable cache for order %d\n", shift);
137} 138}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b7285a5870f8..45fda71feb27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,7 +61,6 @@
61#define CPU_FTR_NOEXECUTE 0 61#define CPU_FTR_NOEXECUTE 0
62#endif 62#endif
63 63
64int mem_init_done;
65unsigned long long memory_limit; 64unsigned long long memory_limit;
66 65
67#ifdef CONFIG_HIGHMEM 66#ifdef CONFIG_HIGHMEM
@@ -377,8 +376,6 @@ void __init mem_init(void)
377 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", 376 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
378 VMALLOC_START, VMALLOC_END); 377 VMALLOC_START, VMALLOC_END);
379#endif /* CONFIG_PPC32 */ 378#endif /* CONFIG_PPC32 */
380
381 mem_init_done = 1;
382} 379}
383 380
384void free_initmem(void) 381void free_initmem(void)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 78c45f392f5b..085b66b10891 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -96,7 +96,7 @@ extern void _tlbia(void);
96extern void mapin_ram(void); 96extern void mapin_ram(void);
97extern int map_page(unsigned long va, phys_addr_t pa, int flags); 97extern int map_page(unsigned long va, phys_addr_t pa, int flags);
98extern void setbat(int index, unsigned long virt, phys_addr_t phys, 98extern void setbat(int index, unsigned long virt, phys_addr_t phys,
99 unsigned int size, int flags); 99 unsigned int size, pgprot_t prot);
100 100
101extern int __map_without_bats; 101extern int __map_without_bats;
102extern int __allow_ioremap_reserved; 102extern int __allow_ioremap_reserved;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0257a7d659ef..5e80621d9324 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -958,6 +958,13 @@ void __init initmem_init(void)
958 958
959 memblock_dump_all(); 959 memblock_dump_all();
960 960
961 /*
962 * Reduce the possible NUMA nodes to the online NUMA nodes,
963 * since we do not support node hotplug. This ensures that we
964 * lower the maximum NUMA node ID to what is actually present.
965 */
966 nodes_and(node_possible_map, node_possible_map, node_online_map);
967
961 for_each_online_node(nid) { 968 for_each_online_node(nid) {
962 unsigned long start_pfn, end_pfn; 969 unsigned long start_pfn, end_pfn;
963 970
@@ -1177,6 +1184,9 @@ u64 memory_hotplug_max(void)
1177 1184
1178/* Virtual Processor Home Node (VPHN) support */ 1185/* Virtual Processor Home Node (VPHN) support */
1179#ifdef CONFIG_PPC_SPLPAR 1186#ifdef CONFIG_PPC_SPLPAR
1187
1188#include "vphn.h"
1189
1180struct topology_update_data { 1190struct topology_update_data {
1181 struct topology_update_data *next; 1191 struct topology_update_data *next;
1182 unsigned int cpu; 1192 unsigned int cpu;
@@ -1248,55 +1258,6 @@ static int update_cpu_associativity_changes_mask(void)
1248} 1258}
1249 1259
1250/* 1260/*
1251 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
1252 * the complete property we have to add the length in the first cell.
1253 */
1254#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
1255
1256/*
1257 * Convert the associativity domain numbers returned from the hypervisor
1258 * to the sequence they would appear in the ibm,associativity property.
1259 */
1260static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
1261{
1262 int i, nr_assoc_doms = 0;
1263 const __be16 *field = (const __be16 *) packed;
1264
1265#define VPHN_FIELD_UNUSED (0xffff)
1266#define VPHN_FIELD_MSB (0x8000)
1267#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
1268
1269 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
1270 if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
1271 /* All significant fields processed, and remaining
1272 * fields contain the reserved value of all 1's.
1273 * Just store them.
1274 */
1275 unpacked[i] = *((__be32 *)field);
1276 field += 2;
1277 } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
1278 /* Data is in the lower 15 bits of this field */
1279 unpacked[i] = cpu_to_be32(
1280 be16_to_cpup(field) & VPHN_FIELD_MASK);
1281 field++;
1282 nr_assoc_doms++;
1283 } else {
1284 /* Data is in the lower 15 bits of this field
1285 * concatenated with the next 16 bit field
1286 */
1287 unpacked[i] = *((__be32 *)field);
1288 field += 2;
1289 nr_assoc_doms++;
1290 }
1291 }
1292
1293 /* The first cell contains the length of the property */
1294 unpacked[0] = cpu_to_be32(nr_assoc_doms);
1295
1296 return nr_assoc_doms;
1297}
1298
1299/*
1300 * Retrieve the new associativity information for a virtual processor's 1261 * Retrieve the new associativity information for a virtual processor's
1301 * home node. 1262 * home node.
1302 */ 1263 */
@@ -1306,11 +1267,8 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1306 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 1267 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1307 u64 flags = 1; 1268 u64 flags = 1;
1308 int hwcpu = get_hard_smp_processor_id(cpu); 1269 int hwcpu = get_hard_smp_processor_id(cpu);
1309 int i;
1310 1270
1311 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); 1271 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1312 for (i = 0; i < 6; i++)
1313 retbuf[i] = cpu_to_be64(retbuf[i]);
1314 vphn_unpack_associativity(retbuf, associativity); 1272 vphn_unpack_associativity(retbuf, associativity);
1315 1273
1316 return rc; 1274 return rc;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 03b1a3b0fbd5..7692d1bb1bc6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -54,9 +54,6 @@ extern char etext[], _stext[];
54#ifdef HAVE_BATS 54#ifdef HAVE_BATS
55extern phys_addr_t v_mapped_by_bats(unsigned long va); 55extern phys_addr_t v_mapped_by_bats(unsigned long va);
56extern unsigned long p_mapped_by_bats(phys_addr_t pa); 56extern unsigned long p_mapped_by_bats(phys_addr_t pa);
57void setbat(int index, unsigned long virt, phys_addr_t phys,
58 unsigned int size, int flags);
59
60#else /* !HAVE_BATS */ 57#else /* !HAVE_BATS */
61#define v_mapped_by_bats(x) (0UL) 58#define v_mapped_by_bats(x) (0UL)
62#define p_mapped_by_bats(x) (0UL) 59#define p_mapped_by_bats(x) (0UL)
@@ -110,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
110__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 107__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
111{ 108{
112 pte_t *pte; 109 pte_t *pte;
113 extern int mem_init_done;
114 110
115 if (mem_init_done) { 111 if (slab_is_available()) {
116 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 112 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
117 } else { 113 } else {
118 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); 114 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
@@ -192,7 +188,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
192 188
193 /* Make sure we have the base flags */ 189 /* Make sure we have the base flags */
194 if ((flags & _PAGE_PRESENT) == 0) 190 if ((flags & _PAGE_PRESENT) == 0)
195 flags |= PAGE_KERNEL; 191 flags |= pgprot_val(PAGE_KERNEL);
196 192
197 /* Non-cacheable page cannot be coherent */ 193 /* Non-cacheable page cannot be coherent */
198 if (flags & _PAGE_NO_CACHE) 194 if (flags & _PAGE_NO_CACHE)
@@ -219,9 +215,9 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
219 * Don't allow anybody to remap normal RAM that we're using. 215 * Don't allow anybody to remap normal RAM that we're using.
220 * mem_init() sets high_memory so only do the check after that. 216 * mem_init() sets high_memory so only do the check after that.
221 */ 217 */
222 if (mem_init_done && (p < virt_to_phys(high_memory)) && 218 if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
223 !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) { 219 !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
224 printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n", 220 printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
225 (unsigned long long)p, __builtin_return_address(0)); 221 (unsigned long long)p, __builtin_return_address(0));
226 return NULL; 222 return NULL;
227 } 223 }
@@ -247,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
247 if ((v = p_mapped_by_tlbcam(p))) 243 if ((v = p_mapped_by_tlbcam(p)))
248 goto out; 244 goto out;
249 245
250 if (mem_init_done) { 246 if (slab_is_available()) {
251 struct vm_struct *area; 247 struct vm_struct *area;
252 area = get_vm_area_caller(size, VM_IOREMAP, caller); 248 area = get_vm_area_caller(size, VM_IOREMAP, caller);
253 if (area == 0) 249 if (area == 0)
@@ -266,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
266 for (i = 0; i < size && err == 0; i += PAGE_SIZE) 262 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
267 err = map_page(v+i, p+i, flags); 263 err = map_page(v+i, p+i, flags);
268 if (err) { 264 if (err) {
269 if (mem_init_done) 265 if (slab_is_available())
270 vunmap((void *)v); 266 vunmap((void *)v);
271 return NULL; 267 return NULL;
272 } 268 }
@@ -327,7 +323,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
327 p = memstart_addr + s; 323 p = memstart_addr + s;
328 for (; s < top; s += PAGE_SIZE) { 324 for (; s < top; s += PAGE_SIZE) {
329 ktext = ((char *) v >= _stext && (char *) v < etext); 325 ktext = ((char *) v >= _stext && (char *) v < etext);
330 f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL; 326 f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
331 map_page(v, p, f); 327 map_page(v, p, f);
332#ifdef CONFIG_PPC_STD_MMU_32 328#ifdef CONFIG_PPC_STD_MMU_32
333 if (ktext) 329 if (ktext)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 6957cc1ca0a7..59daa5eeec25 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -231,7 +231,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
231 if ((size == 0) || (paligned == 0)) 231 if ((size == 0) || (paligned == 0))
232 return NULL; 232 return NULL;
233 233
234 if (mem_init_done) { 234 if (slab_is_available()) {
235 struct vm_struct *area; 235 struct vm_struct *area;
236 236
237 area = __get_vm_area_caller(size, VM_IOREMAP, 237 area = __get_vm_area_caller(size, VM_IOREMAP,
@@ -315,7 +315,7 @@ void __iounmap(volatile void __iomem *token)
315{ 315{
316 void *addr; 316 void *addr;
317 317
318 if (!mem_init_done) 318 if (!slab_is_available())
319 return; 319 return;
320 320
321 addr = (void *) ((unsigned long __force) 321 addr = (void *) ((unsigned long __force)
@@ -723,7 +723,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
723 assert_spin_locked(&mm->page_table_lock); 723 assert_spin_locked(&mm->page_table_lock);
724 WARN_ON(!pmd_trans_huge(pmd)); 724 WARN_ON(!pmd_trans_huge(pmd));
725#endif 725#endif
726 trace_hugepage_set_pmd(addr, pmd); 726 trace_hugepage_set_pmd(addr, pmd_val(pmd));
727 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); 727 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
728} 728}
729 729
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 5029dc19b517..6b2f3e457171 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -113,11 +113,12 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
113 * of 2 between 128k and 256M. 113 * of 2 between 128k and 256M.
114 */ 114 */
115void __init setbat(int index, unsigned long virt, phys_addr_t phys, 115void __init setbat(int index, unsigned long virt, phys_addr_t phys,
116 unsigned int size, int flags) 116 unsigned int size, pgprot_t prot)
117{ 117{
118 unsigned int bl; 118 unsigned int bl;
119 int wimgxpp; 119 int wimgxpp;
120 struct ppc_bat *bat = BATS[index]; 120 struct ppc_bat *bat = BATS[index];
121 unsigned long flags = pgprot_val(prot);
121 122
122 if ((flags & _PAGE_NO_CACHE) || 123 if ((flags & _PAGE_NO_CACHE) ||
123 (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0)) 124 (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
@@ -224,7 +225,7 @@ void __init MMU_init_hw(void)
224 */ 225 */
225 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); 226 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
226 Hash = __va(memblock_alloc(Hash_size, Hash_size)); 227 Hash = __va(memblock_alloc(Hash_size, Hash_size));
227 cacheable_memzero(Hash, Hash_size); 228 memset(Hash, 0, Hash_size);
228 _SDR1 = __pa(Hash) | SDR1_LOW_BITS; 229 _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
229 230
230 Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size); 231 Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index d2a94b85dbc2..c522969f012d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
216 continue; 216 continue;
217 pte = pte_val(*ptep); 217 pte = pte_val(*ptep);
218 if (hugepage_shift) 218 if (hugepage_shift)
219 trace_hugepage_invalidate(start, pte_val(pte)); 219 trace_hugepage_invalidate(start, pte);
220 if (!(pte & _PAGE_HASHPTE)) 220 if (!(pte & _PAGE_HASHPTE))
221 continue; 221 continue;
222 if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte))) 222 if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
diff --git a/arch/powerpc/mm/vphn.c b/arch/powerpc/mm/vphn.c
new file mode 100644
index 000000000000..5f8ef50e5c66
--- /dev/null
+++ b/arch/powerpc/mm/vphn.c
@@ -0,0 +1,70 @@
1#include <asm/byteorder.h>
2#include "vphn.h"
3
4/*
5 * The associativity domain numbers are returned from the hypervisor as a
6 * stream of mixed 16-bit and 32-bit fields. The stream is terminated by the
7 * special value of "all ones" (aka. 0xffff) and its size may not exceed 48
8 * bytes.
9 *
10 * --- 16-bit fields -->
11 * _________________________
12 * | 0 | 1 | 2 | 3 | be_packed[0]
13 * ------+-----+-----+------
14 * _________________________
15 * | 4 | 5 | 6 | 7 | be_packed[1]
16 * -------------------------
17 * ...
18 * _________________________
19 * | 20 | 21 | 22 | 23 | be_packed[5]
20 * -------------------------
21 *
22 * Convert to the sequence they would appear in the ibm,associativity property.
23 */
24int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
25{
26 __be64 be_packed[VPHN_REGISTER_COUNT];
27 int i, nr_assoc_doms = 0;
28 const __be16 *field = (const __be16 *) be_packed;
29 u16 last = 0;
30 bool is_32bit = false;
31
32#define VPHN_FIELD_UNUSED (0xffff)
33#define VPHN_FIELD_MSB (0x8000)
34#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
35
36 /* Let's fix the values returned by plpar_hcall9() */
37 for (i = 0; i < VPHN_REGISTER_COUNT; i++)
38 be_packed[i] = cpu_to_be64(packed[i]);
39
40 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
41 u16 new = be16_to_cpup(field++);
42
43 if (is_32bit) {
44 /* Let's concatenate the 16 bits of this field to the
45 * 15 lower bits of the previous field
46 */
47 unpacked[++nr_assoc_doms] =
48 cpu_to_be32(last << 16 | new);
49 is_32bit = false;
50 } else if (new == VPHN_FIELD_UNUSED)
51 /* This is the list terminator */
52 break;
53 else if (new & VPHN_FIELD_MSB) {
54 /* Data is in the lower 15 bits of this field */
55 unpacked[++nr_assoc_doms] =
56 cpu_to_be32(new & VPHN_FIELD_MASK);
57 } else {
58 /* Data is in the lower 15 bits of this field
59 * concatenated with the next 16 bit field
60 */
61 last = new;
62 is_32bit = true;
63 }
64 }
65
66 /* The first cell contains the length of the property */
67 unpacked[0] = cpu_to_be32(nr_assoc_doms);
68
69 return nr_assoc_doms;
70}
diff --git a/arch/powerpc/mm/vphn.h b/arch/powerpc/mm/vphn.h
new file mode 100644
index 000000000000..fe8b7805b78f
--- /dev/null
+++ b/arch/powerpc/mm/vphn.h
@@ -0,0 +1,16 @@
1#ifndef _ARCH_POWERPC_MM_VPHN_H_
2#define _ARCH_POWERPC_MM_VPHN_H_
3
4/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers.
5 */
6#define VPHN_REGISTER_COUNT 6
7
8/*
9 * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
10 * form the complete property we have to add the length in the first cell.
11 */
12#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
13
14extern int vphn_unpack_associativity(const long *packed, __be32 *unpacked);
15
16#endif
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 2396dda282cd..ead55351b254 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
243 sp = regs->gpr[1]; 243 sp = regs->gpr[1];
244 perf_callchain_store(entry, next_ip); 244 perf_callchain_store(entry, next_ip);
245 245
246 for (;;) { 246 while (entry->nr < PERF_MAX_STACK_DEPTH) {
247 fp = (unsigned long __user *) sp; 247 fp = (unsigned long __user *) sp;
248 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) 248 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
249 return; 249 return;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7fd60dcb2cb0..12b638425bb9 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1837,8 +1837,10 @@ static int power_pmu_event_init(struct perf_event *event)
1837 cpuhw->bhrb_filter = ppmu->bhrb_filter_map( 1837 cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
1838 event->attr.branch_sample_type); 1838 event->attr.branch_sample_type);
1839 1839
1840 if(cpuhw->bhrb_filter == -1) 1840 if (cpuhw->bhrb_filter == -1) {
1841 put_cpu_var(cpu_hw_events);
1841 return -EOPNOTSUPP; 1842 return -EOPNOTSUPP;
1843 }
1842 } 1844 }
1843 1845
1844 put_cpu_var(cpu_hw_events); 1846 put_cpu_var(cpu_hw_events);
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index abeb9ec0d117..ec2eb20631d1 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -142,6 +142,15 @@ static struct attribute_group event_long_desc_group = {
142 142
143static struct kmem_cache *hv_page_cache; 143static struct kmem_cache *hv_page_cache;
144 144
145/*
146 * request_buffer and result_buffer are not required to be 4k aligned,
147 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
148 * the simplest way to ensure that.
149 */
150#define H24x7_DATA_BUFFER_SIZE 4096
151DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
152DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
153
145static char *event_name(struct hv_24x7_event_data *ev, int *len) 154static char *event_name(struct hv_24x7_event_data *ev, int *len)
146{ 155{
147 *len = be16_to_cpu(ev->event_name_len) - 2; 156 *len = be16_to_cpu(ev->event_name_len) - 2;
@@ -152,6 +161,7 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
152{ 161{
153 unsigned nl = be16_to_cpu(ev->event_name_len); 162 unsigned nl = be16_to_cpu(ev->event_name_len);
154 __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2); 163 __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
164
155 *len = be16_to_cpu(*desc_len) - 2; 165 *len = be16_to_cpu(*desc_len) - 2;
156 return (char *)ev->remainder + nl; 166 return (char *)ev->remainder + nl;
157} 167}
@@ -162,6 +172,7 @@ static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
162 __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2); 172 __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
163 unsigned desc_len = be16_to_cpu(*desc_len_); 173 unsigned desc_len = be16_to_cpu(*desc_len_);
164 __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2); 174 __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
175
165 *len = be16_to_cpu(*long_desc_len) - 2; 176 *len = be16_to_cpu(*long_desc_len) - 2;
166 return (char *)ev->remainder + nl + desc_len; 177 return (char *)ev->remainder + nl + desc_len;
167} 178}
@@ -239,14 +250,12 @@ static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
239 unsigned long index) 250 unsigned long index)
240{ 251{
241 pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)", 252 pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
242 phys_4096, 253 phys_4096, version, index);
243 version, 254
244 index);
245 WARN_ON(!IS_ALIGNED(phys_4096, 4096)); 255 WARN_ON(!IS_ALIGNED(phys_4096, 4096));
256
246 return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE, 257 return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
247 phys_4096, 258 phys_4096, version, index);
248 version,
249 index);
250} 259}
251 260
252static unsigned long h_get_24x7_catalog_page(char page[], 261static unsigned long h_get_24x7_catalog_page(char page[],
@@ -300,6 +309,7 @@ static ssize_t device_show_string(struct device *dev,
300 struct dev_ext_attribute *d; 309 struct dev_ext_attribute *d;
301 310
302 d = container_of(attr, struct dev_ext_attribute, attr); 311 d = container_of(attr, struct dev_ext_attribute, attr);
312
303 return sprintf(buf, "%s\n", (char *)d->var); 313 return sprintf(buf, "%s\n", (char *)d->var);
304} 314}
305 315
@@ -314,6 +324,7 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
314 attr->attr.attr.name = name; 324 attr->attr.attr.name = name;
315 attr->attr.attr.mode = 0444; 325 attr->attr.attr.mode = 0444;
316 attr->attr.show = device_show_string; 326 attr->attr.show = device_show_string;
327
317 return &attr->attr.attr; 328 return &attr->attr.attr;
318} 329}
319 330
@@ -387,7 +398,6 @@ static struct attribute *event_to_attr(unsigned ix,
387 a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d", 398 a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
388 (int)event_name_len, ev_name, ev_suffix, nonce); 399 (int)event_name_len, ev_name, ev_suffix, nonce);
389 400
390
391 if (!a_ev_name) 401 if (!a_ev_name)
392 goto out_val; 402 goto out_val;
393 403
@@ -637,7 +647,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
637 647
638#define MAX_4K (SIZE_MAX / 4096) 648#define MAX_4K (SIZE_MAX / 4096)
639 649
640static void create_events_from_catalog(struct attribute ***events_, 650static int create_events_from_catalog(struct attribute ***events_,
641 struct attribute ***event_descs_, 651 struct attribute ***event_descs_,
642 struct attribute ***event_long_descs_) 652 struct attribute ***event_long_descs_)
643{ 653{
@@ -655,19 +665,25 @@ static void create_events_from_catalog(struct attribute ***events_,
655 void *event_data, *end; 665 void *event_data, *end;
656 struct hv_24x7_event_data *event; 666 struct hv_24x7_event_data *event;
657 struct rb_root ev_uniq = RB_ROOT; 667 struct rb_root ev_uniq = RB_ROOT;
668 int ret = 0;
658 669
659 if (!page) 670 if (!page) {
671 ret = -ENOMEM;
660 goto e_out; 672 goto e_out;
673 }
661 674
662 hret = h_get_24x7_catalog_page(page, 0, 0); 675 hret = h_get_24x7_catalog_page(page, 0, 0);
663 if (hret) 676 if (hret) {
677 ret = -EIO;
664 goto e_free; 678 goto e_free;
679 }
665 680
666 catalog_version_num = be64_to_cpu(page_0->version); 681 catalog_version_num = be64_to_cpu(page_0->version);
667 catalog_page_len = be32_to_cpu(page_0->length); 682 catalog_page_len = be32_to_cpu(page_0->length);
668 683
669 if (MAX_4K < catalog_page_len) { 684 if (MAX_4K < catalog_page_len) {
670 pr_err("invalid page count: %zu\n", catalog_page_len); 685 pr_err("invalid page count: %zu\n", catalog_page_len);
686 ret = -EIO;
671 goto e_free; 687 goto e_free;
672 } 688 }
673 689
@@ -686,6 +702,7 @@ static void create_events_from_catalog(struct attribute ***events_,
686 || (MAX_4K - event_data_offs < event_data_len)) { 702 || (MAX_4K - event_data_offs < event_data_len)) {
687 pr_err("invalid event data offs %zu and/or len %zu\n", 703 pr_err("invalid event data offs %zu and/or len %zu\n",
688 event_data_offs, event_data_len); 704 event_data_offs, event_data_len);
705 ret = -EIO;
689 goto e_free; 706 goto e_free;
690 } 707 }
691 708
@@ -694,12 +711,14 @@ static void create_events_from_catalog(struct attribute ***events_,
694 event_data_offs, 711 event_data_offs,
695 event_data_offs + event_data_len, 712 event_data_offs + event_data_len,
696 catalog_page_len); 713 catalog_page_len);
714 ret = -EIO;
697 goto e_free; 715 goto e_free;
698 } 716 }
699 717
700 if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) { 718 if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
701 pr_err("event_entry_count %zu is invalid\n", 719 pr_err("event_entry_count %zu is invalid\n",
702 event_entry_count); 720 event_entry_count);
721 ret = -EIO;
703 goto e_free; 722 goto e_free;
704 } 723 }
705 724
@@ -712,6 +731,7 @@ static void create_events_from_catalog(struct attribute ***events_,
712 event_data = vmalloc(event_data_bytes); 731 event_data = vmalloc(event_data_bytes);
713 if (!event_data) { 732 if (!event_data) {
714 pr_err("could not allocate event data\n"); 733 pr_err("could not allocate event data\n");
734 ret = -ENOMEM;
715 goto e_free; 735 goto e_free;
716 } 736 }
717 737
@@ -731,6 +751,7 @@ static void create_events_from_catalog(struct attribute ***events_,
731 if (hret) { 751 if (hret) {
732 pr_err("failed to get event data in page %zu\n", 752 pr_err("failed to get event data in page %zu\n",
733 i + event_data_offs); 753 i + event_data_offs);
754 ret = -EIO;
734 goto e_event_data; 755 goto e_event_data;
735 } 756 }
736 } 757 }
@@ -778,18 +799,24 @@ static void create_events_from_catalog(struct attribute ***events_,
778 event_idx_last, event_entry_count, junk_events); 799 event_idx_last, event_entry_count, junk_events);
779 800
780 events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL); 801 events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
781 if (!events) 802 if (!events) {
803 ret = -ENOMEM;
782 goto e_event_data; 804 goto e_event_data;
805 }
783 806
784 event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs), 807 event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
785 GFP_KERNEL); 808 GFP_KERNEL);
786 if (!event_descs) 809 if (!event_descs) {
810 ret = -ENOMEM;
787 goto e_event_attrs; 811 goto e_event_attrs;
812 }
788 813
789 event_long_descs = kmalloc_array(event_idx + 1, 814 event_long_descs = kmalloc_array(event_idx + 1,
790 sizeof(*event_long_descs), GFP_KERNEL); 815 sizeof(*event_long_descs), GFP_KERNEL);
791 if (!event_long_descs) 816 if (!event_long_descs) {
817 ret = -ENOMEM;
792 goto e_event_descs; 818 goto e_event_descs;
819 }
793 820
794 /* Iterate over the catalog filling in the attribute vector */ 821 /* Iterate over the catalog filling in the attribute vector */
795 for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0, 822 for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
@@ -843,7 +870,7 @@ static void create_events_from_catalog(struct attribute ***events_,
843 *events_ = events; 870 *events_ = events;
844 *event_descs_ = event_descs; 871 *event_descs_ = event_descs;
845 *event_long_descs_ = event_long_descs; 872 *event_long_descs_ = event_long_descs;
846 return; 873 return 0;
847 874
848e_event_descs: 875e_event_descs:
849 kfree(event_descs); 876 kfree(event_descs);
@@ -857,6 +884,7 @@ e_out:
857 *events_ = NULL; 884 *events_ = NULL;
858 *event_descs_ = NULL; 885 *event_descs_ = NULL;
859 *event_long_descs_ = NULL; 886 *event_long_descs_ = NULL;
887 return ret;
860} 888}
861 889
862static ssize_t catalog_read(struct file *filp, struct kobject *kobj, 890static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
@@ -872,6 +900,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
872 uint64_t catalog_version_num = 0; 900 uint64_t catalog_version_num = 0;
873 void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); 901 void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
874 struct hv_24x7_catalog_page_0 *page_0 = page; 902 struct hv_24x7_catalog_page_0 *page_0 = page;
903
875 if (!page) 904 if (!page)
876 return -ENOMEM; 905 return -ENOMEM;
877 906
@@ -976,31 +1005,104 @@ static const struct attribute_group *attr_groups[] = {
976 NULL, 1005 NULL,
977}; 1006};
978 1007
979DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096); 1008static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
980DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096); 1009 struct hv_24x7_data_result_buffer *result_buffer,
1010 unsigned long ret)
1011{
1012 struct hv_24x7_request *req;
1013
1014 req = &request_buffer->requests[0];
1015 pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => "
1016 "ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
1017 req->performance_domain, req->data_offset,
1018 req->starting_ix, req->starting_lpar_ix, ret, ret,
1019 result_buffer->detailed_rc,
1020 result_buffer->failing_request_ix);
1021}
1022
1023/*
1024 * Start the process for a new H_GET_24x7_DATA hcall.
1025 */
1026static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
1027 struct hv_24x7_data_result_buffer *result_buffer)
1028{
1029
1030 memset(request_buffer, 0, 4096);
1031 memset(result_buffer, 0, 4096);
1032
1033 request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
1034 /* memset above set request_buffer->num_requests to 0 */
1035}
981 1036
982static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, 1037/*
983 u16 lpar, u64 *res, 1038 * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected
984 bool success_expected) 1039 * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
1040 */
1041static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
1042 struct hv_24x7_data_result_buffer *result_buffer)
985{ 1043{
986 unsigned long ret; 1044 unsigned long ret;
987 1045
988 /* 1046 /*
989 * request_buffer and result_buffer are not required to be 4k aligned, 1047 * NOTE: Due to variable number of array elements in request and
990 * but are not allowed to cross any 4k boundary. Aligning them to 4k is 1048 * result buffer(s), sizeof() is not reliable. Use the actual
991 * the simplest way to ensure that. 1049 * allocated buffer size, H24x7_DATA_BUFFER_SIZE.
992 */ 1050 */
993 struct reqb { 1051 ret = plpar_hcall_norets(H_GET_24X7_DATA,
994 struct hv_24x7_request_buffer buf; 1052 virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
995 struct hv_24x7_request req; 1053 virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
996 } __packed *request_buffer; 1054
997 1055 if (ret)
998 struct { 1056 log_24x7_hcall(request_buffer, result_buffer, ret);
999 struct hv_24x7_data_result_buffer buf; 1057
1000 struct hv_24x7_result res; 1058 return ret;
1001 struct hv_24x7_result_element elem; 1059}
1002 __be64 result; 1060
1003 } __packed *result_buffer; 1061/*
1062 * Add the given @event to the next slot in the 24x7 request_buffer.
1063 *
1064 * Note that H_GET_24X7_DATA hcall allows reading several counters'
1065 * values in a single HCALL. We expect the caller to add events to the
1066 * request buffer one by one, make the HCALL and process the results.
1067 */
1068static int add_event_to_24x7_request(struct perf_event *event,
1069 struct hv_24x7_request_buffer *request_buffer)
1070{
1071 u16 idx;
1072 int i;
1073 struct hv_24x7_request *req;
1074
1075 if (request_buffer->num_requests > 254) {
1076 pr_devel("Too many requests for 24x7 HCALL %d\n",
1077 request_buffer->num_requests);
1078 return -EINVAL;
1079 }
1080
1081 if (is_physical_domain(event_get_domain(event)))
1082 idx = event_get_core(event);
1083 else
1084 idx = event_get_vcpu(event);
1085
1086 i = request_buffer->num_requests++;
1087 req = &request_buffer->requests[i];
1088
1089 req->performance_domain = event_get_domain(event);
1090 req->data_size = cpu_to_be16(8);
1091 req->data_offset = cpu_to_be32(event_get_offset(event));
1092 req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)),
1093 req->max_num_lpars = cpu_to_be16(1);
1094 req->starting_ix = cpu_to_be16(idx);
1095 req->max_ix = cpu_to_be16(1);
1096
1097 return 0;
1098}
1099
1100static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
1101{
1102 unsigned long ret;
1103 struct hv_24x7_request_buffer *request_buffer;
1104 struct hv_24x7_data_result_buffer *result_buffer;
1105 struct hv_24x7_result *resb;
1004 1106
1005 BUILD_BUG_ON(sizeof(*request_buffer) > 4096); 1107 BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
1006 BUILD_BUG_ON(sizeof(*result_buffer) > 4096); 1108 BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1008,63 +1110,28 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
1008 request_buffer = (void *)get_cpu_var(hv_24x7_reqb); 1110 request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
1009 result_buffer = (void *)get_cpu_var(hv_24x7_resb); 1111 result_buffer = (void *)get_cpu_var(hv_24x7_resb);
1010 1112
1011 memset(request_buffer, 0, 4096); 1113 init_24x7_request(request_buffer, result_buffer);
1012 memset(result_buffer, 0, 4096);
1013
1014 *request_buffer = (struct reqb) {
1015 .buf = {
1016 .interface_version = HV_24X7_IF_VERSION_CURRENT,
1017 .num_requests = 1,
1018 },
1019 .req = {
1020 .performance_domain = domain,
1021 .data_size = cpu_to_be16(8),
1022 .data_offset = cpu_to_be32(offset),
1023 .starting_lpar_ix = cpu_to_be16(lpar),
1024 .max_num_lpars = cpu_to_be16(1),
1025 .starting_ix = cpu_to_be16(ix),
1026 .max_ix = cpu_to_be16(1),
1027 }
1028 };
1029 1114
1030 ret = plpar_hcall_norets(H_GET_24X7_DATA, 1115 ret = add_event_to_24x7_request(event, request_buffer);
1031 virt_to_phys(request_buffer), sizeof(*request_buffer), 1116 if (ret)
1032 virt_to_phys(result_buffer), sizeof(*result_buffer)); 1117 goto out;
1033 1118
1119 ret = make_24x7_request(request_buffer, result_buffer);
1034 if (ret) { 1120 if (ret) {
1035 if (success_expected) 1121 log_24x7_hcall(request_buffer, result_buffer, ret);
1036 pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
1037 "0x%lx (%ld) detail=0x%x failing ix=%x\n",
1038 domain, offset, ix, lpar, ret, ret,
1039 result_buffer->buf.detailed_rc,
1040 result_buffer->buf.failing_request_ix);
1041 goto out; 1122 goto out;
1042 } 1123 }
1043 1124
1044 *res = be64_to_cpu(result_buffer->result); 1125 /* process result from hcall */
1126 resb = &result_buffer->results[0];
1127 *count = be64_to_cpu(resb->elements[0].element_data[0]);
1045 1128
1046out: 1129out:
1130 put_cpu_var(hv_24x7_reqb);
1131 put_cpu_var(hv_24x7_resb);
1047 return ret; 1132 return ret;
1048} 1133}
1049 1134
1050static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
1051 bool success_expected)
1052{
1053 u16 idx;
1054 unsigned domain = event_get_domain(event);
1055
1056 if (is_physical_domain(domain))
1057 idx = event_get_core(event);
1058 else
1059 idx = event_get_vcpu(event);
1060
1061 return single_24x7_request(event_get_domain(event),
1062 event_get_offset(event),
1063 idx,
1064 event_get_lpar(event),
1065 res,
1066 success_expected);
1067}
1068 1135
1069static int h_24x7_event_init(struct perf_event *event) 1136static int h_24x7_event_init(struct perf_event *event)
1070{ 1137{
@@ -1133,7 +1200,7 @@ static int h_24x7_event_init(struct perf_event *event)
1133 } 1200 }
1134 1201
1135 /* see if the event complains */ 1202 /* see if the event complains */
1136 if (event_24x7_request(event, &ct, false)) { 1203 if (single_24x7_request(event, &ct)) {
1137 pr_devel("test hcall failed\n"); 1204 pr_devel("test hcall failed\n");
1138 return -EIO; 1205 return -EIO;
1139 } 1206 }
@@ -1145,7 +1212,7 @@ static u64 h_24x7_get_value(struct perf_event *event)
1145{ 1212{
1146 unsigned long ret; 1213 unsigned long ret;
1147 u64 ct; 1214 u64 ct;
1148 ret = event_24x7_request(event, &ct, true); 1215 ret = single_24x7_request(event, &ct);
1149 if (ret) 1216 if (ret)
1150 /* We checked this in event init, shouldn't fail here... */ 1217 /* We checked this in event init, shouldn't fail here... */
1151 return 0; 1218 return 0;
@@ -1153,15 +1220,22 @@ static u64 h_24x7_get_value(struct perf_event *event)
1153 return ct; 1220 return ct;
1154} 1221}
1155 1222
1156static void h_24x7_event_update(struct perf_event *event) 1223static void update_event_count(struct perf_event *event, u64 now)
1157{ 1224{
1158 s64 prev; 1225 s64 prev;
1159 u64 now; 1226
1160 now = h_24x7_get_value(event);
1161 prev = local64_xchg(&event->hw.prev_count, now); 1227 prev = local64_xchg(&event->hw.prev_count, now);
1162 local64_add(now - prev, &event->count); 1228 local64_add(now - prev, &event->count);
1163} 1229}
1164 1230
1231static void h_24x7_event_read(struct perf_event *event)
1232{
1233 u64 now;
1234
1235 now = h_24x7_get_value(event);
1236 update_event_count(event, now);
1237}
1238
1165static void h_24x7_event_start(struct perf_event *event, int flags) 1239static void h_24x7_event_start(struct perf_event *event, int flags)
1166{ 1240{
1167 if (flags & PERF_EF_RELOAD) 1241 if (flags & PERF_EF_RELOAD)
@@ -1170,7 +1244,7 @@ static void h_24x7_event_start(struct perf_event *event, int flags)
1170 1244
1171static void h_24x7_event_stop(struct perf_event *event, int flags) 1245static void h_24x7_event_stop(struct perf_event *event, int flags)
1172{ 1246{
1173 h_24x7_event_update(event); 1247 h_24x7_event_read(event);
1174} 1248}
1175 1249
1176static int h_24x7_event_add(struct perf_event *event, int flags) 1250static int h_24x7_event_add(struct perf_event *event, int flags)
@@ -1191,7 +1265,7 @@ static struct pmu h_24x7_pmu = {
1191 .del = h_24x7_event_stop, 1265 .del = h_24x7_event_stop,
1192 .start = h_24x7_event_start, 1266 .start = h_24x7_event_start,
1193 .stop = h_24x7_event_stop, 1267 .stop = h_24x7_event_stop,
1194 .read = h_24x7_event_update, 1268 .read = h_24x7_event_read,
1195}; 1269};
1196 1270
1197static int hv_24x7_init(void) 1271static int hv_24x7_init(void)
@@ -1219,10 +1293,13 @@ static int hv_24x7_init(void)
1219 /* sampling not supported */ 1293 /* sampling not supported */
1220 h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 1294 h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1221 1295
1222 create_events_from_catalog(&event_group.attrs, 1296 r = create_events_from_catalog(&event_group.attrs,
1223 &event_desc_group.attrs, 1297 &event_desc_group.attrs,
1224 &event_long_desc_group.attrs); 1298 &event_long_desc_group.attrs);
1225 1299
1300 if (r)
1301 return r;
1302
1226 r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); 1303 r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
1227 if (r) 1304 if (r)
1228 return r; 1305 return r;
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 69cd4e690f58..0f9fa21a29f2 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -50,7 +50,7 @@ struct hv_24x7_request_buffer {
50 __u8 interface_version; 50 __u8 interface_version;
51 __u8 num_requests; 51 __u8 num_requests;
52 __u8 reserved[0xE]; 52 __u8 reserved[0xE];
53 struct hv_24x7_request requests[]; 53 struct hv_24x7_request requests[1];
54} __packed; 54} __packed;
55 55
56struct hv_24x7_result_element { 56struct hv_24x7_result_element {
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
66 __be32 lpar_cfg_instance_id; 66 __be32 lpar_cfg_instance_id;
67 67
68 /* size = @result_element_data_size of cointaining result. */ 68 /* size = @result_element_data_size of cointaining result. */
69 __u8 element_data[]; 69 __u64 element_data[1];
70} __packed; 70} __packed;
71 71
72struct hv_24x7_result { 72struct hv_24x7_result {
@@ -87,7 +87,7 @@ struct hv_24x7_result {
87 /* WARNING: only valid for first result element due to variable sizes 87 /* WARNING: only valid for first result element due to variable sizes
88 * of result elements */ 88 * of result elements */
89 /* struct hv_24x7_result_element[@num_elements_returned] */ 89 /* struct hv_24x7_result_element[@num_elements_returned] */
90 struct hv_24x7_result_element elements[]; 90 struct hv_24x7_result_element elements[1];
91} __packed; 91} __packed;
92 92
93struct hv_24x7_data_result_buffer { 93struct hv_24x7_data_result_buffer {
@@ -103,7 +103,7 @@ struct hv_24x7_data_result_buffer {
103 __u8 reserved2[0x8]; 103 __u8 reserved2[0x8];
104 /* WARNING: only valid for the first result due to variable sizes of 104 /* WARNING: only valid for the first result due to variable sizes of
105 * results */ 105 * results */
106 struct hv_24x7_result results[]; /* [@num_results] */ 106 struct hv_24x7_result results[1]; /* [@num_results] */
107} __packed; 107} __packed;
108 108
109#endif 109#endif
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 4a9ad871a168..7bfb9b184dd4 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -40,6 +40,7 @@ static const struct of_device_id mpc85xx_common_ids[] __initconst = {
40 { .compatible = "fsl,qoriq-pcie-v2.4", }, 40 { .compatible = "fsl,qoriq-pcie-v2.4", },
41 { .compatible = "fsl,qoriq-pcie-v2.3", }, 41 { .compatible = "fsl,qoriq-pcie-v2.3", },
42 { .compatible = "fsl,qoriq-pcie-v2.2", }, 42 { .compatible = "fsl,qoriq-pcie-v2.2", },
43 { .compatible = "fsl,fman", },
43 {}, 44 {},
44}; 45};
45 46
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 1f309ccb096e..9824d2cf79bd 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -88,6 +88,15 @@ static const struct of_device_id of_device_ids[] = {
88 .compatible = "simple-bus" 88 .compatible = "simple-bus"
89 }, 89 },
90 { 90 {
91 .compatible = "mdio-mux-gpio"
92 },
93 {
94 .compatible = "fsl,fpga-ngpixis"
95 },
96 {
97 .compatible = "fsl,fpga-qixis"
98 },
99 {
91 .compatible = "fsl,srio", 100 .compatible = "fsl,srio",
92 }, 101 },
93 { 102 {
@@ -108,6 +117,9 @@ static const struct of_device_id of_device_ids[] = {
108 { 117 {
109 .compatible = "fsl,qe", 118 .compatible = "fsl,qe",
110 }, 119 },
120 {
121 .compatible = "fsl,fman",
122 },
111 /* The following two are for the Freescale hypervisor */ 123 /* The following two are for the Freescale hypervisor */
112 { 124 {
113 .name = "hypervisor", 125 .name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d7c1e69f3070..8631ac5f0e57 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -360,10 +360,10 @@ static void mpc85xx_smp_kexec_down(void *arg)
360static void map_and_flush(unsigned long paddr) 360static void map_and_flush(unsigned long paddr)
361{ 361{
362 struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); 362 struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
363 unsigned long kaddr = (unsigned long)kmap(page); 363 unsigned long kaddr = (unsigned long)kmap_atomic(page);
364 364
365 flush_dcache_range(kaddr, kaddr + PAGE_SIZE); 365 flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
366 kunmap(page); 366 kunmap_atomic((void *)kaddr);
367} 367}
368 368
369/** 369/**
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 391b3f6b54a3..b7f9c408bf24 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -72,11 +72,6 @@ config PPC_SMP_MUXED_IPI
72 cpu. This will enable the generic code to multiplex the 4 72 cpu. This will enable the generic code to multiplex the 4
73 messages on to one ipi. 73 messages on to one ipi.
74 74
75config PPC_UDBG_BEAT
76 bool "BEAT based debug console"
77 depends on PPC_CELLEB
78 default n
79
80config IPIC 75config IPIC
81 bool 76 bool
82 default n 77 default n
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 76483e3acd60..7264e91190be 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,6 +2,7 @@ config PPC64
2 bool "64-bit kernel" 2 bool "64-bit kernel"
3 default n 3 default n
4 select HAVE_VIRT_CPU_ACCOUNTING 4 select HAVE_VIRT_CPU_ACCOUNTING
5 select ZLIB_DEFLATE
5 help 6 help
6 This option selects whether a 32-bit or a 64-bit kernel 7 This option selects whether a 32-bit or a 64-bit kernel
7 will be built. 8 will be built.
@@ -15,7 +16,7 @@ choice
15 The most common ones are the desktop and server CPUs (601, 603, 16 The most common ones are the desktop and server CPUs (601, 603,
16 604, 740, 750, 74xx) CPUs from Freescale and IBM, with their 17 604, 740, 750, 74xx) CPUs from Freescale and IBM, with their
17 embedded 512x/52xx/82xx/83xx/86xx counterparts. 18 embedded 512x/52xx/82xx/83xx/86xx counterparts.
18 The other embeeded parts, namely 4xx, 8xx, e200 (55xx) and e500 19 The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500
19 (85xx) each form a family of their own that is not compatible 20 (85xx) each form a family of their own that is not compatible
20 with the others. 21 with the others.
21 22
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 870b6dbd4d18..2f23133ab3d1 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -33,17 +33,6 @@ config PPC_IBM_CELL_BLADE
33 select PPC_UDBG_16550 33 select PPC_UDBG_16550
34 select UDBG_RTAS_CONSOLE 34 select UDBG_RTAS_CONSOLE
35 35
36config PPC_CELLEB
37 bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
38 depends on PPC64 && PPC_BOOK3S
39 select PPC_CELL_NATIVE
40 select PPC_OF_PLATFORM_PCI
41 select PCI
42 select HAS_TXX9_SERIAL
43 select PPC_UDBG_BEAT
44 select USB_OHCI_BIG_ENDIAN_MMIO
45 select USB_EHCI_BIG_ENDIAN_MMIO
46
47config PPC_CELL_QPACE 36config PPC_CELL_QPACE
48 bool "IBM Cell - QPACE" 37 bool "IBM Cell - QPACE"
49 depends on PPC64 && PPC_BOOK3S 38 depends on PPC64 && PPC_BOOK3S
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 2d16884f67b9..34699bddfddd 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -29,18 +29,3 @@ obj-$(CONFIG_AXON_MSI) += axon_msi.o
29 29
30# qpace setup 30# qpace setup
31obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o 31obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
32
33# celleb stuff
34ifeq ($(CONFIG_PPC_CELLEB),y)
35obj-y += celleb_setup.o \
36 celleb_pci.o celleb_scc_epci.o \
37 celleb_scc_pciex.o \
38 celleb_scc_uhc.o \
39 spider-pci.o beat.o beat_htab.o \
40 beat_hvCall.o beat_interrupt.o \
41 beat_iommu.o
42
43obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
44obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
45obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
46endif
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
deleted file mode 100644
index affcf566d460..000000000000
--- a/arch/powerpc/platforms/cell/beat.c
+++ /dev/null
@@ -1,264 +0,0 @@
1/*
2 * Simple routines for Celleb/Beat
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/export.h>
22#include <linux/init.h>
23#include <linux/err.h>
24#include <linux/rtc.h>
25#include <linux/interrupt.h>
26#include <linux/irqreturn.h>
27#include <linux/reboot.h>
28
29#include <asm/hvconsole.h>
30#include <asm/time.h>
31#include <asm/machdep.h>
32#include <asm/firmware.h>
33
34#include "beat_wrapper.h"
35#include "beat.h"
36#include "beat_interrupt.h"
37
38static int beat_pm_poweroff_flag;
39
40void beat_restart(char *cmd)
41{
42 beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
43}
44
45void beat_power_off(void)
46{
47 beat_shutdown_logical_partition(0);
48}
49
50u64 beat_halt_code = 0x1000000000000000UL;
51EXPORT_SYMBOL(beat_halt_code);
52
53void beat_halt(void)
54{
55 beat_shutdown_logical_partition(beat_halt_code);
56}
57
58int beat_set_rtc_time(struct rtc_time *rtc_time)
59{
60 u64 tim;
61 tim = mktime(rtc_time->tm_year+1900,
62 rtc_time->tm_mon+1, rtc_time->tm_mday,
63 rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
64 if (beat_rtc_write(tim))
65 return -1;
66 return 0;
67}
68
69void beat_get_rtc_time(struct rtc_time *rtc_time)
70{
71 u64 tim;
72
73 if (beat_rtc_read(&tim))
74 tim = 0;
75 to_tm(tim, rtc_time);
76 rtc_time->tm_year -= 1900;
77 rtc_time->tm_mon -= 1;
78}
79
80#define BEAT_NVRAM_SIZE 4096
81
82ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
83{
84 unsigned int i;
85 unsigned long len;
86 char *p = buf;
87
88 if (*index >= BEAT_NVRAM_SIZE)
89 return -ENODEV;
90 i = *index;
91 if (i + count > BEAT_NVRAM_SIZE)
92 count = BEAT_NVRAM_SIZE - i;
93
94 for (; count != 0; count -= len) {
95 len = count;
96 if (len > BEAT_NVRW_CNT)
97 len = BEAT_NVRW_CNT;
98 if (beat_eeprom_read(i, len, p))
99 return -EIO;
100
101 p += len;
102 i += len;
103 }
104 *index = i;
105 return p - buf;
106}
107
108ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
109{
110 unsigned int i;
111 unsigned long len;
112 char *p = buf;
113
114 if (*index >= BEAT_NVRAM_SIZE)
115 return -ENODEV;
116 i = *index;
117 if (i + count > BEAT_NVRAM_SIZE)
118 count = BEAT_NVRAM_SIZE - i;
119
120 for (; count != 0; count -= len) {
121 len = count;
122 if (len > BEAT_NVRW_CNT)
123 len = BEAT_NVRW_CNT;
124 if (beat_eeprom_write(i, len, p))
125 return -EIO;
126
127 p += len;
128 i += len;
129 }
130 *index = i;
131 return p - buf;
132}
133
134ssize_t beat_nvram_get_size(void)
135{
136 return BEAT_NVRAM_SIZE;
137}
138
139int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
140{
141 if (beat_set_dabr(dabr, dabrx))
142 return -1;
143 return 0;
144}
145
146int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
147{
148 u64 db[2];
149 s64 ret;
150
151 ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
152 if (ret == 0) {
153 *t1 = db[0];
154 *t2 = db[1];
155 }
156 return ret;
157}
158EXPORT_SYMBOL(beat_get_term_char);
159
160int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
161{
162 u64 db[2];
163
164 db[0] = t1;
165 db[1] = t2;
166 return beat_put_characters_to_console(vterm, len, (u8 *)db);
167}
168EXPORT_SYMBOL(beat_put_term_char);
169
170void beat_power_save(void)
171{
172 beat_pause(0);
173}
174
175#ifdef CONFIG_KEXEC
176void beat_kexec_cpu_down(int crash, int secondary)
177{
178 beatic_deinit_IRQ();
179}
180#endif
181
182static irqreturn_t beat_power_event(int virq, void *arg)
183{
184 printk(KERN_DEBUG "Beat: power button pressed\n");
185 beat_pm_poweroff_flag = 1;
186 ctrl_alt_del();
187 return IRQ_HANDLED;
188}
189
190static irqreturn_t beat_reset_event(int virq, void *arg)
191{
192 printk(KERN_DEBUG "Beat: reset button pressed\n");
193 beat_pm_poweroff_flag = 0;
194 ctrl_alt_del();
195 return IRQ_HANDLED;
196}
197
198static struct beat_event_list {
199 const char *typecode;
200 irq_handler_t handler;
201 unsigned int virq;
202} beat_event_list[] = {
203 { "power", beat_power_event, 0 },
204 { "reset", beat_reset_event, 0 },
205};
206
207static int __init beat_register_event(void)
208{
209 u64 path[4], data[2];
210 int rc, i;
211 unsigned int virq;
212
213 for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
214 struct beat_event_list *ev = &beat_event_list[i];
215
216 if (beat_construct_event_receive_port(data) != 0) {
217 printk(KERN_ERR "Beat: "
218 "cannot construct event receive port for %s\n",
219 ev->typecode);
220 return -EINVAL;
221 }
222
223 virq = irq_create_mapping(NULL, data[0]);
224 if (virq == NO_IRQ) {
225 printk(KERN_ERR "Beat: failed to get virtual IRQ"
226 " for event receive port for %s\n",
227 ev->typecode);
228 beat_destruct_event_receive_port(data[0]);
229 return -EIO;
230 }
231 ev->virq = virq;
232
233 rc = request_irq(virq, ev->handler, 0,
234 ev->typecode, NULL);
235 if (rc != 0) {
236 printk(KERN_ERR "Beat: failed to request virtual IRQ"
237 " for event receive port for %s\n",
238 ev->typecode);
239 beat_destruct_event_receive_port(data[0]);
240 return rc;
241 }
242
243 path[0] = 0x1000000065780000ul; /* 1,ex */
244 path[1] = 0x627574746f6e0000ul; /* button */
245 path[2] = 0;
246 strncpy((char *)&path[2], ev->typecode, 8);
247 path[3] = 0;
248 data[1] = 0;
249
250 beat_create_repository_node(path, data);
251 }
252 return 0;
253}
254
255static int __init beat_event_init(void)
256{
257 if (!firmware_has_feature(FW_FEATURE_BEAT))
258 return -EINVAL;
259
260 beat_pm_poweroff_flag = 0;
261 return beat_register_event();
262}
263
264device_initcall(beat_event_init);
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
deleted file mode 100644
index bfcb8e351ae5..000000000000
--- a/arch/powerpc/platforms/cell/beat.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Guest OS Interfaces.
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#ifndef _CELLEB_BEAT_H
22#define _CELLEB_BEAT_H
23
24int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
25int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
26int64_t beat_repository_encode(int, const char *, uint64_t[4]);
27void beat_restart(char *);
28void beat_power_off(void);
29void beat_halt(void);
30int beat_set_rtc_time(struct rtc_time *);
31void beat_get_rtc_time(struct rtc_time *);
32ssize_t beat_nvram_get_size(void);
33ssize_t beat_nvram_read(char *, size_t, loff_t *);
34ssize_t beat_nvram_write(char *, size_t, loff_t *);
35int beat_set_xdabr(unsigned long, unsigned long);
36void beat_power_save(void);
37void beat_kexec_cpu_down(int, int);
38
39#endif /* _CELLEB_BEAT_H */
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
deleted file mode 100644
index bee9232fe619..000000000000
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ /dev/null
@@ -1,445 +0,0 @@
1/*
2 * "Cell Reference Set" HTAB support.
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This code is based on arch/powerpc/platforms/pseries/lpar.c:
7 * Copyright (C) 2001 Todd Inglett, IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 */
23
24#undef DEBUG_LOW
25
26#include <linux/kernel.h>
27#include <linux/spinlock.h>
28
29#include <asm/mmu.h>
30#include <asm/page.h>
31#include <asm/pgtable.h>
32#include <asm/machdep.h>
33#include <asm/udbg.h>
34
35#include "beat_wrapper.h"
36
37#ifdef DEBUG_LOW
38#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while (0)
39#else
40#define DBG_LOW(fmt...) do { } while (0)
41#endif
42
43static DEFINE_RAW_SPINLOCK(beat_htab_lock);
44
45static inline unsigned int beat_read_mask(unsigned hpte_group)
46{
47 unsigned long rmask = 0;
48 u64 hpte_v[5];
49
50 beat_read_htab_entries(0, hpte_group + 0, hpte_v);
51 if (!(hpte_v[0] & HPTE_V_BOLTED))
52 rmask |= 0x8000;
53 if (!(hpte_v[1] & HPTE_V_BOLTED))
54 rmask |= 0x4000;
55 if (!(hpte_v[2] & HPTE_V_BOLTED))
56 rmask |= 0x2000;
57 if (!(hpte_v[3] & HPTE_V_BOLTED))
58 rmask |= 0x1000;
59 beat_read_htab_entries(0, hpte_group + 4, hpte_v);
60 if (!(hpte_v[0] & HPTE_V_BOLTED))
61 rmask |= 0x0800;
62 if (!(hpte_v[1] & HPTE_V_BOLTED))
63 rmask |= 0x0400;
64 if (!(hpte_v[2] & HPTE_V_BOLTED))
65 rmask |= 0x0200;
66 if (!(hpte_v[3] & HPTE_V_BOLTED))
67 rmask |= 0x0100;
68 hpte_group = ~hpte_group & (htab_hash_mask * HPTES_PER_GROUP);
69 beat_read_htab_entries(0, hpte_group + 0, hpte_v);
70 if (!(hpte_v[0] & HPTE_V_BOLTED))
71 rmask |= 0x80;
72 if (!(hpte_v[1] & HPTE_V_BOLTED))
73 rmask |= 0x40;
74 if (!(hpte_v[2] & HPTE_V_BOLTED))
75 rmask |= 0x20;
76 if (!(hpte_v[3] & HPTE_V_BOLTED))
77 rmask |= 0x10;
78 beat_read_htab_entries(0, hpte_group + 4, hpte_v);
79 if (!(hpte_v[0] & HPTE_V_BOLTED))
80 rmask |= 0x08;
81 if (!(hpte_v[1] & HPTE_V_BOLTED))
82 rmask |= 0x04;
83 if (!(hpte_v[2] & HPTE_V_BOLTED))
84 rmask |= 0x02;
85 if (!(hpte_v[3] & HPTE_V_BOLTED))
86 rmask |= 0x01;
87 return rmask;
88}
89
90static long beat_lpar_hpte_insert(unsigned long hpte_group,
91 unsigned long vpn, unsigned long pa,
92 unsigned long rflags, unsigned long vflags,
93 int psize, int apsize, int ssize)
94{
95 unsigned long lpar_rc;
96 u64 hpte_v, hpte_r, slot;
97
98 if (vflags & HPTE_V_SECONDARY)
99 return -1;
100
101 if (!(vflags & HPTE_V_BOLTED))
102 DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
103 "rflags=%lx, vflags=%lx, psize=%d)\n",
104 hpte_group, va, pa, rflags, vflags, psize);
105
106 hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
107 vflags | HPTE_V_VALID;
108 hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
109
110 if (!(vflags & HPTE_V_BOLTED))
111 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
112
113 if (rflags & _PAGE_NO_CACHE)
114 hpte_r &= ~HPTE_R_M;
115
116 raw_spin_lock(&beat_htab_lock);
117 lpar_rc = beat_read_mask(hpte_group);
118 if (lpar_rc == 0) {
119 if (!(vflags & HPTE_V_BOLTED))
120 DBG_LOW(" full\n");
121 raw_spin_unlock(&beat_htab_lock);
122 return -1;
123 }
124
125 lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
126 hpte_v, hpte_r, &slot);
127 raw_spin_unlock(&beat_htab_lock);
128
129 /*
130 * Since we try and ioremap PHBs we don't own, the pte insert
131 * will fail. However we must catch the failure in hash_page
132 * or we will loop forever, so return -2 in this case.
133 */
134 if (unlikely(lpar_rc != 0)) {
135 if (!(vflags & HPTE_V_BOLTED))
136 DBG_LOW(" lpar err %lx\n", lpar_rc);
137 return -2;
138 }
139 if (!(vflags & HPTE_V_BOLTED))
140 DBG_LOW(" -> slot: %lx\n", slot);
141
142 /* We have to pass down the secondary bucket bit here as well */
143 return (slot ^ hpte_group) & 15;
144}
145
146static long beat_lpar_hpte_remove(unsigned long hpte_group)
147{
148 DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
149 return -1;
150}
151
152static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
153{
154 unsigned long dword0;
155 unsigned long lpar_rc;
156 u64 dword[5];
157
158 lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
159
160 dword0 = dword[slot&3];
161
162 BUG_ON(lpar_rc != 0);
163
164 return dword0;
165}
166
167static void beat_lpar_hptab_clear(void)
168{
169 unsigned long size_bytes = 1UL << ppc64_pft_size;
170 unsigned long hpte_count = size_bytes >> 4;
171 int i;
172 u64 dummy0, dummy1;
173
174 /* TODO: Use bulk call */
175 for (i = 0; i < hpte_count; i++)
176 beat_write_htab_entry(0, i, 0, 0, -1UL, -1UL, &dummy0, &dummy1);
177}
178
179/*
180 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
181 * the low 3 bits of flags happen to line up. So no transform is needed.
182 * We can probably optimize here and assume the high bits of newpp are
183 * already zero. For now I am paranoid.
184 */
185static long beat_lpar_hpte_updatepp(unsigned long slot,
186 unsigned long newpp,
187 unsigned long vpn,
188 int psize, int apsize,
189 int ssize, unsigned long flags)
190{
191 unsigned long lpar_rc;
192 u64 dummy0, dummy1;
193 unsigned long want_v;
194
195 want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
196
197 DBG_LOW(" update: "
198 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
199 want_v & HPTE_V_AVPN, slot, psize, newpp);
200
201 raw_spin_lock(&beat_htab_lock);
202 dummy0 = beat_lpar_hpte_getword0(slot);
203 if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
204 DBG_LOW("not found !\n");
205 raw_spin_unlock(&beat_htab_lock);
206 return -1;
207 }
208
209 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
210 &dummy1);
211 raw_spin_unlock(&beat_htab_lock);
212 if (lpar_rc != 0 || dummy0 == 0) {
213 DBG_LOW("not found !\n");
214 return -1;
215 }
216
217 DBG_LOW("ok %lx %lx\n", dummy0, dummy1);
218
219 BUG_ON(lpar_rc != 0);
220
221 return 0;
222}
223
224static long beat_lpar_hpte_find(unsigned long vpn, int psize)
225{
226 unsigned long hash;
227 unsigned long i, j;
228 long slot;
229 unsigned long want_v, hpte_v;
230
231 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
232 want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
233
234 for (j = 0; j < 2; j++) {
235 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
236 for (i = 0; i < HPTES_PER_GROUP; i++) {
237 hpte_v = beat_lpar_hpte_getword0(slot);
238
239 if (HPTE_V_COMPARE(hpte_v, want_v)
240 && (hpte_v & HPTE_V_VALID)
241 && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
242 /* HPTE matches */
243 if (j)
244 slot = -slot;
245 return slot;
246 }
247 ++slot;
248 }
249 hash = ~hash;
250 }
251
252 return -1;
253}
254
255static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
256 unsigned long ea,
257 int psize, int ssize)
258{
259 unsigned long vpn;
260 unsigned long lpar_rc, slot, vsid;
261 u64 dummy0, dummy1;
262
263 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
264 vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
265
266 raw_spin_lock(&beat_htab_lock);
267 slot = beat_lpar_hpte_find(vpn, psize);
268 BUG_ON(slot == -1);
269
270 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
271 &dummy0, &dummy1);
272 raw_spin_unlock(&beat_htab_lock);
273
274 BUG_ON(lpar_rc != 0);
275}
276
277static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
278 int psize, int apsize,
279 int ssize, int local)
280{
281 unsigned long want_v;
282 unsigned long lpar_rc;
283 u64 dummy1, dummy2;
284 unsigned long flags;
285
286 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
287 slot, va, psize, local);
288 want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
289
290 raw_spin_lock_irqsave(&beat_htab_lock, flags);
291 dummy1 = beat_lpar_hpte_getword0(slot);
292
293 if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
294 DBG_LOW("not found !\n");
295 raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
296 return;
297 }
298
299 lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
300 &dummy1, &dummy2);
301 raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
302
303 BUG_ON(lpar_rc != 0);
304}
305
306void __init hpte_init_beat(void)
307{
308 ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
309 ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
310 ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
311 ppc_md.hpte_insert = beat_lpar_hpte_insert;
312 ppc_md.hpte_remove = beat_lpar_hpte_remove;
313 ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
314}
315
316static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
317 unsigned long vpn, unsigned long pa,
318 unsigned long rflags, unsigned long vflags,
319 int psize, int apsize, int ssize)
320{
321 unsigned long lpar_rc;
322 u64 hpte_v, hpte_r, slot;
323
324 if (vflags & HPTE_V_SECONDARY)
325 return -1;
326
327 if (!(vflags & HPTE_V_BOLTED))
328 DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
329 "rflags=%lx, vflags=%lx, psize=%d)\n",
330 hpte_group, vpn, pa, rflags, vflags, psize);
331
332 hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
333 vflags | HPTE_V_VALID;
334 hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
335
336 if (!(vflags & HPTE_V_BOLTED))
337 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
338
339 if (rflags & _PAGE_NO_CACHE)
340 hpte_r &= ~HPTE_R_M;
341
342 /* insert into not-volted entry */
343 lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
344 HPTE_V_BOLTED, 0, &slot);
345 /*
346 * Since we try and ioremap PHBs we don't own, the pte insert
347 * will fail. However we must catch the failure in hash_page
348 * or we will loop forever, so return -2 in this case.
349 */
350 if (unlikely(lpar_rc != 0)) {
351 if (!(vflags & HPTE_V_BOLTED))
352 DBG_LOW(" lpar err %lx\n", lpar_rc);
353 return -2;
354 }
355 if (!(vflags & HPTE_V_BOLTED))
356 DBG_LOW(" -> slot: %lx\n", slot);
357
358 /* We have to pass down the secondary bucket bit here as well */
359 return (slot ^ hpte_group) & 15;
360}
361
362/*
363 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
364 * the low 3 bits of flags happen to line up. So no transform is needed.
365 * We can probably optimize here and assume the high bits of newpp are
366 * already zero. For now I am paranoid.
367 */
368static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
369 unsigned long newpp,
370 unsigned long vpn,
371 int psize, int apsize,
372 int ssize, unsigned long flags)
373{
374 unsigned long lpar_rc;
375 unsigned long want_v;
376 unsigned long pss;
377
378 want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
379 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
380
381 DBG_LOW(" update: "
382 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
383 want_v & HPTE_V_AVPN, slot, psize, newpp);
384
385 lpar_rc = beat_update_htab_permission3(0, slot, want_v, pss, 7, newpp);
386
387 if (lpar_rc == 0xfffffff7) {
388 DBG_LOW("not found !\n");
389 return -1;
390 }
391
392 DBG_LOW("ok\n");
393
394 BUG_ON(lpar_rc != 0);
395
396 return 0;
397}
398
399static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
400 int psize, int apsize,
401 int ssize, int local)
402{
403 unsigned long want_v;
404 unsigned long lpar_rc;
405 unsigned long pss;
406
407 DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
408 slot, vpn, psize, local);
409 want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
410 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
411
412 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
413
414 /* E_busy can be valid output: page may be already replaced */
415 BUG_ON(lpar_rc != 0 && lpar_rc != 0xfffffff7);
416}
417
418static int64_t _beat_lpar_hptab_clear_v3(void)
419{
420 return beat_clear_htab3(0);
421}
422
423static void beat_lpar_hptab_clear_v3(void)
424{
425 _beat_lpar_hptab_clear_v3();
426}
427
428void __init hpte_init_beat_v3(void)
429{
430 if (_beat_lpar_hptab_clear_v3() == 0) {
431 ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate_v3;
432 ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp_v3;
433 ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
434 ppc_md.hpte_insert = beat_lpar_hpte_insert_v3;
435 ppc_md.hpte_remove = beat_lpar_hpte_remove;
436 ppc_md.hpte_clear_all = beat_lpar_hptab_clear_v3;
437 } else {
438 ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
439 ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
440 ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
441 ppc_md.hpte_insert = beat_lpar_hpte_insert;
442 ppc_md.hpte_remove = beat_lpar_hpte_remove;
443 ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
444 }
445}
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
deleted file mode 100644
index 96c801907126..000000000000
--- a/arch/powerpc/platforms/cell/beat_hvCall.S
+++ /dev/null
@@ -1,285 +0,0 @@
1/*
2 * Beat hypervisor call I/F
3 *
4 * (C) Copyright 2007 TOSHIBA CORPORATION
5 *
6 * This code is based on arch/powerpc/platforms/pseries/hvCall.S.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <asm/ppc_asm.h>
24
25/* Not implemented on Beat, now */
26#define HCALL_INST_PRECALL
27#define HCALL_INST_POSTCALL
28
29 .text
30
31#define HVSC .long 0x44000022
32
33/* Note: takes only 7 input parameters at maximum */
34_GLOBAL(beat_hcall_norets)
35 HMT_MEDIUM
36
37 mfcr r0
38 stw r0,8(r1)
39
40 HCALL_INST_PRECALL
41
42 mr r11,r3
43 mr r3,r4
44 mr r4,r5
45 mr r5,r6
46 mr r6,r7
47 mr r7,r8
48 mr r8,r9
49
50 HVSC /* invoke the hypervisor */
51
52 HCALL_INST_POSTCALL
53
54 lwz r0,8(r1)
55 mtcrf 0xff,r0
56
57 blr /* return r3 = status */
58
59/* Note: takes 8 input parameters at maximum */
60_GLOBAL(beat_hcall_norets8)
61 HMT_MEDIUM
62
63 mfcr r0
64 stw r0,8(r1)
65
66 HCALL_INST_PRECALL
67
68 mr r11,r3
69 mr r3,r4
70 mr r4,r5
71 mr r5,r6
72 mr r6,r7
73 mr r7,r8
74 mr r8,r9
75 ld r10,STK_PARAM(R10)(r1)
76
77 HVSC /* invoke the hypervisor */
78
79 HCALL_INST_POSTCALL
80
81 lwz r0,8(r1)
82 mtcrf 0xff,r0
83
84 blr /* return r3 = status */
85
86/* Note: takes only 6 input parameters, 1 output parameters at maximum */
87_GLOBAL(beat_hcall1)
88 HMT_MEDIUM
89
90 mfcr r0
91 stw r0,8(r1)
92
93 HCALL_INST_PRECALL
94
95 std r4,STK_PARAM(R4)(r1) /* save ret buffer */
96
97 mr r11,r3
98 mr r3,r5
99 mr r4,r6
100 mr r5,r7
101 mr r6,r8
102 mr r7,r9
103 mr r8,r10
104
105 HVSC /* invoke the hypervisor */
106
107 HCALL_INST_POSTCALL
108
109 ld r12,STK_PARAM(R4)(r1)
110 std r4, 0(r12)
111
112 lwz r0,8(r1)
113 mtcrf 0xff,r0
114
115 blr /* return r3 = status */
116
117/* Note: takes only 6 input parameters, 2 output parameters at maximum */
118_GLOBAL(beat_hcall2)
119 HMT_MEDIUM
120
121 mfcr r0
122 stw r0,8(r1)
123
124 HCALL_INST_PRECALL
125
126 std r4,STK_PARAM(R4)(r1) /* save ret buffer */
127
128 mr r11,r3
129 mr r3,r5
130 mr r4,r6
131 mr r5,r7
132 mr r6,r8
133 mr r7,r9
134 mr r8,r10
135
136 HVSC /* invoke the hypervisor */
137
138 HCALL_INST_POSTCALL
139
140 ld r12,STK_PARAM(R4)(r1)
141 std r4, 0(r12)
142 std r5, 8(r12)
143
144 lwz r0,8(r1)
145 mtcrf 0xff,r0
146
147 blr /* return r3 = status */
148
149/* Note: takes only 6 input parameters, 3 output parameters at maximum */
150_GLOBAL(beat_hcall3)
151 HMT_MEDIUM
152
153 mfcr r0
154 stw r0,8(r1)
155
156 HCALL_INST_PRECALL
157
158 std r4,STK_PARAM(R4)(r1) /* save ret buffer */
159
160 mr r11,r3
161 mr r3,r5
162 mr r4,r6
163 mr r5,r7
164 mr r6,r8
165 mr r7,r9
166 mr r8,r10
167
168 HVSC /* invoke the hypervisor */
169
170 HCALL_INST_POSTCALL
171
172 ld r12,STK_PARAM(R4)(r1)
173 std r4, 0(r12)
174 std r5, 8(r12)
175 std r6, 16(r12)
176
177 lwz r0,8(r1)
178 mtcrf 0xff,r0
179
180 blr /* return r3 = status */
181
182/* Note: takes only 6 input parameters, 4 output parameters at maximum */
183_GLOBAL(beat_hcall4)
184 HMT_MEDIUM
185
186 mfcr r0
187 stw r0,8(r1)
188
189 HCALL_INST_PRECALL
190
191 std r4,STK_PARAM(R4)(r1) /* save ret buffer */
192
193 mr r11,r3
194 mr r3,r5
195 mr r4,r6
196 mr r5,r7
197 mr r6,r8
198 mr r7,r9
199 mr r8,r10
200
201 HVSC /* invoke the hypervisor */
202
203 HCALL_INST_POSTCALL
204
205 ld r12,STK_PARAM(R4)(r1)
206 std r4, 0(r12)
207 std r5, 8(r12)
208 std r6, 16(r12)
209 std r7, 24(r12)
210
211 lwz r0,8(r1)
212 mtcrf 0xff,r0
213
214 blr /* return r3 = status */
215
216/* Note: takes only 6 input parameters, 5 output parameters at maximum */
217_GLOBAL(beat_hcall5)
218 HMT_MEDIUM
219
220 mfcr r0
221 stw r0,8(r1)
222
223 HCALL_INST_PRECALL
224
225 std r4,STK_PARAM(R4)(r1) /* save ret buffer */
226
227 mr r11,r3
228 mr r3,r5
229 mr r4,r6
230 mr r5,r7
231 mr r6,r8
232 mr r7,r9
233 mr r8,r10
234
235 HVSC /* invoke the hypervisor */
236
237 HCALL_INST_POSTCALL
238
239 ld r12,STK_PARAM(R4)(r1)
240 std r4, 0(r12)
241 std r5, 8(r12)
242 std r6, 16(r12)
243 std r7, 24(r12)
244 std r8, 32(r12)
245
246 lwz r0,8(r1)
247 mtcrf 0xff,r0
248
249 blr /* return r3 = status */
250
251/* Note: takes only 6 input parameters, 6 output parameters at maximum */
252_GLOBAL(beat_hcall6)
253 HMT_MEDIUM
254
255 mfcr r0
256 stw r0,8(r1)
257
258 HCALL_INST_PRECALL
259
260 std r4,STK_PARAM(R4)(r1) /* save ret buffer */
261
262 mr r11,r3
263 mr r3,r5
264 mr r4,r6
265 mr r5,r7
266 mr r6,r8
267 mr r7,r9
268 mr r8,r10
269
270 HVSC /* invoke the hypervisor */
271
272 HCALL_INST_POSTCALL
273
274 ld r12,STK_PARAM(R4)(r1)
275 std r4, 0(r12)
276 std r5, 8(r12)
277 std r6, 16(r12)
278 std r7, 24(r12)
279 std r8, 32(r12)
280 std r9, 40(r12)
281
282 lwz r0,8(r1)
283 mtcrf 0xff,r0
284
285 blr /* return r3 = status */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
deleted file mode 100644
index 9e5dfbcc00af..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ /dev/null
@@ -1,253 +0,0 @@
1/*
2 * Celleb/Beat Interrupt controller
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/irq.h>
24#include <linux/percpu.h>
25#include <linux/types.h>
26
27#include <asm/machdep.h>
28
29#include "beat_interrupt.h"
30#include "beat_wrapper.h"
31
32#define MAX_IRQS NR_IRQS
33static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
34static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
35static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
36
37static struct irq_domain *beatic_host;
38
39/*
40 * In this implementation, "virq" == "IRQ plug number",
41 * "(irq_hw_number_t)hwirq" == "IRQ outlet number".
42 */
43
44/* assumption: locked */
45static inline void beatic_update_irq_mask(unsigned int irq_plug)
46{
47 int off;
48 unsigned long masks[4];
49
50 off = (irq_plug / 256) * 4;
51 masks[0] = beatic_irq_mask_enable[off + 0]
52 & beatic_irq_mask_ack[off + 0];
53 masks[1] = beatic_irq_mask_enable[off + 1]
54 & beatic_irq_mask_ack[off + 1];
55 masks[2] = beatic_irq_mask_enable[off + 2]
56 & beatic_irq_mask_ack[off + 2];
57 masks[3] = beatic_irq_mask_enable[off + 3]
58 & beatic_irq_mask_ack[off + 3];
59 if (beat_set_interrupt_mask(irq_plug&~255UL,
60 masks[0], masks[1], masks[2], masks[3]) != 0)
61 panic("Failed to set mask IRQ!");
62}
63
64static void beatic_mask_irq(struct irq_data *d)
65{
66 unsigned long flags;
67
68 raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
69 beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
70 beatic_update_irq_mask(d->irq);
71 raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
72}
73
74static void beatic_unmask_irq(struct irq_data *d)
75{
76 unsigned long flags;
77
78 raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
79 beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64));
80 beatic_update_irq_mask(d->irq);
81 raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
82}
83
84static void beatic_ack_irq(struct irq_data *d)
85{
86 unsigned long flags;
87
88 raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
89 beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
90 beatic_update_irq_mask(d->irq);
91 raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
92}
93
94static void beatic_end_irq(struct irq_data *d)
95{
96 s64 err;
97 unsigned long flags;
98
99 err = beat_downcount_of_interrupt(d->irq);
100 if (err != 0) {
101 if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
102 panic("Failed to downcount IRQ! Error = %16llx", err);
103
104 printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq);
105 }
106 raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
107 beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64));
108 beatic_update_irq_mask(d->irq);
109 raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
110}
111
112static struct irq_chip beatic_pic = {
113 .name = "CELL-BEAT",
114 .irq_unmask = beatic_unmask_irq,
115 .irq_mask = beatic_mask_irq,
116 .irq_eoi = beatic_end_irq,
117};
118
119/*
120 * Dispose binding hardware IRQ number (hw) and Virtuql IRQ number (virq),
121 * update flags.
122 *
123 * Note that the number (virq) is already assigned at upper layer.
124 */
125static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
126{
127 beat_destruct_irq_plug(virq);
128}
129
130/*
131 * Create or update binding hardware IRQ number (hw) and Virtuql
132 * IRQ number (virq). This is called only once for a given mapping.
133 *
134 * Note that the number (virq) is already assigned at upper layer.
135 */
136static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
137 irq_hw_number_t hw)
138{
139 int64_t err;
140
141 err = beat_construct_and_connect_irq_plug(virq, hw);
142 if (err < 0)
143 return -EIO;
144
145 irq_set_status_flags(virq, IRQ_LEVEL);
146 irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
147 return 0;
148}
149
150/*
151 * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
152 * to pass away to irq_create_mapping().
153 *
154 * Called from irq_create_of_mapping() only.
155 * Note: We have only 1 entry to translate.
156 */
157static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
158 const u32 *intspec, unsigned int intsize,
159 irq_hw_number_t *out_hwirq,
160 unsigned int *out_flags)
161{
162 const u64 *intspec2 = (const u64 *)intspec;
163
164 *out_hwirq = *intspec2;
165 *out_flags |= IRQ_TYPE_LEVEL_LOW;
166 return 0;
167}
168
169static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
170{
171 /* Match all */
172 return 1;
173}
174
175static const struct irq_domain_ops beatic_pic_host_ops = {
176 .map = beatic_pic_host_map,
177 .unmap = beatic_pic_host_unmap,
178 .xlate = beatic_pic_host_xlate,
179 .match = beatic_pic_host_match,
180};
181
182/*
183 * Get an IRQ number
184 * Note: returns VIRQ
185 */
186static inline unsigned int beatic_get_irq_plug(void)
187{
188 int i;
189 uint64_t pending[4], ub;
190
191 for (i = 0; i < MAX_IRQS; i += 256) {
192 beat_detect_pending_interrupts(i, pending);
193 __asm__ ("cntlzd %0,%1":"=r"(ub):
194 "r"(pending[0] & beatic_irq_mask_enable[i/64+0]
195 & beatic_irq_mask_ack[i/64+0]));
196 if (ub != 64)
197 return i + ub + 0;
198 __asm__ ("cntlzd %0,%1":"=r"(ub):
199 "r"(pending[1] & beatic_irq_mask_enable[i/64+1]
200 & beatic_irq_mask_ack[i/64+1]));
201 if (ub != 64)
202 return i + ub + 64;
203 __asm__ ("cntlzd %0,%1":"=r"(ub):
204 "r"(pending[2] & beatic_irq_mask_enable[i/64+2]
205 & beatic_irq_mask_ack[i/64+2]));
206 if (ub != 64)
207 return i + ub + 128;
208 __asm__ ("cntlzd %0,%1":"=r"(ub):
209 "r"(pending[3] & beatic_irq_mask_enable[i/64+3]
210 & beatic_irq_mask_ack[i/64+3]));
211 if (ub != 64)
212 return i + ub + 192;
213 }
214
215 return NO_IRQ;
216}
217unsigned int beatic_get_irq(void)
218{
219 unsigned int ret;
220
221 ret = beatic_get_irq_plug();
222 if (ret != NO_IRQ)
223 beatic_ack_irq(irq_get_irq_data(ret));
224 return ret;
225}
226
227/*
228 */
229void __init beatic_init_IRQ(void)
230{
231 int i;
232
233 memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable));
234 memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack));
235 for (i = 0; i < MAX_IRQS; i += 256)
236 beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L);
237
238 /* Set out get_irq function */
239 ppc_md.get_irq = beatic_get_irq;
240
241 /* Allocate an irq host */
242 beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL);
243 BUG_ON(beatic_host == NULL);
244 irq_set_default_host(beatic_host);
245}
246
247void beatic_deinit_IRQ(void)
248{
249 int i;
250
251 for (i = 1; i < nr_irqs; i++)
252 beat_destruct_irq_plug(i);
253}
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
deleted file mode 100644
index a7e52f91a078..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Celleb/Beat Interrupt controller
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#ifndef ASM_BEAT_PIC_H
22#define ASM_BEAT_PIC_H
23#ifdef __KERNEL__
24
25extern void beatic_init_IRQ(void);
26extern unsigned int beatic_get_irq(void);
27extern void beatic_deinit_IRQ(void);
28
29#endif
30#endif /* ASM_BEAT_PIC_H */
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
deleted file mode 100644
index 3ce685568935..000000000000
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Support for IOMMU on Celleb platform.
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/dma-mapping.h>
24#include <linux/pci.h>
25#include <linux/of_platform.h>
26
27#include <asm/machdep.h>
28
29#include "beat_wrapper.h"
30
31#define DMA_FLAGS 0xf800000000000000UL /* r/w permitted, coherency required,
32 strongest order */
33
34static int __init find_dma_window(u64 *io_space_id, u64 *ioid,
35 u64 *base, u64 *size, u64 *io_page_size)
36{
37 struct device_node *dn;
38 const unsigned long *dma_window;
39
40 for_each_node_by_type(dn, "ioif") {
41 dma_window = of_get_property(dn, "toshiba,dma-window", NULL);
42 if (dma_window) {
43 *io_space_id = (dma_window[0] >> 32) & 0xffffffffUL;
44 *ioid = dma_window[0] & 0x7ffUL;
45 *base = dma_window[1];
46 *size = dma_window[2];
47 *io_page_size = 1 << dma_window[3];
48 of_node_put(dn);
49 return 1;
50 }
51 }
52 return 0;
53}
54
55static unsigned long celleb_dma_direct_offset;
56
57static void __init celleb_init_direct_mapping(void)
58{
59 u64 lpar_addr, io_addr;
60 u64 io_space_id, ioid, dma_base, dma_size, io_page_size;
61
62 if (!find_dma_window(&io_space_id, &ioid, &dma_base, &dma_size,
63 &io_page_size)) {
64 pr_info("No dma window found !\n");
65 return;
66 }
67
68 for (lpar_addr = 0; lpar_addr < dma_size; lpar_addr += io_page_size) {
69 io_addr = lpar_addr + dma_base;
70 (void)beat_put_iopte(io_space_id, io_addr, lpar_addr,
71 ioid, DMA_FLAGS);
72 }
73
74 celleb_dma_direct_offset = dma_base;
75}
76
77static void celleb_dma_dev_setup(struct device *dev)
78{
79 set_dma_ops(dev, &dma_direct_ops);
80 set_dma_offset(dev, celleb_dma_direct_offset);
81}
82
83static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
84{
85 celleb_dma_dev_setup(&pdev->dev);
86}
87
88static int celleb_of_bus_notify(struct notifier_block *nb,
89 unsigned long action, void *data)
90{
91 struct device *dev = data;
92
93 /* We are only intereted in device addition */
94 if (action != BUS_NOTIFY_ADD_DEVICE)
95 return 0;
96
97 celleb_dma_dev_setup(dev);
98
99 return 0;
100}
101
102static struct notifier_block celleb_of_bus_notifier = {
103 .notifier_call = celleb_of_bus_notify
104};
105
106static int __init celleb_init_iommu(void)
107{
108 celleb_init_direct_mapping();
109 ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
110 bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
111
112 return 0;
113}
114
115machine_arch_initcall(celleb_beat, celleb_init_iommu);
diff --git a/arch/powerpc/platforms/cell/beat_spu_priv1.c b/arch/powerpc/platforms/cell/beat_spu_priv1.c
deleted file mode 100644
index 13f52589d3a9..000000000000
--- a/arch/powerpc/platforms/cell/beat_spu_priv1.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/*
2 * spu hypervisor abstraction for Beat
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <asm/types.h>
22#include <asm/spu.h>
23#include <asm/spu_priv1.h>
24
25#include "beat_wrapper.h"
26
27static inline void _int_mask_set(struct spu *spu, int class, u64 mask)
28{
29 spu->shadow_int_mask_RW[class] = mask;
30 beat_set_irq_mask_for_spe(spu->spe_id, class, mask);
31}
32
33static inline u64 _int_mask_get(struct spu *spu, int class)
34{
35 return spu->shadow_int_mask_RW[class];
36}
37
38static void int_mask_set(struct spu *spu, int class, u64 mask)
39{
40 _int_mask_set(spu, class, mask);
41}
42
43static u64 int_mask_get(struct spu *spu, int class)
44{
45 return _int_mask_get(spu, class);
46}
47
48static void int_mask_and(struct spu *spu, int class, u64 mask)
49{
50 u64 old_mask;
51 old_mask = _int_mask_get(spu, class);
52 _int_mask_set(spu, class, old_mask & mask);
53}
54
55static void int_mask_or(struct spu *spu, int class, u64 mask)
56{
57 u64 old_mask;
58 old_mask = _int_mask_get(spu, class);
59 _int_mask_set(spu, class, old_mask | mask);
60}
61
62static void int_stat_clear(struct spu *spu, int class, u64 stat)
63{
64 beat_clear_interrupt_status_of_spe(spu->spe_id, class, stat);
65}
66
67static u64 int_stat_get(struct spu *spu, int class)
68{
69 u64 int_stat;
70 beat_get_interrupt_status_of_spe(spu->spe_id, class, &int_stat);
71 return int_stat;
72}
73
74static void cpu_affinity_set(struct spu *spu, int cpu)
75{
76 return;
77}
78
79static u64 mfc_dar_get(struct spu *spu)
80{
81 u64 dar;
82 beat_get_spe_privileged_state_1_registers(
83 spu->spe_id,
84 offsetof(struct spu_priv1, mfc_dar_RW), &dar);
85 return dar;
86}
87
88static u64 mfc_dsisr_get(struct spu *spu)
89{
90 u64 dsisr;
91 beat_get_spe_privileged_state_1_registers(
92 spu->spe_id,
93 offsetof(struct spu_priv1, mfc_dsisr_RW), &dsisr);
94 return dsisr;
95}
96
97static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
98{
99 beat_set_spe_privileged_state_1_registers(
100 spu->spe_id,
101 offsetof(struct spu_priv1, mfc_dsisr_RW), dsisr);
102}
103
104static void mfc_sdr_setup(struct spu *spu)
105{
106 return;
107}
108
109static void mfc_sr1_set(struct spu *spu, u64 sr1)
110{
111 beat_set_spe_privileged_state_1_registers(
112 spu->spe_id,
113 offsetof(struct spu_priv1, mfc_sr1_RW), sr1);
114}
115
116static u64 mfc_sr1_get(struct spu *spu)
117{
118 u64 sr1;
119 beat_get_spe_privileged_state_1_registers(
120 spu->spe_id,
121 offsetof(struct spu_priv1, mfc_sr1_RW), &sr1);
122 return sr1;
123}
124
125static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
126{
127 beat_set_spe_privileged_state_1_registers(
128 spu->spe_id,
129 offsetof(struct spu_priv1, mfc_tclass_id_RW), tclass_id);
130}
131
132static u64 mfc_tclass_id_get(struct spu *spu)
133{
134 u64 tclass_id;
135 beat_get_spe_privileged_state_1_registers(
136 spu->spe_id,
137 offsetof(struct spu_priv1, mfc_tclass_id_RW), &tclass_id);
138 return tclass_id;
139}
140
141static void tlb_invalidate(struct spu *spu)
142{
143 beat_set_spe_privileged_state_1_registers(
144 spu->spe_id,
145 offsetof(struct spu_priv1, tlb_invalidate_entry_W), 0ul);
146}
147
148static void resource_allocation_groupID_set(struct spu *spu, u64 id)
149{
150 beat_set_spe_privileged_state_1_registers(
151 spu->spe_id,
152 offsetof(struct spu_priv1, resource_allocation_groupID_RW),
153 id);
154}
155
156static u64 resource_allocation_groupID_get(struct spu *spu)
157{
158 u64 id;
159 beat_get_spe_privileged_state_1_registers(
160 spu->spe_id,
161 offsetof(struct spu_priv1, resource_allocation_groupID_RW),
162 &id);
163 return id;
164}
165
166static void resource_allocation_enable_set(struct spu *spu, u64 enable)
167{
168 beat_set_spe_privileged_state_1_registers(
169 spu->spe_id,
170 offsetof(struct spu_priv1, resource_allocation_enable_RW),
171 enable);
172}
173
174static u64 resource_allocation_enable_get(struct spu *spu)
175{
176 u64 enable;
177 beat_get_spe_privileged_state_1_registers(
178 spu->spe_id,
179 offsetof(struct spu_priv1, resource_allocation_enable_RW),
180 &enable);
181 return enable;
182}
183
184const struct spu_priv1_ops spu_priv1_beat_ops = {
185 .int_mask_and = int_mask_and,
186 .int_mask_or = int_mask_or,
187 .int_mask_set = int_mask_set,
188 .int_mask_get = int_mask_get,
189 .int_stat_clear = int_stat_clear,
190 .int_stat_get = int_stat_get,
191 .cpu_affinity_set = cpu_affinity_set,
192 .mfc_dar_get = mfc_dar_get,
193 .mfc_dsisr_get = mfc_dsisr_get,
194 .mfc_dsisr_set = mfc_dsisr_set,
195 .mfc_sdr_setup = mfc_sdr_setup,
196 .mfc_sr1_set = mfc_sr1_set,
197 .mfc_sr1_get = mfc_sr1_get,
198 .mfc_tclass_id_set = mfc_tclass_id_set,
199 .mfc_tclass_id_get = mfc_tclass_id_get,
200 .tlb_invalidate = tlb_invalidate,
201 .resource_allocation_groupID_set = resource_allocation_groupID_set,
202 .resource_allocation_groupID_get = resource_allocation_groupID_get,
203 .resource_allocation_enable_set = resource_allocation_enable_set,
204 .resource_allocation_enable_get = resource_allocation_enable_get,
205};
diff --git a/arch/powerpc/platforms/cell/beat_syscall.h b/arch/powerpc/platforms/cell/beat_syscall.h
deleted file mode 100644
index 8580dc7e1798..000000000000
--- a/arch/powerpc/platforms/cell/beat_syscall.h
+++ /dev/null
@@ -1,164 +0,0 @@
1/*
2 * Beat hypervisor call numbers
3 *
4 * (C) Copyright 2004-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#ifndef BEAT_BEAT_syscall_H
22#define BEAT_BEAT_syscall_H
23
24#ifdef __ASSEMBLY__
25#define __BEAT_ADD_VENDOR_ID(__x, __v) ((__v)<<60|(__x))
26#else
27#define __BEAT_ADD_VENDOR_ID(__x, __v) ((u64)(__v)<<60|(__x))
28#endif
29#define HV_allocate_memory __BEAT_ADD_VENDOR_ID(0, 0)
30#define HV_construct_virtual_address_space __BEAT_ADD_VENDOR_ID(2, 0)
31#define HV_destruct_virtual_address_space __BEAT_ADD_VENDOR_ID(10, 0)
32#define HV_get_virtual_address_space_id_of_ppe __BEAT_ADD_VENDOR_ID(4, 0)
33#define HV_query_logical_partition_address_region_info \
34 __BEAT_ADD_VENDOR_ID(6, 0)
35#define HV_release_memory __BEAT_ADD_VENDOR_ID(13, 0)
36#define HV_select_virtual_address_space __BEAT_ADD_VENDOR_ID(7, 0)
37#define HV_load_range_registers __BEAT_ADD_VENDOR_ID(68, 0)
38#define HV_set_ppe_l2cache_rmt_entry __BEAT_ADD_VENDOR_ID(70, 0)
39#define HV_set_ppe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(71, 0)
40#define HV_set_spe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(72, 0)
41#define HV_get_io_address_translation_fault_info __BEAT_ADD_VENDOR_ID(14, 0)
42#define HV_get_iopte __BEAT_ADD_VENDOR_ID(16, 0)
43#define HV_preload_iopt_cache __BEAT_ADD_VENDOR_ID(17, 0)
44#define HV_put_iopte __BEAT_ADD_VENDOR_ID(15, 0)
45#define HV_connect_event_ports __BEAT_ADD_VENDOR_ID(21, 0)
46#define HV_construct_event_receive_port __BEAT_ADD_VENDOR_ID(18, 0)
47#define HV_destruct_event_receive_port __BEAT_ADD_VENDOR_ID(19, 0)
48#define HV_destruct_event_send_port __BEAT_ADD_VENDOR_ID(22, 0)
49#define HV_get_state_of_event_send_port __BEAT_ADD_VENDOR_ID(25, 0)
50#define HV_request_to_connect_event_ports __BEAT_ADD_VENDOR_ID(20, 0)
51#define HV_send_event_externally __BEAT_ADD_VENDOR_ID(23, 0)
52#define HV_send_event_locally __BEAT_ADD_VENDOR_ID(24, 0)
53#define HV_construct_and_connect_irq_plug __BEAT_ADD_VENDOR_ID(28, 0)
54#define HV_destruct_irq_plug __BEAT_ADD_VENDOR_ID(29, 0)
55#define HV_detect_pending_interrupts __BEAT_ADD_VENDOR_ID(26, 0)
56#define HV_end_of_interrupt __BEAT_ADD_VENDOR_ID(27, 0)
57#define HV_assign_control_signal_notification_port __BEAT_ADD_VENDOR_ID(45, 0)
58#define HV_end_of_control_signal_processing __BEAT_ADD_VENDOR_ID(48, 0)
59#define HV_get_control_signal __BEAT_ADD_VENDOR_ID(46, 0)
60#define HV_set_irq_mask_for_spe __BEAT_ADD_VENDOR_ID(61, 0)
61#define HV_shutdown_logical_partition __BEAT_ADD_VENDOR_ID(44, 0)
62#define HV_connect_message_ports __BEAT_ADD_VENDOR_ID(35, 0)
63#define HV_destruct_message_port __BEAT_ADD_VENDOR_ID(36, 0)
64#define HV_receive_message __BEAT_ADD_VENDOR_ID(37, 0)
65#define HV_get_message_port_info __BEAT_ADD_VENDOR_ID(34, 0)
66#define HV_request_to_connect_message_ports __BEAT_ADD_VENDOR_ID(33, 0)
67#define HV_send_message __BEAT_ADD_VENDOR_ID(32, 0)
68#define HV_get_logical_ppe_id __BEAT_ADD_VENDOR_ID(69, 0)
69#define HV_pause __BEAT_ADD_VENDOR_ID(9, 0)
70#define HV_destruct_shared_memory_handle __BEAT_ADD_VENDOR_ID(51, 0)
71#define HV_get_shared_memory_info __BEAT_ADD_VENDOR_ID(52, 0)
72#define HV_permit_sharing_memory __BEAT_ADD_VENDOR_ID(50, 0)
73#define HV_request_to_attach_shared_memory __BEAT_ADD_VENDOR_ID(49, 0)
74#define HV_enable_logical_spe_execution __BEAT_ADD_VENDOR_ID(55, 0)
75#define HV_construct_logical_spe __BEAT_ADD_VENDOR_ID(53, 0)
76#define HV_disable_logical_spe_execution __BEAT_ADD_VENDOR_ID(56, 0)
77#define HV_destruct_logical_spe __BEAT_ADD_VENDOR_ID(54, 0)
78#define HV_sense_spe_execution_status __BEAT_ADD_VENDOR_ID(58, 0)
79#define HV_insert_htab_entry __BEAT_ADD_VENDOR_ID(101, 0)
80#define HV_read_htab_entries __BEAT_ADD_VENDOR_ID(95, 0)
81#define HV_write_htab_entry __BEAT_ADD_VENDOR_ID(94, 0)
82#define HV_assign_io_address_translation_fault_port \
83 __BEAT_ADD_VENDOR_ID(100, 0)
84#define HV_set_interrupt_mask __BEAT_ADD_VENDOR_ID(73, 0)
85#define HV_get_logical_partition_id __BEAT_ADD_VENDOR_ID(74, 0)
86#define HV_create_repository_node2 __BEAT_ADD_VENDOR_ID(90, 0)
87#define HV_create_repository_node __BEAT_ADD_VENDOR_ID(90, 0) /* alias */
88#define HV_get_repository_node_value2 __BEAT_ADD_VENDOR_ID(91, 0)
89#define HV_get_repository_node_value __BEAT_ADD_VENDOR_ID(91, 0) /* alias */
90#define HV_modify_repository_node_value2 __BEAT_ADD_VENDOR_ID(92, 0)
91#define HV_modify_repository_node_value __BEAT_ADD_VENDOR_ID(92, 0) /* alias */
92#define HV_remove_repository_node2 __BEAT_ADD_VENDOR_ID(93, 0)
93#define HV_remove_repository_node __BEAT_ADD_VENDOR_ID(93, 0) /* alias */
94#define HV_cancel_shared_memory __BEAT_ADD_VENDOR_ID(104, 0)
95#define HV_clear_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(206, 0)
96#define HV_construct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(80, 0)
97#define HV_destruct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(81, 0)
98#define HV_disconnect_ipspc_service __BEAT_ADD_VENDOR_ID(88, 0)
99#define HV_execute_ipspc_command __BEAT_ADD_VENDOR_ID(86, 0)
100#define HV_get_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(205, 0)
101#define HV_get_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(208, 0)
102#define HV_permit_use_of_ipspc_service __BEAT_ADD_VENDOR_ID(85, 0)
103#define HV_reinitialize_logical_spe __BEAT_ADD_VENDOR_ID(82, 0)
104#define HV_request_ipspc_service __BEAT_ADD_VENDOR_ID(84, 0)
105#define HV_stop_ipspc_command __BEAT_ADD_VENDOR_ID(87, 0)
106#define HV_set_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(204, 0)
107#define HV_get_status_of_ipspc_service __BEAT_ADD_VENDOR_ID(203, 0)
108#define HV_put_characters_to_console __BEAT_ADD_VENDOR_ID(0x101, 1)
109#define HV_get_characters_from_console __BEAT_ADD_VENDOR_ID(0x102, 1)
110#define HV_get_base_clock __BEAT_ADD_VENDOR_ID(0x111, 1)
111#define HV_set_base_clock __BEAT_ADD_VENDOR_ID(0x112, 1)
112#define HV_get_frame_cycle __BEAT_ADD_VENDOR_ID(0x114, 1)
113#define HV_disable_console __BEAT_ADD_VENDOR_ID(0x115, 1)
114#define HV_disable_all_console __BEAT_ADD_VENDOR_ID(0x116, 1)
115#define HV_oneshot_timer __BEAT_ADD_VENDOR_ID(0x117, 1)
116#define HV_set_dabr __BEAT_ADD_VENDOR_ID(0x118, 1)
117#define HV_get_dabr __BEAT_ADD_VENDOR_ID(0x119, 1)
118#define HV_start_hv_stats __BEAT_ADD_VENDOR_ID(0x21c, 1)
119#define HV_stop_hv_stats __BEAT_ADD_VENDOR_ID(0x21d, 1)
120#define HV_get_hv_stats __BEAT_ADD_VENDOR_ID(0x21e, 1)
121#define HV_get_hv_error_stats __BEAT_ADD_VENDOR_ID(0x221, 1)
122#define HV_get_stats __BEAT_ADD_VENDOR_ID(0x224, 1)
123#define HV_get_heap_stats __BEAT_ADD_VENDOR_ID(0x225, 1)
124#define HV_get_memory_stats __BEAT_ADD_VENDOR_ID(0x227, 1)
125#define HV_get_memory_detail __BEAT_ADD_VENDOR_ID(0x228, 1)
126#define HV_set_priority_of_irq_outlet __BEAT_ADD_VENDOR_ID(0x122, 1)
127#define HV_get_physical_spe_by_reservation_id __BEAT_ADD_VENDOR_ID(0x128, 1)
128#define HV_get_spe_context __BEAT_ADD_VENDOR_ID(0x129, 1)
129#define HV_set_spe_context __BEAT_ADD_VENDOR_ID(0x12a, 1)
130#define HV_downcount_of_interrupt __BEAT_ADD_VENDOR_ID(0x12e, 1)
131#define HV_peek_spe_context __BEAT_ADD_VENDOR_ID(0x12f, 1)
132#define HV_read_bpa_register __BEAT_ADD_VENDOR_ID(0x131, 1)
133#define HV_write_bpa_register __BEAT_ADD_VENDOR_ID(0x132, 1)
134#define HV_map_context_table_of_spe __BEAT_ADD_VENDOR_ID(0x137, 1)
135#define HV_get_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x138, 1)
136#define HV_set_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x139, 1)
137#define HV_init_pm __BEAT_ADD_VENDOR_ID(0x150, 1)
138#define HV_set_pm_signal __BEAT_ADD_VENDOR_ID(0x151, 1)
139#define HV_get_pm_signal __BEAT_ADD_VENDOR_ID(0x152, 1)
140#define HV_set_pm_config __BEAT_ADD_VENDOR_ID(0x153, 1)
141#define HV_get_pm_config __BEAT_ADD_VENDOR_ID(0x154, 1)
142#define HV_get_inner_trace_data __BEAT_ADD_VENDOR_ID(0x155, 1)
143#define HV_set_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x156, 1)
144#define HV_get_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x157, 1)
145#define HV_set_pm_interrupt __BEAT_ADD_VENDOR_ID(0x158, 1)
146#define HV_get_pm_interrupt __BEAT_ADD_VENDOR_ID(0x159, 1)
147#define HV_kick_pm __BEAT_ADD_VENDOR_ID(0x160, 1)
148#define HV_construct_pm_context __BEAT_ADD_VENDOR_ID(0x164, 1)
149#define HV_destruct_pm_context __BEAT_ADD_VENDOR_ID(0x165, 1)
150#define HV_be_slow __BEAT_ADD_VENDOR_ID(0x170, 1)
151#define HV_assign_ipspc_server_connection_status_notification_port \
152 __BEAT_ADD_VENDOR_ID(0x173, 1)
153#define HV_get_raid_of_physical_spe __BEAT_ADD_VENDOR_ID(0x174, 1)
154#define HV_set_physical_spe_to_rag __BEAT_ADD_VENDOR_ID(0x175, 1)
155#define HV_release_physical_spe_from_rag __BEAT_ADD_VENDOR_ID(0x176, 1)
156#define HV_rtc_read __BEAT_ADD_VENDOR_ID(0x190, 1)
157#define HV_rtc_write __BEAT_ADD_VENDOR_ID(0x191, 1)
158#define HV_eeprom_read __BEAT_ADD_VENDOR_ID(0x192, 1)
159#define HV_eeprom_write __BEAT_ADD_VENDOR_ID(0x193, 1)
160#define HV_insert_htab_entry3 __BEAT_ADD_VENDOR_ID(0x104, 1)
161#define HV_invalidate_htab_entry3 __BEAT_ADD_VENDOR_ID(0x105, 1)
162#define HV_update_htab_permission3 __BEAT_ADD_VENDOR_ID(0x106, 1)
163#define HV_clear_htab3 __BEAT_ADD_VENDOR_ID(0x107, 1)
164#endif
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
deleted file mode 100644
index 350735bc8888..000000000000
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * udbg function for Beat
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/console.h>
23
24#include <asm/machdep.h>
25#include <asm/prom.h>
26#include <asm/udbg.h>
27
28#include "beat.h"
29
30#define celleb_vtermno 0
31
32static void udbg_putc_beat(char c)
33{
34 unsigned long rc;
35
36 if (c == '\n')
37 udbg_putc_beat('\r');
38
39 rc = beat_put_term_char(celleb_vtermno, 1, (uint64_t)c << 56, 0);
40}
41
42/* Buffered chars getc */
43static u64 inbuflen;
44static u64 inbuf[2]; /* must be 2 u64s */
45
46static int udbg_getc_poll_beat(void)
47{
48 /* The interface is tricky because it may return up to 16 chars.
49 * We save them statically for future calls to udbg_getc().
50 */
51 char ch, *buf = (char *)inbuf;
52 int i;
53 long rc;
54 if (inbuflen == 0) {
55 /* get some more chars. */
56 inbuflen = 0;
57 rc = beat_get_term_char(celleb_vtermno, &inbuflen,
58 inbuf+0, inbuf+1);
59 if (rc != 0)
60 inbuflen = 0; /* otherwise inbuflen is garbage */
61 }
62 if (inbuflen <= 0 || inbuflen > 16) {
63 /* Catch error case as well as other oddities (corruption) */
64 inbuflen = 0;
65 return -1;
66 }
67 ch = buf[0];
68 for (i = 1; i < inbuflen; i++) /* shuffle them down. */
69 buf[i-1] = buf[i];
70 inbuflen--;
71 return ch;
72}
73
74static int udbg_getc_beat(void)
75{
76 int ch;
77 for (;;) {
78 ch = udbg_getc_poll_beat();
79 if (ch == -1) {
80 /* This shouldn't be needed...but... */
81 volatile unsigned long delay;
82 for (delay = 0; delay < 2000000; delay++)
83 ;
84 } else {
85 return ch;
86 }
87 }
88}
89
90/* call this from early_init() for a working debug console on
91 * vterm capable LPAR machines
92 */
93void __init udbg_init_debug_beat(void)
94{
95 udbg_putc = udbg_putc_beat;
96 udbg_getc = udbg_getc_beat;
97 udbg_getc_poll = udbg_getc_poll_beat;
98}
diff --git a/arch/powerpc/platforms/cell/beat_wrapper.h b/arch/powerpc/platforms/cell/beat_wrapper.h
deleted file mode 100644
index c1109969f242..000000000000
--- a/arch/powerpc/platforms/cell/beat_wrapper.h
+++ /dev/null
@@ -1,290 +0,0 @@
1/*
2 * Beat hypervisor call I/F
3 *
4 * (C) Copyright 2007 TOSHIBA CORPORATION
5 *
6 * This code is based on arch/powerpc/platforms/pseries/plpar_wrapper.h.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22#ifndef BEAT_HCALL
23#include <linux/string.h>
24#include "beat_syscall.h"
25
26/* defined in hvCall.S */
27extern s64 beat_hcall_norets(u64 opcode, ...);
28extern s64 beat_hcall_norets8(u64 opcode, u64 arg1, u64 arg2, u64 arg3,
29 u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8);
30extern s64 beat_hcall1(u64 opcode, u64 retbuf[1], ...);
31extern s64 beat_hcall2(u64 opcode, u64 retbuf[2], ...);
32extern s64 beat_hcall3(u64 opcode, u64 retbuf[3], ...);
33extern s64 beat_hcall4(u64 opcode, u64 retbuf[4], ...);
34extern s64 beat_hcall5(u64 opcode, u64 retbuf[5], ...);
35extern s64 beat_hcall6(u64 opcode, u64 retbuf[6], ...);
36
37static inline s64 beat_downcount_of_interrupt(u64 plug_id)
38{
39 return beat_hcall_norets(HV_downcount_of_interrupt, plug_id);
40}
41
42static inline s64 beat_set_interrupt_mask(u64 index,
43 u64 val0, u64 val1, u64 val2, u64 val3)
44{
45 return beat_hcall_norets(HV_set_interrupt_mask, index,
46 val0, val1, val2, val3);
47}
48
49static inline s64 beat_destruct_irq_plug(u64 plug_id)
50{
51 return beat_hcall_norets(HV_destruct_irq_plug, plug_id);
52}
53
54static inline s64 beat_construct_and_connect_irq_plug(u64 plug_id,
55 u64 outlet_id)
56{
57 return beat_hcall_norets(HV_construct_and_connect_irq_plug, plug_id,
58 outlet_id);
59}
60
61static inline s64 beat_detect_pending_interrupts(u64 index, u64 *retbuf)
62{
63 return beat_hcall4(HV_detect_pending_interrupts, retbuf, index);
64}
65
66static inline s64 beat_pause(u64 style)
67{
68 return beat_hcall_norets(HV_pause, style);
69}
70
71static inline s64 beat_read_htab_entries(u64 htab_id, u64 index, u64 *retbuf)
72{
73 return beat_hcall5(HV_read_htab_entries, retbuf, htab_id, index);
74}
75
76static inline s64 beat_insert_htab_entry(u64 htab_id, u64 group,
77 u64 bitmask, u64 hpte_v, u64 hpte_r, u64 *slot)
78{
79 u64 dummy[3];
80 s64 ret;
81
82 ret = beat_hcall3(HV_insert_htab_entry, dummy, htab_id, group,
83 bitmask, hpte_v, hpte_r);
84 *slot = dummy[0];
85 return ret;
86}
87
88static inline s64 beat_write_htab_entry(u64 htab_id, u64 slot,
89 u64 hpte_v, u64 hpte_r, u64 mask_v, u64 mask_r,
90 u64 *ret_v, u64 *ret_r)
91{
92 u64 dummy[2];
93 s64 ret;
94
95 ret = beat_hcall2(HV_write_htab_entry, dummy, htab_id, slot,
96 hpte_v, hpte_r, mask_v, mask_r);
97 *ret_v = dummy[0];
98 *ret_r = dummy[1];
99 return ret;
100}
101
102static inline s64 beat_insert_htab_entry3(u64 htab_id, u64 group,
103 u64 hpte_v, u64 hpte_r, u64 mask_v, u64 value_v, u64 *slot)
104{
105 u64 dummy[1];
106 s64 ret;
107
108 ret = beat_hcall1(HV_insert_htab_entry3, dummy, htab_id, group,
109 hpte_v, hpte_r, mask_v, value_v);
110 *slot = dummy[0];
111 return ret;
112}
113
114static inline s64 beat_invalidate_htab_entry3(u64 htab_id, u64 group,
115 u64 va, u64 pss)
116{
117 return beat_hcall_norets(HV_invalidate_htab_entry3,
118 htab_id, group, va, pss);
119}
120
121static inline s64 beat_update_htab_permission3(u64 htab_id, u64 group,
122 u64 va, u64 pss, u64 ptel_mask, u64 ptel_value)
123{
124 return beat_hcall_norets(HV_update_htab_permission3,
125 htab_id, group, va, pss, ptel_mask, ptel_value);
126}
127
128static inline s64 beat_clear_htab3(u64 htab_id)
129{
130 return beat_hcall_norets(HV_clear_htab3, htab_id);
131}
132
133static inline void beat_shutdown_logical_partition(u64 code)
134{
135 (void)beat_hcall_norets(HV_shutdown_logical_partition, code);
136}
137
138static inline s64 beat_rtc_write(u64 time_from_epoch)
139{
140 return beat_hcall_norets(HV_rtc_write, time_from_epoch);
141}
142
143static inline s64 beat_rtc_read(u64 *time_from_epoch)
144{
145 u64 dummy[1];
146 s64 ret;
147
148 ret = beat_hcall1(HV_rtc_read, dummy);
149 *time_from_epoch = dummy[0];
150 return ret;
151}
152
153#define BEAT_NVRW_CNT (sizeof(u64) * 6)
154
155static inline s64 beat_eeprom_write(u64 index, u64 length, u8 *buffer)
156{
157 u64 b[6];
158
159 if (length > BEAT_NVRW_CNT)
160 return -1;
161 memcpy(b, buffer, sizeof(b));
162 return beat_hcall_norets8(HV_eeprom_write, index, length,
163 b[0], b[1], b[2], b[3], b[4], b[5]);
164}
165
166static inline s64 beat_eeprom_read(u64 index, u64 length, u8 *buffer)
167{
168 u64 b[6];
169 s64 ret;
170
171 if (length > BEAT_NVRW_CNT)
172 return -1;
173 ret = beat_hcall6(HV_eeprom_read, b, index, length);
174 memcpy(buffer, b, length);
175 return ret;
176}
177
178static inline s64 beat_set_dabr(u64 value, u64 style)
179{
180 return beat_hcall_norets(HV_set_dabr, value, style);
181}
182
183static inline s64 beat_get_characters_from_console(u64 termno, u64 *len,
184 u8 *buffer)
185{
186 u64 dummy[3];
187 s64 ret;
188
189 ret = beat_hcall3(HV_get_characters_from_console, dummy, termno, len);
190 *len = dummy[0];
191 memcpy(buffer, dummy + 1, *len);
192 return ret;
193}
194
195static inline s64 beat_put_characters_to_console(u64 termno, u64 len,
196 u8 *buffer)
197{
198 u64 b[2];
199
200 memcpy(b, buffer, len);
201 return beat_hcall_norets(HV_put_characters_to_console, termno, len,
202 b[0], b[1]);
203}
204
205static inline s64 beat_get_spe_privileged_state_1_registers(
206 u64 id, u64 offsetof, u64 *value)
207{
208 u64 dummy[1];
209 s64 ret;
210
211 ret = beat_hcall1(HV_get_spe_privileged_state_1_registers, dummy, id,
212 offsetof);
213 *value = dummy[0];
214 return ret;
215}
216
217static inline s64 beat_set_irq_mask_for_spe(u64 id, u64 class, u64 mask)
218{
219 return beat_hcall_norets(HV_set_irq_mask_for_spe, id, class, mask);
220}
221
222static inline s64 beat_clear_interrupt_status_of_spe(u64 id, u64 class,
223 u64 mask)
224{
225 return beat_hcall_norets(HV_clear_interrupt_status_of_spe,
226 id, class, mask);
227}
228
229static inline s64 beat_set_spe_privileged_state_1_registers(
230 u64 id, u64 offsetof, u64 value)
231{
232 return beat_hcall_norets(HV_set_spe_privileged_state_1_registers,
233 id, offsetof, value);
234}
235
236static inline s64 beat_get_interrupt_status_of_spe(u64 id, u64 class, u64 *val)
237{
238 u64 dummy[1];
239 s64 ret;
240
241 ret = beat_hcall1(HV_get_interrupt_status_of_spe, dummy, id, class);
242 *val = dummy[0];
243 return ret;
244}
245
246static inline s64 beat_put_iopte(u64 ioas_id, u64 io_addr, u64 real_addr,
247 u64 ioid, u64 flags)
248{
249 return beat_hcall_norets(HV_put_iopte, ioas_id, io_addr, real_addr,
250 ioid, flags);
251}
252
253static inline s64 beat_construct_event_receive_port(u64 *port)
254{
255 u64 dummy[1];
256 s64 ret;
257
258 ret = beat_hcall1(HV_construct_event_receive_port, dummy);
259 *port = dummy[0];
260 return ret;
261}
262
263static inline s64 beat_destruct_event_receive_port(u64 port)
264{
265 s64 ret;
266
267 ret = beat_hcall_norets(HV_destruct_event_receive_port, port);
268 return ret;
269}
270
271static inline s64 beat_create_repository_node(u64 path[4], u64 data[2])
272{
273 s64 ret;
274
275 ret = beat_hcall_norets(HV_create_repository_node2,
276 path[0], path[1], path[2], path[3], data[0], data[1]);
277 return ret;
278}
279
280static inline s64 beat_get_repository_node_value(u64 lpid, u64 path[4],
281 u64 data[2])
282{
283 s64 ret;
284
285 ret = beat_hcall2(HV_get_repository_node_value2, data,
286 lpid, path[0], path[1], path[2], path[3]);
287 return ret;
288}
289
290#endif
diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h
new file mode 100644
index 000000000000..ef143dfee068
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cell.h
@@ -0,0 +1,24 @@
1/*
2 * Cell Platform common data structures
3 *
4 * Copyright 2015, Daniel Axtens, IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef CELL_H
18#define CELL_H
19
20#include <asm/pci-bridge.h>
21
22extern struct pci_controller_ops cell_pci_controller_ops;
23
24#endif
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
deleted file mode 100644
index 3ce70ded2d6a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ /dev/null
@@ -1,500 +0,0 @@
1/*
2 * Support for PCI on Celleb platform.
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This code is based on arch/powerpc/kernel/rtas_pci.c:
7 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 */
24
25#undef DEBUG
26
27#include <linux/kernel.h>
28#include <linux/threads.h>
29#include <linux/pci.h>
30#include <linux/string.h>
31#include <linux/init.h>
32#include <linux/memblock.h>
33#include <linux/pci_regs.h>
34#include <linux/of.h>
35#include <linux/of_device.h>
36#include <linux/slab.h>
37
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/prom.h>
41#include <asm/pci-bridge.h>
42#include <asm/ppc-pci.h>
43
44#include "celleb_pci.h"
45
46#define MAX_PCI_DEVICES 32
47#define MAX_PCI_FUNCTIONS 8
48#define MAX_PCI_BASE_ADDRS 3 /* use 64 bit address */
49
50/* definition for fake pci configuration area for GbE, .... ,and etc. */
51
52struct celleb_pci_resource {
53 struct resource r[MAX_PCI_BASE_ADDRS];
54};
55
56struct celleb_pci_private {
57 unsigned char *fake_config[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
58 struct celleb_pci_resource *res[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
59};
60
61static inline u8 celleb_fake_config_readb(void *addr)
62{
63 u8 *p = addr;
64 return *p;
65}
66
67static inline u16 celleb_fake_config_readw(void *addr)
68{
69 __le16 *p = addr;
70 return le16_to_cpu(*p);
71}
72
73static inline u32 celleb_fake_config_readl(void *addr)
74{
75 __le32 *p = addr;
76 return le32_to_cpu(*p);
77}
78
79static inline void celleb_fake_config_writeb(u32 val, void *addr)
80{
81 u8 *p = addr;
82 *p = val;
83}
84
85static inline void celleb_fake_config_writew(u32 val, void *addr)
86{
87 __le16 val16;
88 __le16 *p = addr;
89 val16 = cpu_to_le16(val);
90 *p = val16;
91}
92
93static inline void celleb_fake_config_writel(u32 val, void *addr)
94{
95 __le32 val32;
96 __le32 *p = addr;
97 val32 = cpu_to_le32(val);
98 *p = val32;
99}
100
101static unsigned char *get_fake_config_start(struct pci_controller *hose,
102 int devno, int fn)
103{
104 struct celleb_pci_private *private = hose->private_data;
105
106 if (private == NULL)
107 return NULL;
108
109 return private->fake_config[devno][fn];
110}
111
112static struct celleb_pci_resource *get_resource_start(
113 struct pci_controller *hose,
114 int devno, int fn)
115{
116 struct celleb_pci_private *private = hose->private_data;
117
118 if (private == NULL)
119 return NULL;
120
121 return private->res[devno][fn];
122}
123
124
125static void celleb_config_read_fake(unsigned char *config, int where,
126 int size, u32 *val)
127{
128 char *p = config + where;
129
130 switch (size) {
131 case 1:
132 *val = celleb_fake_config_readb(p);
133 break;
134 case 2:
135 *val = celleb_fake_config_readw(p);
136 break;
137 case 4:
138 *val = celleb_fake_config_readl(p);
139 break;
140 }
141}
142
143static void celleb_config_write_fake(unsigned char *config, int where,
144 int size, u32 val)
145{
146 char *p = config + where;
147
148 switch (size) {
149 case 1:
150 celleb_fake_config_writeb(val, p);
151 break;
152 case 2:
153 celleb_fake_config_writew(val, p);
154 break;
155 case 4:
156 celleb_fake_config_writel(val, p);
157 break;
158 }
159}
160
161static int celleb_fake_pci_read_config(struct pci_bus *bus,
162 unsigned int devfn, int where, int size, u32 *val)
163{
164 char *config;
165 struct pci_controller *hose = pci_bus_to_host(bus);
166 unsigned int devno = devfn >> 3;
167 unsigned int fn = devfn & 0x7;
168
169 /* allignment check */
170 BUG_ON(where % size);
171
172 pr_debug(" fake read: bus=0x%x, ", bus->number);
173 config = get_fake_config_start(hose, devno, fn);
174
175 pr_debug("devno=0x%x, where=0x%x, size=0x%x, ", devno, where, size);
176 if (!config) {
177 pr_debug("failed\n");
178 return PCIBIOS_DEVICE_NOT_FOUND;
179 }
180
181 celleb_config_read_fake(config, where, size, val);
182 pr_debug("val=0x%x\n", *val);
183
184 return PCIBIOS_SUCCESSFUL;
185}
186
187
188static int celleb_fake_pci_write_config(struct pci_bus *bus,
189 unsigned int devfn, int where, int size, u32 val)
190{
191 char *config;
192 struct pci_controller *hose = pci_bus_to_host(bus);
193 struct celleb_pci_resource *res;
194 unsigned int devno = devfn >> 3;
195 unsigned int fn = devfn & 0x7;
196
197 /* allignment check */
198 BUG_ON(where % size);
199
200 config = get_fake_config_start(hose, devno, fn);
201
202 if (!config)
203 return PCIBIOS_DEVICE_NOT_FOUND;
204
205 if (val == ~0) {
206 int i = (where - PCI_BASE_ADDRESS_0) >> 3;
207
208 switch (where) {
209 case PCI_BASE_ADDRESS_0:
210 case PCI_BASE_ADDRESS_2:
211 if (size != 4)
212 return PCIBIOS_DEVICE_NOT_FOUND;
213 res = get_resource_start(hose, devno, fn);
214 if (!res)
215 return PCIBIOS_DEVICE_NOT_FOUND;
216 celleb_config_write_fake(config, where, size,
217 (res->r[i].end - res->r[i].start));
218 return PCIBIOS_SUCCESSFUL;
219 case PCI_BASE_ADDRESS_1:
220 case PCI_BASE_ADDRESS_3:
221 case PCI_BASE_ADDRESS_4:
222 case PCI_BASE_ADDRESS_5:
223 break;
224 default:
225 break;
226 }
227 }
228
229 celleb_config_write_fake(config, where, size, val);
230 pr_debug(" fake write: where=%x, size=%d, val=%x\n",
231 where, size, val);
232
233 return PCIBIOS_SUCCESSFUL;
234}
235
236static struct pci_ops celleb_fake_pci_ops = {
237 .read = celleb_fake_pci_read_config,
238 .write = celleb_fake_pci_write_config,
239};
240
241static inline void celleb_setup_pci_base_addrs(struct pci_controller *hose,
242 unsigned int devno, unsigned int fn,
243 unsigned int num_base_addr)
244{
245 u32 val;
246 unsigned char *config;
247 struct celleb_pci_resource *res;
248
249 config = get_fake_config_start(hose, devno, fn);
250 res = get_resource_start(hose, devno, fn);
251
252 if (!config || !res)
253 return;
254
255 switch (num_base_addr) {
256 case 3:
257 val = (res->r[2].start & 0xfffffff0)
258 | PCI_BASE_ADDRESS_MEM_TYPE_64;
259 celleb_config_write_fake(config, PCI_BASE_ADDRESS_4, 4, val);
260 val = res->r[2].start >> 32;
261 celleb_config_write_fake(config, PCI_BASE_ADDRESS_5, 4, val);
262 /* FALLTHROUGH */
263 case 2:
264 val = (res->r[1].start & 0xfffffff0)
265 | PCI_BASE_ADDRESS_MEM_TYPE_64;
266 celleb_config_write_fake(config, PCI_BASE_ADDRESS_2, 4, val);
267 val = res->r[1].start >> 32;
268 celleb_config_write_fake(config, PCI_BASE_ADDRESS_3, 4, val);
269 /* FALLTHROUGH */
270 case 1:
271 val = (res->r[0].start & 0xfffffff0)
272 | PCI_BASE_ADDRESS_MEM_TYPE_64;
273 celleb_config_write_fake(config, PCI_BASE_ADDRESS_0, 4, val);
274 val = res->r[0].start >> 32;
275 celleb_config_write_fake(config, PCI_BASE_ADDRESS_1, 4, val);
276 break;
277 }
278
279 val = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
280 celleb_config_write_fake(config, PCI_COMMAND, 2, val);
281}
282
283static int __init celleb_setup_fake_pci_device(struct device_node *node,
284 struct pci_controller *hose)
285{
286 unsigned int rlen;
287 int num_base_addr = 0;
288 u32 val;
289 const u32 *wi0, *wi1, *wi2, *wi3, *wi4;
290 unsigned int devno, fn;
291 struct celleb_pci_private *private = hose->private_data;
292 unsigned char **config = NULL;
293 struct celleb_pci_resource **res = NULL;
294 const char *name;
295 const unsigned long *li;
296 int size, result;
297
298 if (private == NULL) {
299 printk(KERN_ERR "PCI: "
300 "memory space for pci controller is not assigned\n");
301 goto error;
302 }
303
304 name = of_get_property(node, "model", &rlen);
305 if (!name) {
306 printk(KERN_ERR "PCI: model property not found.\n");
307 goto error;
308 }
309
310 wi4 = of_get_property(node, "reg", &rlen);
311 if (wi4 == NULL)
312 goto error;
313
314 devno = ((wi4[0] >> 8) & 0xff) >> 3;
315 fn = (wi4[0] >> 8) & 0x7;
316
317 pr_debug("PCI: celleb_setup_fake_pci() %s devno=%x fn=%x\n", name,
318 devno, fn);
319
320 size = 256;
321 config = &private->fake_config[devno][fn];
322 *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
323 if (*config == NULL) {
324 printk(KERN_ERR "PCI: "
325 "not enough memory for fake configuration space\n");
326 goto error;
327 }
328 pr_debug("PCI: fake config area assigned 0x%016lx\n",
329 (unsigned long)*config);
330
331 size = sizeof(struct celleb_pci_resource);
332 res = &private->res[devno][fn];
333 *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
334 if (*res == NULL) {
335 printk(KERN_ERR
336 "PCI: not enough memory for resource data space\n");
337 goto error;
338 }
339 pr_debug("PCI: res assigned 0x%016lx\n", (unsigned long)*res);
340
341 wi0 = of_get_property(node, "device-id", NULL);
342 wi1 = of_get_property(node, "vendor-id", NULL);
343 wi2 = of_get_property(node, "class-code", NULL);
344 wi3 = of_get_property(node, "revision-id", NULL);
345 if (!wi0 || !wi1 || !wi2 || !wi3) {
346 printk(KERN_ERR "PCI: Missing device tree properties.\n");
347 goto error;
348 }
349
350 celleb_config_write_fake(*config, PCI_DEVICE_ID, 2, wi0[0] & 0xffff);
351 celleb_config_write_fake(*config, PCI_VENDOR_ID, 2, wi1[0] & 0xffff);
352 pr_debug("class-code = 0x%08x\n", wi2[0]);
353
354 celleb_config_write_fake(*config, PCI_CLASS_PROG, 1, wi2[0] & 0xff);
355 celleb_config_write_fake(*config, PCI_CLASS_DEVICE, 2,
356 (wi2[0] >> 8) & 0xffff);
357 celleb_config_write_fake(*config, PCI_REVISION_ID, 1, wi3[0]);
358
359 while (num_base_addr < MAX_PCI_BASE_ADDRS) {
360 result = of_address_to_resource(node,
361 num_base_addr, &(*res)->r[num_base_addr]);
362 if (result)
363 break;
364 num_base_addr++;
365 }
366
367 celleb_setup_pci_base_addrs(hose, devno, fn, num_base_addr);
368
369 li = of_get_property(node, "interrupts", &rlen);
370 if (!li) {
371 printk(KERN_ERR "PCI: interrupts not found.\n");
372 goto error;
373 }
374 val = li[0];
375 celleb_config_write_fake(*config, PCI_INTERRUPT_PIN, 1, 1);
376 celleb_config_write_fake(*config, PCI_INTERRUPT_LINE, 1, val);
377
378#ifdef DEBUG
379 pr_debug("PCI: %s irq=%ld\n", name, li[0]);
380 for (i = 0; i < 6; i++) {
381 celleb_config_read_fake(*config,
382 PCI_BASE_ADDRESS_0 + 0x4 * i, 4,
383 &val);
384 pr_debug("PCI: %s fn=%d base_address_%d=0x%x\n",
385 name, fn, i, val);
386 }
387#endif
388
389 celleb_config_write_fake(*config, PCI_HEADER_TYPE, 1,
390 PCI_HEADER_TYPE_NORMAL);
391
392 return 0;
393
394error:
395 if (mem_init_done) {
396 if (config && *config)
397 kfree(*config);
398 if (res && *res)
399 kfree(*res);
400
401 } else {
402 if (config && *config) {
403 size = 256;
404 memblock_free(__pa(*config), size);
405 }
406 if (res && *res) {
407 size = sizeof(struct celleb_pci_resource);
408 memblock_free(__pa(*res), size);
409 }
410 }
411
412 return 1;
413}
414
415static int __init phb_set_bus_ranges(struct device_node *dev,
416 struct pci_controller *phb)
417{
418 const int *bus_range;
419 unsigned int len;
420
421 bus_range = of_get_property(dev, "bus-range", &len);
422 if (bus_range == NULL || len < 2 * sizeof(int))
423 return 1;
424
425 phb->first_busno = bus_range[0];
426 phb->last_busno = bus_range[1];
427
428 return 0;
429}
430
431static void __init celleb_alloc_private_mem(struct pci_controller *hose)
432{
433 hose->private_data =
434 zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
435 GFP_KERNEL);
436}
437
438static int __init celleb_setup_fake_pci(struct device_node *dev,
439 struct pci_controller *phb)
440{
441 struct device_node *node;
442
443 phb->ops = &celleb_fake_pci_ops;
444 celleb_alloc_private_mem(phb);
445
446 for (node = of_get_next_child(dev, NULL);
447 node != NULL; node = of_get_next_child(dev, node))
448 celleb_setup_fake_pci_device(node, phb);
449
450 return 0;
451}
452
453static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
454 .setup = celleb_setup_fake_pci,
455};
456
457static const struct of_device_id celleb_phb_match[] __initconst = {
458 {
459 .name = "pci-pseudo",
460 .data = &celleb_fake_pci_spec,
461 }, {
462 .name = "epci",
463 .data = &celleb_epci_spec,
464 }, {
465 .name = "pcie",
466 .data = &celleb_pciex_spec,
467 }, {
468 },
469};
470
471int __init celleb_setup_phb(struct pci_controller *phb)
472{
473 struct device_node *dev = phb->dn;
474 const struct of_device_id *match;
475 const struct celleb_phb_spec *phb_spec;
476 int rc;
477
478 match = of_match_node(celleb_phb_match, dev);
479 if (!match)
480 return 1;
481
482 phb_set_bus_ranges(dev, phb);
483 phb->buid = 1;
484
485 phb_spec = match->data;
486 rc = (*phb_spec->setup)(dev, phb);
487 if (rc)
488 return 1;
489
490 if (phb_spec->ops)
491 iowa_register_bus(phb, phb_spec->ops,
492 phb_spec->iowa_init,
493 phb_spec->iowa_data);
494 return 0;
495}
496
497int celleb_pci_probe_mode(struct pci_bus *bus)
498{
499 return PCI_PROBE_DEVTREE;
500}
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
deleted file mode 100644
index a801fcc5f389..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * pci prototypes for Celleb platform
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#ifndef _CELLEB_PCI_H
22#define _CELLEB_PCI_H
23
24#include <linux/pci.h>
25
26#include <asm/pci-bridge.h>
27#include <asm/prom.h>
28#include <asm/ppc-pci.h>
29#include <asm/io-workarounds.h>
30
31struct iowa_bus;
32
33struct celleb_phb_spec {
34 int (*setup)(struct device_node *, struct pci_controller *);
35 struct ppc_pci_io *ops;
36 int (*iowa_init)(struct iowa_bus *, void *);
37 void *iowa_data;
38};
39
40extern int celleb_setup_phb(struct pci_controller *);
41extern int celleb_pci_probe_mode(struct pci_bus *);
42
43extern struct celleb_phb_spec celleb_epci_spec;
44extern struct celleb_phb_spec celleb_pciex_spec;
45
46#endif /* _CELLEB_PCI_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc.h b/arch/powerpc/platforms/cell/celleb_scc.h
deleted file mode 100644
index b596a711c348..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc.h
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * SCC (Super Companion Chip) definitions
3 *
4 * (C) Copyright 2004-2006 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#ifndef _CELLEB_SCC_H
22#define _CELLEB_SCC_H
23
24#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
25#define PCI_DEVICE_ID_TOSHIBA_SCC_PCIEXC_BRIDGE 0x01b0
26#define PCI_DEVICE_ID_TOSHIBA_SCC_EPCI_BRIDGE 0x01b1
27#define PCI_DEVICE_ID_TOSHIBA_SCC_BRIDGE 0x01b2
28#define PCI_DEVICE_ID_TOSHIBA_SCC_GBE 0x01b3
29#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
30#define PCI_DEVICE_ID_TOSHIBA_SCC_USB2 0x01b5
31#define PCI_DEVICE_ID_TOSHIBA_SCC_USB 0x01b6
32#define PCI_DEVICE_ID_TOSHIBA_SCC_ENCDEC 0x01b7
33
34#define SCC_EPCI_REG 0x0000d000
35
36/* EPCI registers */
37#define SCC_EPCI_CNF10_REG 0x010
38#define SCC_EPCI_CNF14_REG 0x014
39#define SCC_EPCI_CNF18_REG 0x018
40#define SCC_EPCI_PVBAT 0x100
41#define SCC_EPCI_VPMBAT 0x104
42#define SCC_EPCI_VPIBAT 0x108
43#define SCC_EPCI_VCSR 0x110
44#define SCC_EPCI_VIENAB 0x114
45#define SCC_EPCI_VISTAT 0x118
46#define SCC_EPCI_VRDCOUNT 0x124
47#define SCC_EPCI_BAM0 0x12c
48#define SCC_EPCI_BAM1 0x134
49#define SCC_EPCI_BAM2 0x13c
50#define SCC_EPCI_IADR 0x164
51#define SCC_EPCI_CLKRST 0x800
52#define SCC_EPCI_INTSET 0x804
53#define SCC_EPCI_STATUS 0x808
54#define SCC_EPCI_ABTSET 0x80c
55#define SCC_EPCI_WATRP 0x810
56#define SCC_EPCI_DUMYRADR 0x814
57#define SCC_EPCI_SWRESP 0x818
58#define SCC_EPCI_CNTOPT 0x81c
59#define SCC_EPCI_ECMODE 0xf00
60#define SCC_EPCI_IOM_AC_NUM 5
61#define SCC_EPCI_IOM_ACTE(n) (0xf10 + (n) * 4)
62#define SCC_EPCI_IOT_AC_NUM 4
63#define SCC_EPCI_IOT_ACTE(n) (0xf30 + (n) * 4)
64#define SCC_EPCI_MAEA 0xf50
65#define SCC_EPCI_MAEC 0xf54
66#define SCC_EPCI_CKCTRL 0xff0
67
68/* bits for SCC_EPCI_VCSR */
69#define SCC_EPCI_VCSR_FRE 0x00020000
70#define SCC_EPCI_VCSR_FWE 0x00010000
71#define SCC_EPCI_VCSR_DR 0x00000400
72#define SCC_EPCI_VCSR_SR 0x00000008
73#define SCC_EPCI_VCSR_AT 0x00000004
74
75/* bits for SCC_EPCI_VIENAB/SCC_EPCI_VISTAT */
76#define SCC_EPCI_VISTAT_PMPE 0x00000008
77#define SCC_EPCI_VISTAT_PMFE 0x00000004
78#define SCC_EPCI_VISTAT_PRA 0x00000002
79#define SCC_EPCI_VISTAT_PRD 0x00000001
80#define SCC_EPCI_VISTAT_ALL 0x0000000f
81
82#define SCC_EPCI_VIENAB_PMPEE 0x00000008
83#define SCC_EPCI_VIENAB_PMFEE 0x00000004
84#define SCC_EPCI_VIENAB_PRA 0x00000002
85#define SCC_EPCI_VIENAB_PRD 0x00000001
86#define SCC_EPCI_VIENAB_ALL 0x0000000f
87
88/* bits for SCC_EPCI_CLKRST */
89#define SCC_EPCI_CLKRST_CKS_MASK 0x00030000
90#define SCC_EPCI_CLKRST_CKS_2 0x00000000
91#define SCC_EPCI_CLKRST_CKS_4 0x00010000
92#define SCC_EPCI_CLKRST_CKS_8 0x00020000
93#define SCC_EPCI_CLKRST_PCICRST 0x00000400
94#define SCC_EPCI_CLKRST_BC 0x00000200
95#define SCC_EPCI_CLKRST_PCIRST 0x00000100
96#define SCC_EPCI_CLKRST_PCKEN 0x00000001
97
98/* bits for SCC_EPCI_INTSET/SCC_EPCI_STATUS */
99#define SCC_EPCI_INT_2M 0x01000000
100#define SCC_EPCI_INT_RERR 0x00200000
101#define SCC_EPCI_INT_SERR 0x00100000
102#define SCC_EPCI_INT_PRTER 0x00080000
103#define SCC_EPCI_INT_SER 0x00040000
104#define SCC_EPCI_INT_PER 0x00020000
105#define SCC_EPCI_INT_PAI 0x00010000
106#define SCC_EPCI_INT_1M 0x00000100
107#define SCC_EPCI_INT_PME 0x00000010
108#define SCC_EPCI_INT_INTD 0x00000008
109#define SCC_EPCI_INT_INTC 0x00000004
110#define SCC_EPCI_INT_INTB 0x00000002
111#define SCC_EPCI_INT_INTA 0x00000001
112#define SCC_EPCI_INT_DEVINT 0x0000000f
113#define SCC_EPCI_INT_ALL 0x003f001f
114#define SCC_EPCI_INT_ALLERR 0x003f0000
115
116/* bits for SCC_EPCI_CKCTRL */
117#define SCC_EPCI_CKCTRL_CRST0 0x00010000
118#define SCC_EPCI_CKCTRL_CRST1 0x00020000
119#define SCC_EPCI_CKCTRL_OCLKEN 0x00000100
120#define SCC_EPCI_CKCTRL_LCLKEN 0x00000001
121
122#define SCC_EPCI_IDSEL_AD_TO_SLOT(ad) ((ad) - 10)
123#define SCC_EPCI_MAX_DEVNU SCC_EPCI_IDSEL_AD_TO_SLOT(32)
124
125/* bits for SCC_EPCI_CNTOPT */
126#define SCC_EPCI_CNTOPT_O2PMB 0x00000002
127
128/* SCC PCIEXC SMMIO registers */
129#define PEXCADRS 0x000
130#define PEXCWDATA 0x004
131#define PEXCRDATA 0x008
132#define PEXDADRS 0x010
133#define PEXDCMND 0x014
134#define PEXDWDATA 0x018
135#define PEXDRDATA 0x01c
136#define PEXREQID 0x020
137#define PEXTIDMAP 0x024
138#define PEXINTMASK 0x028
139#define PEXINTSTS 0x02c
140#define PEXAERRMASK 0x030
141#define PEXAERRSTS 0x034
142#define PEXPRERRMASK 0x040
143#define PEXPRERRSTS 0x044
144#define PEXPRERRID01 0x048
145#define PEXPRERRID23 0x04c
146#define PEXVDMASK 0x050
147#define PEXVDSTS 0x054
148#define PEXRCVCPLIDA 0x060
149#define PEXLENERRIDA 0x068
150#define PEXPHYPLLST 0x070
151#define PEXDMRDEN0 0x100
152#define PEXDMRDADR0 0x104
153#define PEXDMRDENX 0x110
154#define PEXDMRDADRX 0x114
155#define PEXECMODE 0xf00
156#define PEXMAEA(n) (0xf50 + (8 * n))
157#define PEXMAEC(n) (0xf54 + (8 * n))
158#define PEXCCRCTRL 0xff0
159
160/* SCC PCIEXC bits and shifts for PEXCADRS */
161#define PEXCADRS_BYTE_EN_SHIFT 20
162#define PEXCADRS_CMD_SHIFT 16
163#define PEXCADRS_CMD_READ (0xa << PEXCADRS_CMD_SHIFT)
164#define PEXCADRS_CMD_WRITE (0xb << PEXCADRS_CMD_SHIFT)
165
166/* SCC PCIEXC shifts for PEXDADRS */
167#define PEXDADRS_BUSNO_SHIFT 20
168#define PEXDADRS_DEVNO_SHIFT 15
169#define PEXDADRS_FUNCNO_SHIFT 12
170
171/* SCC PCIEXC bits and shifts for PEXDCMND */
172#define PEXDCMND_BYTE_EN_SHIFT 4
173#define PEXDCMND_IO_READ 0x2
174#define PEXDCMND_IO_WRITE 0x3
175#define PEXDCMND_CONFIG_READ 0xa
176#define PEXDCMND_CONFIG_WRITE 0xb
177
178/* SCC PCIEXC bits for PEXPHYPLLST */
179#define PEXPHYPLLST_PEXPHYAPLLST 0x00000001
180
181/* SCC PCIEXC bits for PEXECMODE */
182#define PEXECMODE_ALL_THROUGH 0x00000000
183#define PEXECMODE_ALL_8BIT 0x00550155
184#define PEXECMODE_ALL_16BIT 0x00aa02aa
185
186/* SCC PCIEXC bits for PEXCCRCTRL */
187#define PEXCCRCTRL_PEXIPCOREEN 0x00040000
188#define PEXCCRCTRL_PEXIPCONTEN 0x00020000
189#define PEXCCRCTRL_PEXPHYPLLEN 0x00010000
190#define PEXCCRCTRL_PCIEXCAOCKEN 0x00000100
191
192/* SCC PCIEXC port configuration registers */
193#define PEXTCERRCHK 0x21c
194#define PEXTAMAPB0 0x220
195#define PEXTAMAPL0 0x224
196#define PEXTAMAPB(n) (PEXTAMAPB0 + 8 * (n))
197#define PEXTAMAPL(n) (PEXTAMAPL0 + 8 * (n))
198#define PEXCHVC0P 0x500
199#define PEXCHVC0NP 0x504
200#define PEXCHVC0C 0x508
201#define PEXCDVC0P 0x50c
202#define PEXCDVC0NP 0x510
203#define PEXCDVC0C 0x514
204#define PEXCHVCXP 0x518
205#define PEXCHVCXNP 0x51c
206#define PEXCHVCXC 0x520
207#define PEXCDVCXP 0x524
208#define PEXCDVCXNP 0x528
209#define PEXCDVCXC 0x52c
210#define PEXCTTRG 0x530
211#define PEXTSCTRL 0x700
212#define PEXTSSTS 0x704
213#define PEXSKPCTRL 0x708
214
215/* UHC registers */
216#define SCC_UHC_CKRCTRL 0xff0
217#define SCC_UHC_ECMODE 0xf00
218
219/* bits for SCC_UHC_CKRCTRL */
220#define SCC_UHC_F48MCKLEN 0x00000001
221#define SCC_UHC_P_SUSPEND 0x00000002
222#define SCC_UHC_PHY_SUSPEND_SEL 0x00000004
223#define SCC_UHC_HCLKEN 0x00000100
224#define SCC_UHC_USBEN 0x00010000
225#define SCC_UHC_USBCEN 0x00020000
226#define SCC_UHC_PHYEN 0x00040000
227
228/* bits for SCC_UHC_ECMODE */
229#define SCC_UHC_ECMODE_BY_BYTE 0x00000555
230#define SCC_UHC_ECMODE_BY_WORD 0x00000aaa
231
232#endif /* _CELLEB_SCC_H */
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
deleted file mode 100644
index 9438bbed402f..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ /dev/null
@@ -1,428 +0,0 @@
1/*
2 * Support for SCC external PCI
3 *
4 * (C) Copyright 2004-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#undef DEBUG
22
23#include <linux/kernel.h>
24#include <linux/threads.h>
25#include <linux/pci.h>
26#include <linux/init.h>
27#include <linux/pci_regs.h>
28
29#include <asm/io.h>
30#include <asm/irq.h>
31#include <asm/prom.h>
32#include <asm/pci-bridge.h>
33#include <asm/ppc-pci.h>
34
35#include "celleb_scc.h"
36#include "celleb_pci.h"
37
38#define MAX_PCI_DEVICES 32
39#define MAX_PCI_FUNCTIONS 8
40
41#define iob() __asm__ __volatile__("eieio; sync":::"memory")
42
43static inline PCI_IO_ADDR celleb_epci_get_epci_base(
44 struct pci_controller *hose)
45{
46 /*
47 * Note:
48 * Celleb epci uses cfg_addr as a base address for
49 * epci control registers.
50 */
51
52 return hose->cfg_addr;
53}
54
55static inline PCI_IO_ADDR celleb_epci_get_epci_cfg(
56 struct pci_controller *hose)
57{
58 /*
59 * Note:
60 * Celleb epci uses cfg_data as a base address for
61 * configuration area for epci devices.
62 */
63
64 return hose->cfg_data;
65}
66
67static inline void clear_and_disable_master_abort_interrupt(
68 struct pci_controller *hose)
69{
70 PCI_IO_ADDR epci_base;
71 PCI_IO_ADDR reg;
72 epci_base = celleb_epci_get_epci_base(hose);
73 reg = epci_base + PCI_COMMAND;
74 out_be32(reg, in_be32(reg) | (PCI_STATUS_REC_MASTER_ABORT << 16));
75}
76
77static int celleb_epci_check_abort(struct pci_controller *hose,
78 PCI_IO_ADDR addr)
79{
80 PCI_IO_ADDR reg;
81 PCI_IO_ADDR epci_base;
82 u32 val;
83
84 iob();
85 epci_base = celleb_epci_get_epci_base(hose);
86
87 reg = epci_base + PCI_COMMAND;
88 val = in_be32(reg);
89
90 if (val & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
91 out_be32(reg,
92 (val & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16));
93
94 /* clear PCI Controller error, FRE, PMFE */
95 reg = epci_base + SCC_EPCI_STATUS;
96 out_be32(reg, SCC_EPCI_INT_PAI);
97
98 reg = epci_base + SCC_EPCI_VCSR;
99 val = in_be32(reg) & 0xffff;
100 val |= SCC_EPCI_VCSR_FRE;
101 out_be32(reg, val);
102
103 reg = epci_base + SCC_EPCI_VISTAT;
104 out_be32(reg, SCC_EPCI_VISTAT_PMFE);
105 return PCIBIOS_DEVICE_NOT_FOUND;
106 }
107
108 return PCIBIOS_SUCCESSFUL;
109}
110
111static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus,
112 struct pci_controller *hose, unsigned int devfn, int where)
113{
114 PCI_IO_ADDR addr;
115
116 if (bus != hose->bus)
117 addr = celleb_epci_get_epci_cfg(hose) +
118 (((bus->number & 0xff) << 16)
119 | ((devfn & 0xff) << 8)
120 | (where & 0xff)
121 | 0x01000000);
122 else
123 addr = celleb_epci_get_epci_cfg(hose) +
124 (((devfn & 0xff) << 8) | (where & 0xff));
125
126 pr_debug("EPCI: config_addr = 0x%p\n", addr);
127
128 return addr;
129}
130
131static int celleb_epci_read_config(struct pci_bus *bus,
132 unsigned int devfn, int where, int size, u32 *val)
133{
134 PCI_IO_ADDR epci_base;
135 PCI_IO_ADDR addr;
136 struct pci_controller *hose = pci_bus_to_host(bus);
137
138 /* allignment check */
139 BUG_ON(where % size);
140
141 if (!celleb_epci_get_epci_cfg(hose))
142 return PCIBIOS_DEVICE_NOT_FOUND;
143
144 if (bus->number == hose->first_busno && devfn == 0) {
145 /* EPCI controller self */
146
147 epci_base = celleb_epci_get_epci_base(hose);
148 addr = epci_base + where;
149
150 switch (size) {
151 case 1:
152 *val = in_8(addr);
153 break;
154 case 2:
155 *val = in_be16(addr);
156 break;
157 case 4:
158 *val = in_be32(addr);
159 break;
160 default:
161 return PCIBIOS_DEVICE_NOT_FOUND;
162 }
163
164 } else {
165
166 clear_and_disable_master_abort_interrupt(hose);
167 addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
168
169 switch (size) {
170 case 1:
171 *val = in_8(addr);
172 break;
173 case 2:
174 *val = in_le16(addr);
175 break;
176 case 4:
177 *val = in_le32(addr);
178 break;
179 default:
180 return PCIBIOS_DEVICE_NOT_FOUND;
181 }
182 }
183
184 pr_debug("EPCI: "
185 "addr=0x%p, devfn=0x%x, where=0x%x, size=0x%x, val=0x%x\n",
186 addr, devfn, where, size, *val);
187
188 return celleb_epci_check_abort(hose, NULL);
189}
190
191static int celleb_epci_write_config(struct pci_bus *bus,
192 unsigned int devfn, int where, int size, u32 val)
193{
194 PCI_IO_ADDR epci_base;
195 PCI_IO_ADDR addr;
196 struct pci_controller *hose = pci_bus_to_host(bus);
197
198 /* allignment check */
199 BUG_ON(where % size);
200
201 if (!celleb_epci_get_epci_cfg(hose))
202 return PCIBIOS_DEVICE_NOT_FOUND;
203
204 if (bus->number == hose->first_busno && devfn == 0) {
205 /* EPCI controller self */
206
207 epci_base = celleb_epci_get_epci_base(hose);
208 addr = epci_base + where;
209
210 switch (size) {
211 case 1:
212 out_8(addr, val);
213 break;
214 case 2:
215 out_be16(addr, val);
216 break;
217 case 4:
218 out_be32(addr, val);
219 break;
220 default:
221 return PCIBIOS_DEVICE_NOT_FOUND;
222 }
223
224 } else {
225
226 clear_and_disable_master_abort_interrupt(hose);
227 addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
228
229 switch (size) {
230 case 1:
231 out_8(addr, val);
232 break;
233 case 2:
234 out_le16(addr, val);
235 break;
236 case 4:
237 out_le32(addr, val);
238 break;
239 default:
240 return PCIBIOS_DEVICE_NOT_FOUND;
241 }
242 }
243
244 return celleb_epci_check_abort(hose, addr);
245}
246
247struct pci_ops celleb_epci_ops = {
248 .read = celleb_epci_read_config,
249 .write = celleb_epci_write_config,
250};
251
252/* to be moved in FW */
253static int __init celleb_epci_init(struct pci_controller *hose)
254{
255 u32 val;
256 PCI_IO_ADDR reg;
257 PCI_IO_ADDR epci_base;
258 int hwres = 0;
259
260 epci_base = celleb_epci_get_epci_base(hose);
261
262 /* PCI core reset(Internal bus and PCI clock) */
263 reg = epci_base + SCC_EPCI_CKCTRL;
264 val = in_be32(reg);
265 if (val == 0x00030101)
266 hwres = 1;
267 else {
268 val &= ~(SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
269 out_be32(reg, val);
270
271 /* set PCI core clock */
272 val = in_be32(reg);
273 val |= (SCC_EPCI_CKCTRL_OCLKEN | SCC_EPCI_CKCTRL_LCLKEN);
274 out_be32(reg, val);
275
276 /* release PCI core reset (internal bus) */
277 val = in_be32(reg);
278 val |= SCC_EPCI_CKCTRL_CRST0;
279 out_be32(reg, val);
280
281 /* set PCI clock select */
282 reg = epci_base + SCC_EPCI_CLKRST;
283 val = in_be32(reg);
284 val &= ~SCC_EPCI_CLKRST_CKS_MASK;
285 val |= SCC_EPCI_CLKRST_CKS_2;
286 out_be32(reg, val);
287
288 /* set arbiter */
289 reg = epci_base + SCC_EPCI_ABTSET;
290 out_be32(reg, 0x0f1f001f); /* temporary value */
291
292 /* buffer on */
293 reg = epci_base + SCC_EPCI_CLKRST;
294 val = in_be32(reg);
295 val |= SCC_EPCI_CLKRST_BC;
296 out_be32(reg, val);
297
298 /* PCI clock enable */
299 val = in_be32(reg);
300 val |= SCC_EPCI_CLKRST_PCKEN;
301 out_be32(reg, val);
302
303 /* release PCI core reset (all) */
304 reg = epci_base + SCC_EPCI_CKCTRL;
305 val = in_be32(reg);
306 val |= (SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
307 out_be32(reg, val);
308
309 /* set base translation registers. (already set by Beat) */
310
311 /* set base address masks. (already set by Beat) */
312 }
313
314 /* release interrupt masks and clear all interrupts */
315 reg = epci_base + SCC_EPCI_INTSET;
316 out_be32(reg, 0x013f011f); /* all interrupts enable */
317 reg = epci_base + SCC_EPCI_VIENAB;
318 val = SCC_EPCI_VIENAB_PMPEE | SCC_EPCI_VIENAB_PMFEE;
319 out_be32(reg, val);
320 reg = epci_base + SCC_EPCI_STATUS;
321 out_be32(reg, 0xffffffff);
322 reg = epci_base + SCC_EPCI_VISTAT;
323 out_be32(reg, 0xffffffff);
324
325 /* disable PCI->IB address translation */
326 reg = epci_base + SCC_EPCI_VCSR;
327 val = in_be32(reg);
328 val &= ~(SCC_EPCI_VCSR_DR | SCC_EPCI_VCSR_AT);
329 out_be32(reg, val);
330
331 /* set base addresses. (no need to set?) */
332
333 /* memory space, bus master enable */
334 reg = epci_base + PCI_COMMAND;
335 val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
336 out_be32(reg, val);
337
338 /* endian mode setup */
339 reg = epci_base + SCC_EPCI_ECMODE;
340 val = 0x00550155;
341 out_be32(reg, val);
342
343 /* set control option */
344 reg = epci_base + SCC_EPCI_CNTOPT;
345 val = in_be32(reg);
346 val |= SCC_EPCI_CNTOPT_O2PMB;
347 out_be32(reg, val);
348
349 /* XXX: temporay: set registers for address conversion setup */
350 reg = epci_base + SCC_EPCI_CNF10_REG;
351 out_be32(reg, 0x80000008);
352 reg = epci_base + SCC_EPCI_CNF14_REG;
353 out_be32(reg, 0x40000008);
354
355 reg = epci_base + SCC_EPCI_BAM0;
356 out_be32(reg, 0x80000000);
357 reg = epci_base + SCC_EPCI_BAM1;
358 out_be32(reg, 0xe0000000);
359
360 reg = epci_base + SCC_EPCI_PVBAT;
361 out_be32(reg, 0x80000000);
362
363 if (!hwres) {
364 /* release external PCI reset */
365 reg = epci_base + SCC_EPCI_CLKRST;
366 val = in_be32(reg);
367 val |= SCC_EPCI_CLKRST_PCIRST;
368 out_be32(reg, val);
369 }
370
371 return 0;
372}
373
374static int __init celleb_setup_epci(struct device_node *node,
375 struct pci_controller *hose)
376{
377 struct resource r;
378
379 pr_debug("PCI: celleb_setup_epci()\n");
380
381 /*
382 * Note:
383 * Celleb epci uses cfg_addr and cfg_data member of
384 * pci_controller structure in irregular way.
385 *
386 * cfg_addr is used to map for control registers of
387 * celleb epci.
388 *
389 * cfg_data is used for configuration area of devices
390 * on Celleb epci buses.
391 */
392
393 if (of_address_to_resource(node, 0, &r))
394 goto error;
395 hose->cfg_addr = ioremap(r.start, resource_size(&r));
396 if (!hose->cfg_addr)
397 goto error;
398 pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
399 r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
400
401 if (of_address_to_resource(node, 2, &r))
402 goto error;
403 hose->cfg_data = ioremap(r.start, resource_size(&r));
404 if (!hose->cfg_data)
405 goto error;
406 pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
407 r.start, (unsigned long)hose->cfg_data, resource_size(&r));
408
409 hose->ops = &celleb_epci_ops;
410 celleb_epci_init(hose);
411
412 return 0;
413
414error:
415 if (hose->cfg_addr)
416 iounmap(hose->cfg_addr);
417
418 if (hose->cfg_data)
419 iounmap(hose->cfg_data);
420 return 1;
421}
422
423struct celleb_phb_spec celleb_epci_spec __initdata = {
424 .setup = celleb_setup_epci,
425 .ops = &spiderpci_ops,
426 .iowa_init = &spiderpci_iowa_init,
427 .iowa_data = (void *)0,
428};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
deleted file mode 100644
index 94170e4f2ce7..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ /dev/null
@@ -1,538 +0,0 @@
1/*
2 * Support for Celleb PCI-Express.
3 *
4 * (C) Copyright 2007-2008 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#undef DEBUG
22
23#include <linux/kernel.h>
24#include <linux/pci.h>
25#include <linux/string.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <asm/iommu.h>
34#include <asm/byteorder.h>
35
36#include "celleb_scc.h"
37#include "celleb_pci.h"
38
39#define PEX_IN(base, off) in_be32((void __iomem *)(base) + (off))
40#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
41
42static void scc_pciex_io_flush(struct iowa_bus *bus)
43{
44 (void)PEX_IN(bus->phb->cfg_addr, PEXDMRDEN0);
45}
46
47/*
48 * Memory space access to device on PCIEX
49 */
50#define PCIEX_MMIO_READ(name, ret) \
51static ret scc_pciex_##name(const PCI_IO_ADDR addr) \
52{ \
53 ret val = __do_##name(addr); \
54 scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
55 return val; \
56}
57
58#define PCIEX_MMIO_READ_STR(name) \
59static void scc_pciex_##name(const PCI_IO_ADDR addr, void *buf, \
60 unsigned long count) \
61{ \
62 __do_##name(addr, buf, count); \
63 scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
64}
65
66PCIEX_MMIO_READ(readb, u8)
67PCIEX_MMIO_READ(readw, u16)
68PCIEX_MMIO_READ(readl, u32)
69PCIEX_MMIO_READ(readq, u64)
70PCIEX_MMIO_READ(readw_be, u16)
71PCIEX_MMIO_READ(readl_be, u32)
72PCIEX_MMIO_READ(readq_be, u64)
73PCIEX_MMIO_READ_STR(readsb)
74PCIEX_MMIO_READ_STR(readsw)
75PCIEX_MMIO_READ_STR(readsl)
76
77static void scc_pciex_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
78 unsigned long n)
79{
80 __do_memcpy_fromio(dest, src, n);
81 scc_pciex_io_flush(iowa_mem_find_bus(src));
82}
83
84/*
85 * I/O port access to devices on PCIEX.
86 */
87
88static inline unsigned long get_bus_address(struct pci_controller *phb,
89 unsigned long port)
90{
91 return port - ((unsigned long)(phb->io_base_virt) - _IO_BASE);
92}
93
94static u32 scc_pciex_read_port(struct pci_controller *phb,
95 unsigned long port, int size)
96{
97 unsigned int byte_enable;
98 unsigned int cmd, shift;
99 unsigned long addr;
100 u32 data, ret;
101
102 BUG_ON(((port & 0x3ul) + size) > 4);
103
104 addr = get_bus_address(phb, port);
105 shift = addr & 0x3ul;
106 byte_enable = ((1 << size) - 1) << shift;
107 cmd = PEXDCMND_IO_READ | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
108 PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
109 PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
110 data = PEX_IN(phb->cfg_addr, PEXDRDATA);
111 ret = (data >> (shift * 8)) & (0xFFFFFFFF >> ((4 - size) * 8));
112
113 pr_debug("PCIEX:PIO READ:port=0x%lx, addr=0x%lx, size=%d, be=%x,"
114 " cmd=%x, data=%x, ret=%x\n", port, addr, size, byte_enable,
115 cmd, data, ret);
116
117 return ret;
118}
119
120static void scc_pciex_write_port(struct pci_controller *phb,
121 unsigned long port, int size, u32 val)
122{
123 unsigned int byte_enable;
124 unsigned int cmd, shift;
125 unsigned long addr;
126 u32 data;
127
128 BUG_ON(((port & 0x3ul) + size) > 4);
129
130 addr = get_bus_address(phb, port);
131 shift = addr & 0x3ul;
132 byte_enable = ((1 << size) - 1) << shift;
133 cmd = PEXDCMND_IO_WRITE | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
134 data = (val & (0xFFFFFFFF >> (4 - size) * 8)) << (shift * 8);
135 PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
136 PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
137 PEX_OUT(phb->cfg_addr, PEXDWDATA, data);
138
139 pr_debug("PCIEX:PIO WRITE:port=0x%lx, addr=%lx, size=%d, val=%x,"
140 " be=%x, cmd=%x, data=%x\n", port, addr, size, val,
141 byte_enable, cmd, data);
142}
143
144static u8 __scc_pciex_inb(struct pci_controller *phb, unsigned long port)
145{
146 return (u8)scc_pciex_read_port(phb, port, 1);
147}
148
149static u16 __scc_pciex_inw(struct pci_controller *phb, unsigned long port)
150{
151 u32 data;
152 if ((port & 0x3ul) < 3)
153 data = scc_pciex_read_port(phb, port, 2);
154 else {
155 u32 d1 = scc_pciex_read_port(phb, port, 1);
156 u32 d2 = scc_pciex_read_port(phb, port + 1, 1);
157 data = d1 | (d2 << 8);
158 }
159 return (u16)data;
160}
161
162static u32 __scc_pciex_inl(struct pci_controller *phb, unsigned long port)
163{
164 unsigned int mod = port & 0x3ul;
165 u32 data;
166 if (mod == 0)
167 data = scc_pciex_read_port(phb, port, 4);
168 else {
169 u32 d1 = scc_pciex_read_port(phb, port, 4 - mod);
170 u32 d2 = scc_pciex_read_port(phb, port + 1, mod);
171 data = d1 | (d2 << (mod * 8));
172 }
173 return data;
174}
175
176static void __scc_pciex_outb(struct pci_controller *phb,
177 u8 val, unsigned long port)
178{
179 scc_pciex_write_port(phb, port, 1, (u32)val);
180}
181
182static void __scc_pciex_outw(struct pci_controller *phb,
183 u16 val, unsigned long port)
184{
185 if ((port & 0x3ul) < 3)
186 scc_pciex_write_port(phb, port, 2, (u32)val);
187 else {
188 u32 d1 = val & 0x000000FF;
189 u32 d2 = (val & 0x0000FF00) >> 8;
190 scc_pciex_write_port(phb, port, 1, d1);
191 scc_pciex_write_port(phb, port + 1, 1, d2);
192 }
193}
194
195static void __scc_pciex_outl(struct pci_controller *phb,
196 u32 val, unsigned long port)
197{
198 unsigned int mod = port & 0x3ul;
199 if (mod == 0)
200 scc_pciex_write_port(phb, port, 4, val);
201 else {
202 u32 d1 = val & (0xFFFFFFFFul >> (mod * 8));
203 u32 d2 = val >> ((4 - mod) * 8);
204 scc_pciex_write_port(phb, port, 4 - mod, d1);
205 scc_pciex_write_port(phb, port + 1, mod, d2);
206 }
207}
208
209#define PCIEX_PIO_FUNC(size, name) \
210static u##size scc_pciex_in##name(unsigned long port) \
211{ \
212 struct iowa_bus *bus = iowa_pio_find_bus(port); \
213 u##size data = __scc_pciex_in##name(bus->phb, port); \
214 scc_pciex_io_flush(bus); \
215 return data; \
216} \
217static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
218{ \
219 struct iowa_bus *bus = iowa_pio_find_bus(p); \
220 __le##size *dst = b; \
221 for (; c != 0; c--, dst++) \
222 *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
223 scc_pciex_io_flush(bus); \
224} \
225static void scc_pciex_out##name(u##size val, unsigned long port) \
226{ \
227 struct iowa_bus *bus = iowa_pio_find_bus(port); \
228 __scc_pciex_out##name(bus->phb, val, port); \
229} \
230static void scc_pciex_outs##name(unsigned long p, const void *b, \
231 unsigned long c) \
232{ \
233 struct iowa_bus *bus = iowa_pio_find_bus(p); \
234 const __le##size *src = b; \
235 for (; c != 0; c--, src++) \
236 __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
237}
238#define __le8 u8
239#define cpu_to_le8(x) (x)
240#define le8_to_cpu(x) (x)
241PCIEX_PIO_FUNC(8, b)
242PCIEX_PIO_FUNC(16, w)
243PCIEX_PIO_FUNC(32, l)
244
245static struct ppc_pci_io scc_pciex_ops = {
246 .readb = scc_pciex_readb,
247 .readw = scc_pciex_readw,
248 .readl = scc_pciex_readl,
249 .readq = scc_pciex_readq,
250 .readw_be = scc_pciex_readw_be,
251 .readl_be = scc_pciex_readl_be,
252 .readq_be = scc_pciex_readq_be,
253 .readsb = scc_pciex_readsb,
254 .readsw = scc_pciex_readsw,
255 .readsl = scc_pciex_readsl,
256 .memcpy_fromio = scc_pciex_memcpy_fromio,
257 .inb = scc_pciex_inb,
258 .inw = scc_pciex_inw,
259 .inl = scc_pciex_inl,
260 .outb = scc_pciex_outb,
261 .outw = scc_pciex_outw,
262 .outl = scc_pciex_outl,
263 .insb = scc_pciex_insb,
264 .insw = scc_pciex_insw,
265 .insl = scc_pciex_insl,
266 .outsb = scc_pciex_outsb,
267 .outsw = scc_pciex_outsw,
268 .outsl = scc_pciex_outsl,
269};
270
271static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
272{
273 dma_addr_t dummy_page_da;
274 void *dummy_page_va;
275
276 dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
277 if (!dummy_page_va) {
278 pr_err("PCIEX:Alloc dummy_page_va failed\n");
279 return -1;
280 }
281
282 dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
283 PAGE_SIZE, DMA_FROM_DEVICE);
284 if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
285 pr_err("PCIEX:Map dummy page failed.\n");
286 kfree(dummy_page_va);
287 return -1;
288 }
289
290 PEX_OUT(bus->phb->cfg_addr, PEXDMRDADR0, dummy_page_da);
291
292 return 0;
293}
294
295/*
296 * config space access
297 */
298#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
299 ((uint32_t)(((addr) & ~0x3UL) | \
300 ((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
301 ((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
302 ((func_no) << PEXDADRS_FUNCNO_SHIFT)))
303
304#define MK_PEXDCMND_BYTE_EN(addr, size) \
305 ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
306#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
307
308static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
309 uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
310 uint64_t off, uint64_t size)
311{
312 uint32_t ret;
313 uint32_t addr, cmd;
314
315 addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
316 cmd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, off, size);
317 PEX_OUT(base, PEXDADRS, addr);
318 PEX_OUT(base, PEXDCMND, cmd);
319 ret = (PEX_IN(base, PEXDRDATA)
320 >> ((off & (4-size)) * 8)) & ((0x1 << (size * 8)) - 1);
321 return ret;
322}
323
324static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
325 uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
326 uint32_t data)
327{
328 uint32_t addr, cmd;
329
330 addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
331 cmd = MK_PEXDCMND(PEXDCMND_CONFIG_WRITE, off, size);
332 PEX_OUT(base, PEXDADRS, addr);
333 PEX_OUT(base, PEXDCMND, cmd);
334 PEX_OUT(base, PEXDWDATA,
335 (data & ((0x1 << (size * 8)) - 1)) << ((off & (4-size)) * 8));
336}
337
338#define MK_PEXCADRS_BYTE_EN(off, len) \
339 ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
340#define MK_PEXCADRS(cmd, addr, size) \
341 ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
342static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
343 uint32_t where, uint32_t size)
344{
345 PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
346 return (PEX_IN(base, PEXCRDATA)
347 >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
348}
349
350static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
351 uint32_t size, uint32_t val)
352{
353 uint32_t data;
354
355 data = (val & ((0x1 << (size * 8)) - 1)) << ((where & (4 - size)) * 8);
356 PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_WRITE, where, size));
357 PEX_OUT(base, PEXCWDATA, data);
358}
359
360/* Interfaces */
361/* Note: Work-around
362 * On SCC PCIEXC, one device is seen on all 32 dev_no.
363 * As SCC PCIEXC can have only one device on the bus, we look only one dev_no.
364 * (dev_no = 1)
365 */
366static int scc_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
367 int where, int size, unsigned int *val)
368{
369 struct pci_controller *phb = pci_bus_to_host(bus);
370
371 if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1) {
372 *val = ~0;
373 return PCIBIOS_DEVICE_NOT_FOUND;
374 }
375
376 if (bus->number == 0 && PCI_SLOT(devfn) == 0)
377 *val = config_read_pciex_rc(phb->cfg_addr, where, size);
378 else
379 *val = config_read_pciex_dev(phb->cfg_addr, bus->number,
380 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
381
382 return PCIBIOS_SUCCESSFUL;
383}
384
385static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
386 int where, int size, unsigned int val)
387{
388 struct pci_controller *phb = pci_bus_to_host(bus);
389
390 if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1)
391 return PCIBIOS_DEVICE_NOT_FOUND;
392
393 if (bus->number == 0 && PCI_SLOT(devfn) == 0)
394 config_write_pciex_rc(phb->cfg_addr, where, size, val);
395 else
396 config_write_pciex_dev(phb->cfg_addr, bus->number,
397 PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
398 return PCIBIOS_SUCCESSFUL;
399}
400
401static struct pci_ops scc_pciex_pci_ops = {
402 .read = scc_pciex_read_config,
403 .write = scc_pciex_write_config,
404};
405
406static void pciex_clear_intr_all(unsigned int __iomem *base)
407{
408 PEX_OUT(base, PEXAERRSTS, 0xffffffff);
409 PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
410 PEX_OUT(base, PEXINTSTS, 0xffffffff);
411}
412
413#if 0
414static void pciex_disable_intr_all(unsigned int *base)
415{
416 PEX_OUT(base, PEXINTMASK, 0x0);
417 PEX_OUT(base, PEXAERRMASK, 0x0);
418 PEX_OUT(base, PEXPRERRMASK, 0x0);
419 PEX_OUT(base, PEXVDMASK, 0x0);
420}
421#endif
422
423static void pciex_enable_intr_all(unsigned int __iomem *base)
424{
425 PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
426 PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
427 PEX_OUT(base, PEXPRERRMASK, 0x0001010f);
428 PEX_OUT(base, PEXVDMASK, 0x00000001);
429}
430
431static void pciex_check_status(unsigned int __iomem *base)
432{
433 uint32_t err = 0;
434 uint32_t intsts, aerr, prerr, rcvcp, lenerr;
435 uint32_t maea, maec;
436
437 intsts = PEX_IN(base, PEXINTSTS);
438 aerr = PEX_IN(base, PEXAERRSTS);
439 prerr = PEX_IN(base, PEXPRERRSTS);
440 rcvcp = PEX_IN(base, PEXRCVCPLIDA);
441 lenerr = PEX_IN(base, PEXLENERRIDA);
442
443 if (intsts || aerr || prerr || rcvcp || lenerr)
444 err = 1;
445
446 pr_info("PCEXC interrupt!!\n");
447 pr_info("PEXINTSTS :0x%08x\n", intsts);
448 pr_info("PEXAERRSTS :0x%08x\n", aerr);
449 pr_info("PEXPRERRSTS :0x%08x\n", prerr);
450 pr_info("PEXRCVCPLIDA :0x%08x\n", rcvcp);
451 pr_info("PEXLENERRIDA :0x%08x\n", lenerr);
452
453 /* print detail of Protection Error */
454 if (intsts & 0x00004000) {
455 uint32_t i, n;
456 for (i = 0; i < 4; i++) {
457 n = 1 << i;
458 if (prerr & n) {
459 maea = PEX_IN(base, PEXMAEA(i));
460 maec = PEX_IN(base, PEXMAEC(i));
461 pr_info("PEXMAEC%d :0x%08x\n", i, maec);
462 pr_info("PEXMAEA%d :0x%08x\n", i, maea);
463 }
464 }
465 }
466
467 if (err)
468 pciex_clear_intr_all(base);
469}
470
471static irqreturn_t pciex_handle_internal_irq(int irq, void *dev_id)
472{
473 struct pci_controller *phb = dev_id;
474
475 pr_debug("PCIEX:pciex_handle_internal_irq(irq=%d)\n", irq);
476
477 BUG_ON(phb->cfg_addr == NULL);
478
479 pciex_check_status(phb->cfg_addr);
480
481 return IRQ_HANDLED;
482}
483
484static __init int celleb_setup_pciex(struct device_node *node,
485 struct pci_controller *phb)
486{
487 struct resource r;
488 int virq;
489
490 /* SMMIO registers; used inside this file */
491 if (of_address_to_resource(node, 0, &r)) {
492 pr_err("PCIEXC:Failed to get config resource.\n");
493 return 1;
494 }
495 phb->cfg_addr = ioremap(r.start, resource_size(&r));
496 if (!phb->cfg_addr) {
497 pr_err("PCIEXC:Failed to remap SMMIO region.\n");
498 return 1;
499 }
500
501 /* Not use cfg_data, cmd and data regs are near address reg */
502 phb->cfg_data = NULL;
503
504 /* set pci_ops */
505 phb->ops = &scc_pciex_pci_ops;
506
507 /* internal interrupt handler */
508 virq = irq_of_parse_and_map(node, 1);
509 if (!virq) {
510 pr_err("PCIEXC:Failed to map irq\n");
511 goto error;
512 }
513 if (request_irq(virq, pciex_handle_internal_irq,
514 0, "pciex", (void *)phb)) {
515 pr_err("PCIEXC:Failed to request irq\n");
516 goto error;
517 }
518
519 /* enable all interrupts */
520 pciex_clear_intr_all(phb->cfg_addr);
521 pciex_enable_intr_all(phb->cfg_addr);
522 /* MSI: TBD */
523
524 return 0;
525
526error:
527 phb->cfg_data = NULL;
528 if (phb->cfg_addr)
529 iounmap(phb->cfg_addr);
530 phb->cfg_addr = NULL;
531 return 1;
532}
533
534struct celleb_phb_spec celleb_pciex_spec __initdata = {
535 .setup = celleb_setup_pciex,
536 .ops = &scc_pciex_ops,
537 .iowa_init = &scc_pciex_iowa_init,
538};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
deleted file mode 100644
index c8eb57193826..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * setup serial port in SCC
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/tty.h>
22#include <linux/serial.h>
23#include <linux/serial_core.h>
24#include <linux/console.h>
25
26#include <asm/io.h>
27#include <asm/prom.h>
28
29/* sio irq0=0xb00010022 irq0=0xb00010023 irq2=0xb00010024
30 mmio=0xfff000-0x1000,0xff2000-0x1000 */
31static int txx9_serial_bitmap __initdata;
32
33static struct {
34 uint32_t offset;
35 uint32_t index;
36} txx9_scc_tab[3] __initdata = {
37 { 0x300, 0 }, /* 0xFFF300 */
38 { 0x400, 0 }, /* 0xFFF400 */
39 { 0x800, 1 } /* 0xFF2800 */
40};
41
42static int __init txx9_serial_init(void)
43{
44 extern int early_serial_txx9_setup(struct uart_port *port);
45 struct device_node *node;
46 int i;
47 struct uart_port req;
48 struct of_phandle_args irq;
49 struct resource res;
50
51 for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
52 for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
53 if (!(txx9_serial_bitmap & (1<<i)))
54 continue;
55
56 if (of_irq_parse_one(node, i, &irq))
57 continue;
58 if (of_address_to_resource(node,
59 txx9_scc_tab[i].index, &res))
60 continue;
61
62 memset(&req, 0, sizeof(req));
63 req.line = i;
64 req.iotype = UPIO_MEM;
65 req.mapbase = res.start + txx9_scc_tab[i].offset;
66#ifdef CONFIG_SERIAL_TXX9_CONSOLE
67 req.membase = ioremap(req.mapbase, 0x24);
68#endif
69 req.irq = irq_create_of_mapping(&irq);
70 req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
71 /*HAVE_CTS_LINE*/;
72 req.uartclk = 83300000;
73 early_serial_txx9_setup(&req);
74 }
75 }
76
77 return 0;
78}
79
80static int __init txx9_serial_config(char *ptr)
81{
82 int i;
83
84 for (;;) {
85 switch (get_option(&ptr, &i)) {
86 default:
87 return 0;
88 case 2:
89 txx9_serial_bitmap |= 1 << i;
90 break;
91 case 1:
92 txx9_serial_bitmap |= 1 << i;
93 return 0;
94 }
95 }
96}
97__setup("txx9_serial=", txx9_serial_config);
98
99console_initcall(txx9_serial_init);
diff --git a/arch/powerpc/platforms/cell/celleb_scc_uhc.c b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
deleted file mode 100644
index d63b720bfe3a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_uhc.c
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * SCC (Super Companion Chip) UHC setup
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/pci.h>
23
24#include <asm/delay.h>
25#include <asm/io.h>
26#include <asm/machdep.h>
27
28#include "celleb_scc.h"
29
30#define UHC_RESET_WAIT_MAX 10000
31
32static inline int uhc_clkctrl_ready(u32 val)
33{
34 const u32 mask = SCC_UHC_USBCEN | SCC_UHC_USBCEN;
35 return((val & mask) == mask);
36}
37
38/*
39 * UHC(usb host controller) enable function.
40 * affect to both of OHCI and EHCI core module.
41 */
42static void enable_scc_uhc(struct pci_dev *dev)
43{
44 void __iomem *uhc_base;
45 u32 __iomem *uhc_clkctrl;
46 u32 __iomem *uhc_ecmode;
47 u32 val = 0;
48 int i;
49
50 if (!machine_is(celleb_beat) &&
51 !machine_is(celleb_native))
52 return;
53
54 uhc_base = ioremap(pci_resource_start(dev, 0),
55 pci_resource_len(dev, 0));
56 if (!uhc_base) {
57 printk(KERN_ERR "failed to map UHC register base.\n");
58 return;
59 }
60 uhc_clkctrl = uhc_base + SCC_UHC_CKRCTRL;
61 uhc_ecmode = uhc_base + SCC_UHC_ECMODE;
62
63 /* setup for normal mode */
64 val |= SCC_UHC_F48MCKLEN;
65 out_be32(uhc_clkctrl, val);
66 val |= SCC_UHC_PHY_SUSPEND_SEL;
67 out_be32(uhc_clkctrl, val);
68 udelay(10);
69 val |= SCC_UHC_PHYEN;
70 out_be32(uhc_clkctrl, val);
71 udelay(50);
72
73 /* disable reset */
74 val |= SCC_UHC_HCLKEN;
75 out_be32(uhc_clkctrl, val);
76 val |= (SCC_UHC_USBCEN | SCC_UHC_USBEN);
77 out_be32(uhc_clkctrl, val);
78 i = 0;
79 while (!uhc_clkctrl_ready(in_be32(uhc_clkctrl))) {
80 udelay(10);
81 if (i++ > UHC_RESET_WAIT_MAX) {
82 printk(KERN_ERR "Failed to disable UHC reset %x\n",
83 in_be32(uhc_clkctrl));
84 break;
85 }
86 }
87
88 /* Endian Conversion Mode for Master ALL area */
89 out_be32(uhc_ecmode, SCC_UHC_ECMODE_BY_BYTE);
90
91 iounmap(uhc_base);
92}
93
94DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
95 PCI_DEVICE_ID_TOSHIBA_SCC_USB, enable_scc_uhc);
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
deleted file mode 100644
index 90be8ec51686..000000000000
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ /dev/null
@@ -1,243 +0,0 @@
1/*
2 * Celleb setup code
3 *
4 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
5 *
6 * This code is based on arch/powerpc/platforms/cell/setup.c:
7 * Copyright (C) 1995 Linus Torvalds
8 * Adapted from 'alpha' version by Gary Thomas
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * Modified by PPC64 Team, IBM Corp
11 * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 */
27
28#undef DEBUG
29
30#include <linux/cpu.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/export.h>
34#include <linux/mm.h>
35#include <linux/stddef.h>
36#include <linux/unistd.h>
37#include <linux/reboot.h>
38#include <linux/init.h>
39#include <linux/delay.h>
40#include <linux/irq.h>
41#include <linux/seq_file.h>
42#include <linux/root_dev.h>
43#include <linux/console.h>
44#include <linux/of_platform.h>
45
46#include <asm/mmu.h>
47#include <asm/processor.h>
48#include <asm/io.h>
49#include <asm/prom.h>
50#include <asm/machdep.h>
51#include <asm/cputable.h>
52#include <asm/irq.h>
53#include <asm/time.h>
54#include <asm/spu_priv1.h>
55#include <asm/firmware.h>
56#include <asm/rtas.h>
57#include <asm/cell-regs.h>
58
59#include "beat_interrupt.h"
60#include "beat_wrapper.h"
61#include "beat.h"
62#include "celleb_pci.h"
63#include "interrupt.h"
64#include "pervasive.h"
65#include "ras.h"
66
67static char celleb_machine_type[128] = "Celleb";
68
69static void celleb_show_cpuinfo(struct seq_file *m)
70{
71 struct device_node *root;
72 const char *model = "";
73
74 root = of_find_node_by_path("/");
75 if (root)
76 model = of_get_property(root, "model", NULL);
77 /* using "CHRP" is to trick anaconda into installing FCx into Celleb */
78 seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
79 of_node_put(root);
80}
81
/*
 * Command line hook: "celleb_machine_type_hack=<name>" replaces the
 * machine type string printed by celleb_show_cpuinfo().
 */
static int __init celleb_machine_type_hack(char *ptr)
{
	/* strlcpy guarantees NUL-termination within the fixed buffer. */
	strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
	return 0;
}

__setup("celleb_machine_type_hack=", celleb_machine_type_hack);
89
/*
 * Boot progress callback (ppc_md.progress): print the progress code
 * and the accompanying message, tolerating a NULL message pointer.
 */
static void celleb_progress(char *s, unsigned short hex)
{
	const char *msg = s ? s : "";

	printk("*** %04x : %s\n", hex, msg);
}
94
/*
 * Setup steps shared by the Beat and native Celleb variants; called at
 * the end of each variant's setup_arch hook.
 */
static void __init celleb_setup_arch_common(void)
{
	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

#ifdef CONFIG_DUMMY_CONSOLE
	/* Fall back to the dummy console until a real one registers. */
	conswitchp = &dummy_con;
#endif
}
104
/* Device tree bus node types under which the southbridge I/O devices
 * live; "ioif" is the old-style name. */
static const struct of_device_id celleb_bus_ids[] __initconst = {
	{ .type = "scc", },
	{ .type = "ioif", }, /* old style */
	{},
};

/*
 * Register OF platform devices for the southbridge I/O buses so their
 * drivers can bind.  Runs as a device initcall on both machine types.
 */
static int __init celleb_publish_devices(void)
{
	/* Publish OF platform devices for southbridge IOs */
	of_platform_bus_probe(NULL, celleb_bus_ids, NULL);

	return 0;
}
machine_device_initcall(celleb_beat, celleb_publish_devices);
machine_device_initcall(celleb_native, celleb_publish_devices);
120
121
/*
 * functions for Celleb-Beat
 */
static void __init celleb_setup_arch_beat(void)
{
#ifdef CONFIG_SPU_BASE
	/* Under Beat, SPU privileged access goes through the hypervisor
	 * ops; SPU discovery still uses the device tree. */
	spu_priv1_ops = &spu_priv1_beat_ops;
	spu_management_ops = &spu_management_of_ops;
#endif

	celleb_setup_arch_common();
}
134
/*
 * Machine probe for Beat-hosted Celleb: matches when the flattened
 * device tree root is compatible with "Beat".  Returns 1 on match,
 * 0 otherwise.
 */
static int __init celleb_probe_beat(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "Beat"))
		return 0;

	/* LPAR/Beat firmware features, Beat hash-table and power-off. */
	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
		| FW_FEATURE_BEAT | FW_FEATURE_LPAR;
	hpte_init_beat_v3();
	pm_power_off = beat_power_off;

	return 1;
}
149
150
/*
 * functions for Celleb-native
 */
static void __init celleb_init_IRQ_native(void)
{
	/* Cell internal interrupt controller first, then the Spider
	 * I/O bridge interrupt controller cascaded behind it. */
	iic_init_IRQ();
	spider_init_IRQ();
}
159
/*
 * setup_arch for native (non-hypervisor) Celleb: direct MMIO access to
 * the Cell BE registers, optional RAS and SMP bring-up.
 */
static void __init celleb_setup_arch_native(void)
{
#ifdef CONFIG_SPU_BASE
	/* Native mode drives SPU privileged registers via MMIO. */
	spu_priv1_ops = &spu_priv1_mmio_ops;
	spu_management_ops = &spu_management_of_ops;
#endif

	/* Map the Cell BE register blocks; must precede RAS/pervasive. */
	cbe_regs_init();

#ifdef CONFIG_CBE_RAS
	cbe_ras_init();
#endif

#ifdef CONFIG_SMP
	smp_init_cell();
#endif

	cbe_pervasive_init();

	/* XXX: nvram initialization should be added */

	celleb_setup_arch_common();
}
183
184static int __init celleb_probe_native(void)
185{
186 unsigned long root = of_get_flat_dt_root();
187
188 if (of_flat_dt_is_compatible(root, "Beat") ||
189 !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
190 return 0;
191
192 powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
193 hpte_init_native();
194 pm_power_off = rtas_power_off;
195
196 return 1;
197}
198
199
/*
 * machine definitions
 */

/* Beat-hosted variant: restart/halt, RTC, NVRAM, power-save and the
 * interrupt controller all go through Beat hypervisor services. */
define_machine(celleb_beat) {
	.name = "Cell Reference Set (Beat)",
	.probe = celleb_probe_beat,
	.setup_arch = celleb_setup_arch_beat,
	.show_cpuinfo = celleb_show_cpuinfo,
	.restart = beat_restart,
	.halt = beat_halt,
	.get_rtc_time = beat_get_rtc_time,
	.set_rtc_time = beat_set_rtc_time,
	.calibrate_decr = generic_calibrate_decr,
	.progress = celleb_progress,
	.power_save = beat_power_save,
	.nvram_size = beat_nvram_get_size,
	.nvram_read = beat_nvram_read,
	.nvram_write = beat_nvram_write,
	.set_dabr = beat_set_xdabr,
	.init_IRQ = beatic_init_IRQ,
	.get_irq = beatic_get_irq,
	.pci_probe_mode = celleb_pci_probe_mode,
	.pci_setup_phb = celleb_setup_phb,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down = beat_kexec_cpu_down,
#endif
};
227
/* Native variant: restart/halt and RTC are provided by RTAS; the
 * interrupt setup uses the Cell IIC plus the Spider bridge. */
define_machine(celleb_native) {
	.name = "Cell Reference Set (native)",
	.probe = celleb_probe_native,
	.setup_arch = celleb_setup_arch_native,
	.show_cpuinfo = celleb_show_cpuinfo,
	.restart = rtas_restart,
	.halt = rtas_halt,
	.get_boot_time = rtas_get_boot_time,
	.get_rtc_time = rtas_get_rtc_time,
	.set_rtc_time = rtas_set_rtc_time,
	.calibrate_decr = generic_calibrate_decr,
	.progress = celleb_progress,
	.pci_probe_mode = celleb_pci_probe_mode,
	.pci_setup_phb = celleb_setup_phb,
	.init_IRQ = celleb_init_IRQ_native,
};
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be..3af8324c122e 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
163 163
164void iic_setup_cpu(void) 164void iic_setup_cpu(void)
165{ 165{
166 out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff); 166 out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
167} 167}
168 168
169u8 iic_get_target_id(int cpu) 169u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index c7c8720aa39f..21b502398bf3 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -39,6 +39,7 @@
39#include <asm/firmware.h> 39#include <asm/firmware.h>
40#include <asm/cell-regs.h> 40#include <asm/cell-regs.h>
41 41
42#include "cell.h"
42#include "interrupt.h" 43#include "interrupt.h"
43 44
44/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages 45/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
@@ -197,7 +198,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
197 198
198 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); 199 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
199 200
200 for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift) 201 for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
201 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); 202 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
202 203
203 mb(); 204 mb();
@@ -857,7 +858,7 @@ static int __init cell_iommu_init_disabled(void)
857 cell_dma_direct_offset += base; 858 cell_dma_direct_offset += base;
858 859
859 if (cell_dma_direct_offset != 0) 860 if (cell_dma_direct_offset != 0)
860 ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup; 861 cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
861 862
862 printk("iommu: disabled, direct DMA offset is 0x%lx\n", 863 printk("iommu: disabled, direct DMA offset is 0x%lx\n",
863 cell_dma_direct_offset); 864 cell_dma_direct_offset);
@@ -1197,8 +1198,8 @@ static int __init cell_iommu_init(void)
1197 if (cell_iommu_init_disabled() == 0) 1198 if (cell_iommu_init_disabled() == 0)
1198 goto bail; 1199 goto bail;
1199 1200
1200 /* Setup various ppc_md. callbacks */ 1201 /* Setup various callbacks */
1201 ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup; 1202 cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
1202 ppc_md.dma_get_required_mask = cell_dma_get_required_mask; 1203 ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
1203 ppc_md.tce_build = tce_build_cell; 1204 ppc_md.tce_build = tce_build_cell;
1204 ppc_md.tce_free = tce_free_cell; 1205 ppc_md.tce_free = tce_free_cell;
@@ -1234,5 +1235,3 @@ static int __init cell_iommu_init(void)
1234 return 0; 1235 return 0;
1235} 1236}
1236machine_arch_initcall(cell, cell_iommu_init); 1237machine_arch_initcall(cell, cell_iommu_init);
1237machine_arch_initcall(celleb_native, cell_iommu_init);
1238
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d62aa982d530..36cff28d0293 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -54,6 +54,7 @@
54#include <asm/cell-regs.h> 54#include <asm/cell-regs.h>
55#include <asm/io-workarounds.h> 55#include <asm/io-workarounds.h>
56 56
57#include "cell.h"
57#include "interrupt.h" 58#include "interrupt.h"
58#include "pervasive.h" 59#include "pervasive.h"
59#include "ras.h" 60#include "ras.h"
@@ -126,6 +127,8 @@ static int cell_setup_phb(struct pci_controller *phb)
126 if (rc) 127 if (rc)
127 return rc; 128 return rc;
128 129
130 phb->controller_ops = cell_pci_controller_ops;
131
129 np = phb->dn; 132 np = phb->dn;
130 model = of_get_property(np, "model", NULL); 133 model = of_get_property(np, "model", NULL);
131 if (model == NULL || strcmp(np->name, "pci")) 134 if (model == NULL || strcmp(np->name, "pci"))
@@ -279,3 +282,5 @@ define_machine(cell) {
279 .init_IRQ = cell_init_irq, 282 .init_IRQ = cell_init_irq,
280 .pci_setup_phb = cell_setup_phb, 283 .pci_setup_phb = cell_setup_phb,
281}; 284};
285
286struct pci_controller_ops cell_pci_controller_ops;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index b64e7ead752f..895560f4be69 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -102,13 +102,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
102 return 1; 102 return 1;
103} 103}
104 104
105static int __init smp_iic_probe(void)
106{
107 iic_request_IPIs();
108
109 return num_possible_cpus();
110}
111
112static void smp_cell_setup_cpu(int cpu) 105static void smp_cell_setup_cpu(int cpu)
113{ 106{
114 if (cpu != boot_cpuid) 107 if (cpu != boot_cpuid)
@@ -139,7 +132,7 @@ static int smp_cell_kick_cpu(int nr)
139 132
140static struct smp_ops_t bpa_iic_smp_ops = { 133static struct smp_ops_t bpa_iic_smp_ops = {
141 .message_pass = iic_message_pass, 134 .message_pass = iic_message_pass,
142 .probe = smp_iic_probe, 135 .probe = iic_request_IPIs,
143 .kick_cpu = smp_cell_kick_cpu, 136 .kick_cpu = smp_cell_kick_cpu,
144 .setup_cpu = smp_cell_setup_cpu, 137 .setup_cpu = smp_cell_setup_cpu,
145 .cpu_bootable = smp_generic_cpu_bootable, 138 .cpu_bootable = smp_generic_cpu_bootable,
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index b0ec78e8ad68..a494028b2cdf 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
39#define PPC_SYS(func) sys_ni_syscall, 39#define PPC_SYS(func) sys_ni_syscall,
40#define OLDSYS(func) sys_ni_syscall, 40#define OLDSYS(func) sys_ni_syscall,
41#define SYS32ONLY(func) sys_ni_syscall, 41#define SYS32ONLY(func) sys_ni_syscall,
42#define PPC64ONLY(func) sys_ni_syscall,
42#define SYSX(f, f3264, f32) sys_ni_syscall, 43#define SYSX(f, f3264, f32) sys_ni_syscall,
43 44
44#define SYSCALL_SPU(func) sys_##func, 45#define SYSCALL_SPU(func) sys_##func,
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 860a59eb8ea2..15ebc4e8a151 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -253,7 +253,7 @@ static void briq_restart(char *cmd)
253 * But unfortunately, the firmware does not connect /chosen/{stdin,stdout} 253 * But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
254 * the the built-in serial node. Instead, a /failsafe node is created. 254 * the the built-in serial node. Instead, a /failsafe node is created.
255 */ 255 */
256static void chrp_init_early(void) 256static __init void chrp_init_early(void)
257{ 257{
258 struct device_node *node; 258 struct device_node *node;
259 const char *property; 259 const char *property;
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index c6911ddc479f..eecfa182b06e 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -10,3 +10,5 @@ extern void maple_calibrate_decr(void);
10extern void maple_pci_init(void); 10extern void maple_pci_init(void);
11extern void maple_pci_irq_fixup(struct pci_dev *dev); 11extern void maple_pci_irq_fixup(struct pci_dev *dev);
12extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel); 12extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
13
14extern struct pci_controller_ops maple_pci_controller_ops;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index d3a13067ec42..a923230e575b 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -510,6 +510,7 @@ static int __init maple_add_bridge(struct device_node *dev)
510 return -ENOMEM; 510 return -ENOMEM;
511 hose->first_busno = bus_range ? bus_range[0] : 0; 511 hose->first_busno = bus_range ? bus_range[0] : 0;
512 hose->last_busno = bus_range ? bus_range[1] : 0xff; 512 hose->last_busno = bus_range ? bus_range[1] : 0xff;
513 hose->controller_ops = maple_pci_controller_ops;
513 514
514 disp_name = NULL; 515 disp_name = NULL;
515 if (of_device_is_compatible(dev, "u3-agp")) { 516 if (of_device_is_compatible(dev, "u3-agp")) {
@@ -660,3 +661,6 @@ static void quirk_ipr_msi(struct pci_dev *dev)
660} 661}
661DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, 662DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
662 quirk_ipr_msi); 663 quirk_ipr_msi);
664
665struct pci_controller_ops maple_pci_controller_ops = {
666};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 56b85cd61aaf..a837188544c8 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -203,7 +203,7 @@ static void __init maple_init_early(void)
203{ 203{
204 DBG(" -> maple_init_early\n"); 204 DBG(" -> maple_init_early\n");
205 205
206 iommu_init_early_dart(); 206 iommu_init_early_dart(&maple_pci_controller_ops);
207 207
208 DBG(" <- maple_init_early\n"); 208 DBG(" <- maple_init_early\n");
209} 209}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 2e576f2ae442..b8f567b2ea19 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -27,6 +27,8 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/firmware.h> 28#include <asm/firmware.h>
29 29
30#include "pasemi.h"
31
30#define IOBMAP_PAGE_SHIFT 12 32#define IOBMAP_PAGE_SHIFT 12
31#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT) 33#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
32#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1) 34#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
@@ -248,8 +250,8 @@ void __init iommu_init_early_pasemi(void)
248 250
249 iob_init(NULL); 251 iob_init(NULL);
250 252
251 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi; 253 pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
252 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi; 254 pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
253 ppc_md.tce_build = iobmap_build; 255 ppc_md.tce_build = iobmap_build;
254 ppc_md.tce_free = iobmap_free; 256 ppc_md.tce_free = iobmap_free;
255 set_pci_dma_ops(&dma_iommu_ops); 257 set_pci_dma_ops(&dma_iommu_ops);
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index ea65bf0eb897..11f230a48227 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -30,5 +30,6 @@ static inline void restore_astate(int cpu)
30} 30}
31#endif 31#endif
32 32
33extern struct pci_controller_ops pasemi_pci_controller_ops;
33 34
34#endif /* _PASEMI_PASEMI_H */ 35#endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index aa862713258c..f3a68a0fef23 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -31,6 +31,8 @@
31 31
32#include <asm/ppc-pci.h> 32#include <asm/ppc-pci.h>
33 33
34#include "pasemi.h"
35
34#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off)) 36#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
35 37
36static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset) 38static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset)
@@ -199,6 +201,7 @@ static int __init pas_add_bridge(struct device_node *dev)
199 201
200 hose->first_busno = 0; 202 hose->first_busno = 0;
201 hose->last_busno = 0xff; 203 hose->last_busno = 0xff;
204 hose->controller_ops = pasemi_pci_controller_ops;
202 205
203 setup_pa_pxp(hose); 206 setup_pa_pxp(hose);
204 207
@@ -239,3 +242,5 @@ void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
239 242
240 return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset); 243 return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset);
241} 244}
245
246struct pci_controller_ops pasemi_pci_controller_ops;
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 3e91ef538114..76f5013c35e5 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -246,7 +246,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
246 DBG(" detected display ! adding properties names !\n"); 246 DBG(" detected display ! adding properties names !\n");
247 bootx_dt_add_string("linux,boot-display", mem_end); 247 bootx_dt_add_string("linux,boot-display", mem_end);
248 bootx_dt_add_string("linux,opened", mem_end); 248 bootx_dt_add_string("linux,opened", mem_end);
249 strncpy(bootx_disp_path, namep, 255); 249 strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
250 } 250 }
251 251
252 /* get and store all property names */ 252 /* get and store all property names */
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f4071a67ad00..59ab16fa600f 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -27,6 +27,8 @@
27#include <asm/grackle.h> 27#include <asm/grackle.h>
28#include <asm/ppc-pci.h> 28#include <asm/ppc-pci.h>
29 29
30#include "pmac.h"
31
30#undef DEBUG 32#undef DEBUG
31 33
32#ifdef DEBUG 34#ifdef DEBUG
@@ -798,6 +800,7 @@ static int __init pmac_add_bridge(struct device_node *dev)
798 return -ENOMEM; 800 return -ENOMEM;
799 hose->first_busno = bus_range ? bus_range[0] : 0; 801 hose->first_busno = bus_range ? bus_range[0] : 0;
800 hose->last_busno = bus_range ? bus_range[1] : 0xff; 802 hose->last_busno = bus_range ? bus_range[1] : 0xff;
803 hose->controller_ops = pmac_pci_controller_ops;
801 804
802 disp_name = NULL; 805 disp_name = NULL;
803 806
@@ -942,7 +945,7 @@ void __init pmac_pci_init(void)
942} 945}
943 946
944#ifdef CONFIG_PPC32 947#ifdef CONFIG_PPC32
945int pmac_pci_enable_device_hook(struct pci_dev *dev) 948static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
946{ 949{
947 struct device_node* node; 950 struct device_node* node;
948 int updatecfg = 0; 951 int updatecfg = 0;
@@ -958,11 +961,11 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
958 && !node) { 961 && !node) {
959 printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n", 962 printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
960 pci_name(dev)); 963 pci_name(dev));
961 return -EINVAL; 964 return false;
962 } 965 }
963 966
964 if (!node) 967 if (!node)
965 return 0; 968 return true;
966 969
967 uninorth_child = node->parent && 970 uninorth_child = node->parent &&
968 of_device_is_compatible(node->parent, "uni-north"); 971 of_device_is_compatible(node->parent, "uni-north");
@@ -1003,7 +1006,7 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
1003 L1_CACHE_BYTES >> 2); 1006 L1_CACHE_BYTES >> 2);
1004 } 1007 }
1005 1008
1006 return 0; 1009 return true;
1007} 1010}
1008 1011
1009void pmac_pci_fixup_ohci(struct pci_dev *dev) 1012void pmac_pci_fixup_ohci(struct pci_dev *dev)
@@ -1223,3 +1226,30 @@ static void fixup_u4_pcie(struct pci_dev* dev)
1223 pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0); 1226 pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
1224} 1227}
1225DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie); 1228DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
1229
1230#ifdef CONFIG_PPC64
1231static int pmac_pci_probe_mode(struct pci_bus *bus)
1232{
1233 struct device_node *node = pci_bus_to_OF_node(bus);
1234
1235 /* We need to use normal PCI probing for the AGP bus,
1236 * since the device for the AGP bridge isn't in the tree.
1237 * Same for the PCIe host on U4 and the HT host bridge.
1238 */
1239 if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
1240 of_device_is_compatible(node, "u4-pcie") ||
1241 of_device_is_compatible(node, "u3-ht")))
1242 return PCI_PROBE_NORMAL;
1243 return PCI_PROBE_DEVTREE;
1244}
1245#endif /* CONFIG_PPC64 */
1246
1247struct pci_controller_ops pmac_pci_controller_ops = {
1248#ifdef CONFIG_PPC64
1249 .probe_mode = pmac_pci_probe_mode,
1250#endif
1251#ifdef CONFIG_PPC32
1252 .enable_device_hook = pmac_pci_enable_device_hook,
1253#endif
1254};
1255
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 4c24bf60d39d..59cfc9d63c2d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -321,6 +321,9 @@ static void __init pmac_pic_probe_oldstyle(void)
321 max_irqs = max_real_irqs = 64; 321 max_irqs = max_real_irqs = 64;
322 322
323 /* We might have a second cascaded heathrow */ 323 /* We might have a second cascaded heathrow */
324
325 /* Compensate for of_node_put() in of_find_node_by_name() */
326 of_node_get(master);
324 slave = of_find_node_by_name(master, "mac-io"); 327 slave = of_find_node_by_name(master, "mac-io");
325 328
326 /* Check ordering of master & slave */ 329 /* Check ordering of master & slave */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 8327cce2bdb0..e7f8163d6769 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -25,7 +25,6 @@ extern void pmac_pci_init(void);
25extern void pmac_nvram_update(void); 25extern void pmac_nvram_update(void);
26extern unsigned char pmac_nvram_read_byte(int addr); 26extern unsigned char pmac_nvram_read_byte(int addr);
27extern void pmac_nvram_write_byte(int addr, unsigned char val); 27extern void pmac_nvram_write_byte(int addr, unsigned char val);
28extern int pmac_pci_enable_device_hook(struct pci_dev *dev);
29extern void pmac_pcibios_after_init(void); 28extern void pmac_pcibios_after_init(void);
30extern int of_show_percpuinfo(struct seq_file *m, int i); 29extern int of_show_percpuinfo(struct seq_file *m, int i);
31 30
@@ -39,4 +38,6 @@ extern void low_cpu_die(void) __attribute__((noreturn));
39extern int pmac_nvram_init(void); 38extern int pmac_nvram_init(void);
40extern void pmac_pic_init(void); 39extern void pmac_pic_init(void);
41 40
41extern struct pci_controller_ops pmac_pci_controller_ops;
42
42#endif /* __PMAC_H__ */ 43#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 713d36d45d1d..8dd78f4e1af4 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -473,7 +473,7 @@ static void __init pmac_init_early(void)
473 udbg_adb_init(!!strstr(boot_command_line, "btextdbg")); 473 udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
474 474
475#ifdef CONFIG_PPC64 475#ifdef CONFIG_PPC64
476 iommu_init_early_dart(); 476 iommu_init_early_dart(&pmac_pci_controller_ops);
477#endif 477#endif
478 478
479 /* SMP Init has to be done early as we need to patch up 479 /* SMP Init has to be done early as we need to patch up
@@ -637,24 +637,6 @@ static int __init pmac_probe(void)
637 return 1; 637 return 1;
638} 638}
639 639
640#ifdef CONFIG_PPC64
641/* Move that to pci.c */
642static int pmac_pci_probe_mode(struct pci_bus *bus)
643{
644 struct device_node *node = pci_bus_to_OF_node(bus);
645
646 /* We need to use normal PCI probing for the AGP bus,
647 * since the device for the AGP bridge isn't in the tree.
648 * Same for the PCIe host on U4 and the HT host bridge.
649 */
650 if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
651 of_device_is_compatible(node, "u4-pcie") ||
652 of_device_is_compatible(node, "u3-ht")))
653 return PCI_PROBE_NORMAL;
654 return PCI_PROBE_DEVTREE;
655}
656#endif /* CONFIG_PPC64 */
657
658define_machine(powermac) { 640define_machine(powermac) {
659 .name = "PowerMac", 641 .name = "PowerMac",
660 .probe = pmac_probe, 642 .probe = pmac_probe,
@@ -674,12 +656,10 @@ define_machine(powermac) {
674 .feature_call = pmac_do_feature_call, 656 .feature_call = pmac_do_feature_call,
675 .progress = udbg_progress, 657 .progress = udbg_progress,
676#ifdef CONFIG_PPC64 658#ifdef CONFIG_PPC64
677 .pci_probe_mode = pmac_pci_probe_mode,
678 .power_save = power4_idle, 659 .power_save = power4_idle,
679 .enable_pmcs = power4_enable_pmcs, 660 .enable_pmcs = power4_enable_pmcs,
680#endif /* CONFIG_PPC64 */ 661#endif /* CONFIG_PPC64 */
681#ifdef CONFIG_PPC32 662#ifdef CONFIG_PPC32
682 .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
683 .pcibios_after_init = pmac_pcibios_after_init, 663 .pcibios_after_init = pmac_pcibios_after_init,
684 .phys_mem_access_prot = pci_phys_mem_access_prot, 664 .phys_mem_access_prot = pci_phys_mem_access_prot,
685#endif 665#endif
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index af094ae03dbb..28a147ca32ba 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -268,14 +268,14 @@ static void __init psurge_quad_init(void)
268 mdelay(33); 268 mdelay(33);
269} 269}
270 270
271static int __init smp_psurge_probe(void) 271static void __init smp_psurge_probe(void)
272{ 272{
273 int i, ncpus; 273 int i, ncpus;
274 struct device_node *dn; 274 struct device_node *dn;
275 275
276 /* We don't do SMP on the PPC601 -- paulus */ 276 /* We don't do SMP on the PPC601 -- paulus */
277 if (PVR_VER(mfspr(SPRN_PVR)) == 1) 277 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
278 return 1; 278 return;
279 279
280 /* 280 /*
281 * The powersurge cpu board can be used in the generation 281 * The powersurge cpu board can be used in the generation
@@ -289,7 +289,7 @@ static int __init smp_psurge_probe(void)
289 */ 289 */
290 dn = of_find_node_by_name(NULL, "hammerhead"); 290 dn = of_find_node_by_name(NULL, "hammerhead");
291 if (dn == NULL) 291 if (dn == NULL)
292 return 1; 292 return;
293 of_node_put(dn); 293 of_node_put(dn);
294 294
295 hhead_base = ioremap(HAMMERHEAD_BASE, 0x800); 295 hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
@@ -310,13 +310,13 @@ static int __init smp_psurge_probe(void)
310 /* not a dual-cpu card */ 310 /* not a dual-cpu card */
311 iounmap(hhead_base); 311 iounmap(hhead_base);
312 psurge_type = PSURGE_NONE; 312 psurge_type = PSURGE_NONE;
313 return 1; 313 return;
314 } 314 }
315 ncpus = 2; 315 ncpus = 2;
316 } 316 }
317 317
318 if (psurge_secondary_ipi_init()) 318 if (psurge_secondary_ipi_init())
319 return 1; 319 return;
320 320
321 psurge_start = ioremap(PSURGE_START, 4); 321 psurge_start = ioremap(PSURGE_START, 4);
322 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); 322 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
@@ -332,8 +332,6 @@ static int __init smp_psurge_probe(void)
332 set_cpu_present(i, true); 332 set_cpu_present(i, true);
333 333
334 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); 334 if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
335
336 return ncpus;
337} 335}
338 336
339static int __init smp_psurge_kick_cpu(int nr) 337static int __init smp_psurge_kick_cpu(int nr)
@@ -766,7 +764,7 @@ static void __init smp_core99_setup(int ncpus)
766 powersave_nap = 0; 764 powersave_nap = 0;
767} 765}
768 766
769static int __init smp_core99_probe(void) 767static void __init smp_core99_probe(void)
770{ 768{
771 struct device_node *cpus; 769 struct device_node *cpus;
772 int ncpus = 0; 770 int ncpus = 0;
@@ -781,7 +779,7 @@ static int __init smp_core99_probe(void)
781 779
782 /* Nothing more to do if less than 2 of them */ 780 /* Nothing more to do if less than 2 of them */
783 if (ncpus <= 1) 781 if (ncpus <= 1)
784 return 1; 782 return;
785 783
786 /* We need to perform some early initialisations before we can start 784 /* We need to perform some early initialisations before we can start
787 * setting up SMP as we are running before initcalls 785 * setting up SMP as we are running before initcalls
@@ -797,8 +795,6 @@ static int __init smp_core99_probe(void)
797 795
798 /* Collect l2cr and l3cr values from CPU 0 */ 796 /* Collect l2cr and l3cr values from CPU 0 */
799 core99_init_caches(0); 797 core99_init_caches(0);
800
801 return ncpus;
802} 798}
803 799
804static int smp_core99_kick_cpu(int nr) 800static int smp_core99_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 45a8ed0585cd..4b044d8cb49a 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -19,10 +19,3 @@ config PPC_POWERNV
19 select CPU_FREQ_GOV_CONSERVATIVE 19 select CPU_FREQ_GOV_CONSERVATIVE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 default y 21 default y
22
23config PPC_POWERNV_RTAS
24 depends on PPC_POWERNV
25 bool "Support for RTAS based PowerNV platforms such as BML"
26 default y
27 select PPC_ICS_RTAS
28 select PPC_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 6f3c5d33c3af..33e44f37212f 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -5,7 +5,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o
5 5
6obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o 6obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
7obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o 7obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
8obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o 8obj-$(CONFIG_EEH) += eeh-powernv.o
9obj-$(CONFIG_PPC_SCOM) += opal-xscom.o 9obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
10obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o 10obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
11obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o 11obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
deleted file mode 100644
index 2809c9895288..000000000000
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ /dev/null
@@ -1,1149 +0,0 @@
1/*
2 * The file intends to implement the functions needed by EEH, which is
3 * built on IODA compliant chip. Actually, lots of functions related
4 * to EEH would be built based on the OPAL APIs.
5 *
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/debugfs.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/kernel.h>
19#include <linux/msi.h>
20#include <linux/notifier.h>
21#include <linux/pci.h>
22#include <linux/string.h>
23
24#include <asm/eeh.h>
25#include <asm/eeh_event.h>
26#include <asm/io.h>
27#include <asm/iommu.h>
28#include <asm/msi_bitmap.h>
29#include <asm/opal.h>
30#include <asm/pci-bridge.h>
31#include <asm/ppc-pci.h>
32#include <asm/tce.h>
33
34#include "powernv.h"
35#include "pci.h"
36
37static int ioda_eeh_nb_init = 0;
38
39static int ioda_eeh_event(struct notifier_block *nb,
40 unsigned long events, void *change)
41{
42 uint64_t changed_evts = (uint64_t)change;
43
44 /*
45 * We simply send special EEH event if EEH has
46 * been enabled, or clear pending events in
47 * case that we enable EEH soon
48 */
49 if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
50 !(events & OPAL_EVENT_PCI_ERROR))
51 return 0;
52
53 if (eeh_enabled())
54 eeh_send_failure_event(NULL);
55 else
56 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
57
58 return 0;
59}
60
61static struct notifier_block ioda_eeh_nb = {
62 .notifier_call = ioda_eeh_event,
63 .next = NULL,
64 .priority = 0
65};
66
67#ifdef CONFIG_DEBUG_FS
68static ssize_t ioda_eeh_ei_write(struct file *filp,
69 const char __user *user_buf,
70 size_t count, loff_t *ppos)
71{
72 struct pci_controller *hose = filp->private_data;
73 struct pnv_phb *phb = hose->private_data;
74 struct eeh_dev *edev;
75 struct eeh_pe *pe;
76 int pe_no, type, func;
77 unsigned long addr, mask;
78 char buf[50];
79 int ret;
80
81 if (!phb->eeh_ops || !phb->eeh_ops->err_inject)
82 return -ENXIO;
83
84 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
85 if (!ret)
86 return -EFAULT;
87
88 /* Retrieve parameters */
89 ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
90 &pe_no, &type, &func, &addr, &mask);
91 if (ret != 5)
92 return -EINVAL;
93
94 /* Retrieve PE */
95 edev = kzalloc(sizeof(*edev), GFP_KERNEL);
96 if (!edev)
97 return -ENOMEM;
98 edev->phb = hose;
99 edev->pe_config_addr = pe_no;
100 pe = eeh_pe_get(edev);
101 kfree(edev);
102 if (!pe)
103 return -ENODEV;
104
105 /* Do error injection */
106 ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
107 return ret < 0 ? ret : count;
108}
109
110static const struct file_operations ioda_eeh_ei_fops = {
111 .open = simple_open,
112 .llseek = no_llseek,
113 .write = ioda_eeh_ei_write,
114};
115
116static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
117{
118 struct pci_controller *hose = data;
119 struct pnv_phb *phb = hose->private_data;
120
121 out_be64(phb->regs + offset, val);
122 return 0;
123}
124
125static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
126{
127 struct pci_controller *hose = data;
128 struct pnv_phb *phb = hose->private_data;
129
130 *val = in_be64(phb->regs + offset);
131 return 0;
132}
133
134static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
135{
136 return ioda_eeh_dbgfs_set(data, 0xD10, val);
137}
138
139static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
140{
141 return ioda_eeh_dbgfs_get(data, 0xD10, val);
142}
143
144static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
145{
146 return ioda_eeh_dbgfs_set(data, 0xD90, val);
147}
148
149static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
150{
151 return ioda_eeh_dbgfs_get(data, 0xD90, val);
152}
153
154static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
155{
156 return ioda_eeh_dbgfs_set(data, 0xE10, val);
157}
158
159static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
160{
161 return ioda_eeh_dbgfs_get(data, 0xE10, val);
162}
163
164DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
165 ioda_eeh_outb_dbgfs_set, "0x%llx\n");
166DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
167 ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
168DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
169 ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
170#endif /* CONFIG_DEBUG_FS */
171
172
173/**
174 * ioda_eeh_post_init - Chip dependent post initialization
175 * @hose: PCI controller
176 *
177 * The function will be called after eeh PEs and devices
178 * have been built. That means the EEH is ready to supply
179 * service with I/O cache.
180 */
181static int ioda_eeh_post_init(struct pci_controller *hose)
182{
183 struct pnv_phb *phb = hose->private_data;
184 int ret;
185
186 /* Register OPAL event notifier */
187 if (!ioda_eeh_nb_init) {
188 ret = opal_notifier_register(&ioda_eeh_nb);
189 if (ret) {
190 pr_err("%s: Can't register OPAL event notifier (%d)\n",
191 __func__, ret);
192 return ret;
193 }
194
195 ioda_eeh_nb_init = 1;
196 }
197
198#ifdef CONFIG_DEBUG_FS
199 if (!phb->has_dbgfs && phb->dbgfs) {
200 phb->has_dbgfs = 1;
201
202 debugfs_create_file("err_injct", 0200,
203 phb->dbgfs, hose,
204 &ioda_eeh_ei_fops);
205
206 debugfs_create_file("err_injct_outbound", 0600,
207 phb->dbgfs, hose,
208 &ioda_eeh_outb_dbgfs_ops);
209 debugfs_create_file("err_injct_inboundA", 0600,
210 phb->dbgfs, hose,
211 &ioda_eeh_inbA_dbgfs_ops);
212 debugfs_create_file("err_injct_inboundB", 0600,
213 phb->dbgfs, hose,
214 &ioda_eeh_inbB_dbgfs_ops);
215 }
216#endif
217
218 /* If EEH is enabled, we're going to rely on that.
219 * Otherwise, we restore to conventional mechanism
220 * to clear frozen PE during PCI config access.
221 */
222 if (eeh_enabled())
223 phb->flags |= PNV_PHB_FLAG_EEH;
224 else
225 phb->flags &= ~PNV_PHB_FLAG_EEH;
226
227 return 0;
228}
229
230/**
231 * ioda_eeh_set_option - Set EEH operation or I/O setting
232 * @pe: EEH PE
233 * @option: options
234 *
235 * Enable or disable EEH option for the indicated PE. The
236 * function also can be used to enable I/O or DMA for the
237 * PE.
238 */
239static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
240{
241 struct pci_controller *hose = pe->phb;
242 struct pnv_phb *phb = hose->private_data;
243 bool freeze_pe = false;
244 int enable, ret = 0;
245 s64 rc;
246
247 /* Check on PE number */
248 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
249 pr_err("%s: PE address %x out of range [0, %x] "
250 "on PHB#%x\n",
251 __func__, pe->addr, phb->ioda.total_pe,
252 hose->global_number);
253 return -EINVAL;
254 }
255
256 switch (option) {
257 case EEH_OPT_DISABLE:
258 return -EPERM;
259 case EEH_OPT_ENABLE:
260 return 0;
261 case EEH_OPT_THAW_MMIO:
262 enable = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
263 break;
264 case EEH_OPT_THAW_DMA:
265 enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
266 break;
267 case EEH_OPT_FREEZE_PE:
268 freeze_pe = true;
269 enable = OPAL_EEH_ACTION_SET_FREEZE_ALL;
270 break;
271 default:
272 pr_warn("%s: Invalid option %d\n",
273 __func__, option);
274 return -EINVAL;
275 }
276
277 /* If PHB supports compound PE, to handle it */
278 if (freeze_pe) {
279 if (phb->freeze_pe) {
280 phb->freeze_pe(phb, pe->addr);
281 } else {
282 rc = opal_pci_eeh_freeze_set(phb->opal_id,
283 pe->addr,
284 enable);
285 if (rc != OPAL_SUCCESS) {
286 pr_warn("%s: Failure %lld freezing "
287 "PHB#%x-PE#%x\n",
288 __func__, rc,
289 phb->hose->global_number, pe->addr);
290 ret = -EIO;
291 }
292 }
293 } else {
294 if (phb->unfreeze_pe) {
295 ret = phb->unfreeze_pe(phb, pe->addr, enable);
296 } else {
297 rc = opal_pci_eeh_freeze_clear(phb->opal_id,
298 pe->addr,
299 enable);
300 if (rc != OPAL_SUCCESS) {
301 pr_warn("%s: Failure %lld enable %d "
302 "for PHB#%x-PE#%x\n",
303 __func__, rc, option,
304 phb->hose->global_number, pe->addr);
305 ret = -EIO;
306 }
307 }
308 }
309
310 return ret;
311}
312
313static void ioda_eeh_phb_diag(struct eeh_pe *pe)
314{
315 struct pnv_phb *phb = pe->phb->private_data;
316 long rc;
317
318 rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
319 PNV_PCI_DIAG_BUF_SIZE);
320 if (rc != OPAL_SUCCESS)
321 pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n",
322 __func__, pe->phb->global_number, rc);
323}
324
325static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
326{
327 struct pnv_phb *phb = pe->phb->private_data;
328 u8 fstate;
329 __be16 pcierr;
330 s64 rc;
331 int result = 0;
332
333 rc = opal_pci_eeh_freeze_status(phb->opal_id,
334 pe->addr,
335 &fstate,
336 &pcierr,
337 NULL);
338 if (rc != OPAL_SUCCESS) {
339 pr_warn("%s: Failure %lld getting PHB#%x state\n",
340 __func__, rc, phb->hose->global_number);
341 return EEH_STATE_NOT_SUPPORT;
342 }
343
344 /*
345 * Check PHB state. If the PHB is frozen for the
346 * first time, to dump the PHB diag-data.
347 */
348 if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
349 result = (EEH_STATE_MMIO_ACTIVE |
350 EEH_STATE_DMA_ACTIVE |
351 EEH_STATE_MMIO_ENABLED |
352 EEH_STATE_DMA_ENABLED);
353 } else if (!(pe->state & EEH_PE_ISOLATED)) {
354 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
355 ioda_eeh_phb_diag(pe);
356
357 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
358 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
359 }
360
361 return result;
362}
363
364static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
365{
366 struct pnv_phb *phb = pe->phb->private_data;
367 u8 fstate;
368 __be16 pcierr;
369 s64 rc;
370 int result;
371
372 /*
373 * We don't clobber hardware frozen state until PE
374 * reset is completed. In order to keep EEH core
375 * moving forward, we have to return operational
376 * state during PE reset.
377 */
378 if (pe->state & EEH_PE_RESET) {
379 result = (EEH_STATE_MMIO_ACTIVE |
380 EEH_STATE_DMA_ACTIVE |
381 EEH_STATE_MMIO_ENABLED |
382 EEH_STATE_DMA_ENABLED);
383 return result;
384 }
385
386 /*
387 * Fetch PE state from hardware. If the PHB
388 * supports compound PE, let it handle that.
389 */
390 if (phb->get_pe_state) {
391 fstate = phb->get_pe_state(phb, pe->addr);
392 } else {
393 rc = opal_pci_eeh_freeze_status(phb->opal_id,
394 pe->addr,
395 &fstate,
396 &pcierr,
397 NULL);
398 if (rc != OPAL_SUCCESS) {
399 pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
400 __func__, rc, phb->hose->global_number, pe->addr);
401 return EEH_STATE_NOT_SUPPORT;
402 }
403 }
404
405 /* Figure out state */
406 switch (fstate) {
407 case OPAL_EEH_STOPPED_NOT_FROZEN:
408 result = (EEH_STATE_MMIO_ACTIVE |
409 EEH_STATE_DMA_ACTIVE |
410 EEH_STATE_MMIO_ENABLED |
411 EEH_STATE_DMA_ENABLED);
412 break;
413 case OPAL_EEH_STOPPED_MMIO_FREEZE:
414 result = (EEH_STATE_DMA_ACTIVE |
415 EEH_STATE_DMA_ENABLED);
416 break;
417 case OPAL_EEH_STOPPED_DMA_FREEZE:
418 result = (EEH_STATE_MMIO_ACTIVE |
419 EEH_STATE_MMIO_ENABLED);
420 break;
421 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
422 result = 0;
423 break;
424 case OPAL_EEH_STOPPED_RESET:
425 result = EEH_STATE_RESET_ACTIVE;
426 break;
427 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
428 result = EEH_STATE_UNAVAILABLE;
429 break;
430 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
431 result = EEH_STATE_NOT_SUPPORT;
432 break;
433 default:
434 result = EEH_STATE_NOT_SUPPORT;
435 pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
436 __func__, phb->hose->global_number,
437 pe->addr, fstate);
438 }
439
440 /*
441 * If PHB supports compound PE, to freeze all
442 * slave PEs for consistency.
443 *
444 * If the PE is switching to frozen state for the
445 * first time, to dump the PHB diag-data.
446 */
447 if (!(result & EEH_STATE_NOT_SUPPORT) &&
448 !(result & EEH_STATE_UNAVAILABLE) &&
449 !(result & EEH_STATE_MMIO_ACTIVE) &&
450 !(result & EEH_STATE_DMA_ACTIVE) &&
451 !(pe->state & EEH_PE_ISOLATED)) {
452 if (phb->freeze_pe)
453 phb->freeze_pe(phb, pe->addr);
454
455 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
456 ioda_eeh_phb_diag(pe);
457
458 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
459 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
460 }
461
462 return result;
463}
464
465/**
466 * ioda_eeh_get_state - Retrieve the state of PE
467 * @pe: EEH PE
468 *
469 * The PE's state should be retrieved from the PEEV, PEST
470 * IODA tables. Since the OPAL has exported the function
471 * to do it, it'd better to use that.
472 */
473static int ioda_eeh_get_state(struct eeh_pe *pe)
474{
475 struct pnv_phb *phb = pe->phb->private_data;
476
477 /* Sanity check on PE number. PHB PE should have 0 */
478 if (pe->addr < 0 ||
479 pe->addr >= phb->ioda.total_pe) {
480 pr_warn("%s: PHB#%x-PE#%x out of range [0, %x]\n",
481 __func__, phb->hose->global_number,
482 pe->addr, phb->ioda.total_pe);
483 return EEH_STATE_NOT_SUPPORT;
484 }
485
486 if (pe->type & EEH_PE_PHB)
487 return ioda_eeh_get_phb_state(pe);
488
489 return ioda_eeh_get_pe_state(pe);
490}
491
492static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
493{
494 s64 rc = OPAL_HARDWARE;
495
496 while (1) {
497 rc = opal_pci_poll(phb->opal_id);
498 if (rc <= 0)
499 break;
500
501 if (system_state < SYSTEM_RUNNING)
502 udelay(1000 * rc);
503 else
504 msleep(rc);
505 }
506
507 return rc;
508}
509
510int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
511{
512 struct pnv_phb *phb = hose->private_data;
513 s64 rc = OPAL_HARDWARE;
514
515 pr_debug("%s: Reset PHB#%x, option=%d\n",
516 __func__, hose->global_number, option);
517
518 /* Issue PHB complete reset request */
519 if (option == EEH_RESET_FUNDAMENTAL ||
520 option == EEH_RESET_HOT)
521 rc = opal_pci_reset(phb->opal_id,
522 OPAL_RESET_PHB_COMPLETE,
523 OPAL_ASSERT_RESET);
524 else if (option == EEH_RESET_DEACTIVATE)
525 rc = opal_pci_reset(phb->opal_id,
526 OPAL_RESET_PHB_COMPLETE,
527 OPAL_DEASSERT_RESET);
528 if (rc < 0)
529 goto out;
530
531 /*
532 * Poll state of the PHB until the request is done
533 * successfully. The PHB reset is usually PHB complete
534 * reset followed by hot reset on root bus. So we also
535 * need the PCI bus settlement delay.
536 */
537 rc = ioda_eeh_phb_poll(phb);
538 if (option == EEH_RESET_DEACTIVATE) {
539 if (system_state < SYSTEM_RUNNING)
540 udelay(1000 * EEH_PE_RST_SETTLE_TIME);
541 else
542 msleep(EEH_PE_RST_SETTLE_TIME);
543 }
544out:
545 if (rc != OPAL_SUCCESS)
546 return -EIO;
547
548 return 0;
549}
550
551static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
552{
553 struct pnv_phb *phb = hose->private_data;
554 s64 rc = OPAL_SUCCESS;
555
556 pr_debug("%s: Reset PHB#%x, option=%d\n",
557 __func__, hose->global_number, option);
558
559 /*
560 * During the reset deassert time, we needn't care
561 * the reset scope because the firmware does nothing
562 * for fundamental or hot reset during deassert phase.
563 */
564 if (option == EEH_RESET_FUNDAMENTAL)
565 rc = opal_pci_reset(phb->opal_id,
566 OPAL_RESET_PCI_FUNDAMENTAL,
567 OPAL_ASSERT_RESET);
568 else if (option == EEH_RESET_HOT)
569 rc = opal_pci_reset(phb->opal_id,
570 OPAL_RESET_PCI_HOT,
571 OPAL_ASSERT_RESET);
572 else if (option == EEH_RESET_DEACTIVATE)
573 rc = opal_pci_reset(phb->opal_id,
574 OPAL_RESET_PCI_HOT,
575 OPAL_DEASSERT_RESET);
576 if (rc < 0)
577 goto out;
578
579 /* Poll state of the PHB until the request is done */
580 rc = ioda_eeh_phb_poll(phb);
581 if (option == EEH_RESET_DEACTIVATE)
582 msleep(EEH_PE_RST_SETTLE_TIME);
583out:
584 if (rc != OPAL_SUCCESS)
585 return -EIO;
586
587 return 0;
588}
589
590static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
591
592{
593 struct device_node *dn = pci_device_to_OF_node(dev);
594 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
595 int aer = edev ? edev->aer_cap : 0;
596 u32 ctrl;
597
598 pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
599 __func__, pci_domain_nr(dev->bus),
600 dev->bus->number, option);
601
602 switch (option) {
603 case EEH_RESET_FUNDAMENTAL:
604 case EEH_RESET_HOT:
605 /* Don't report linkDown event */
606 if (aer) {
607 eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
608 4, &ctrl);
609 ctrl |= PCI_ERR_UNC_SURPDN;
610 eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
611 4, ctrl);
612 }
613
614 eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
615 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
616 eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
617 msleep(EEH_PE_RST_HOLD_TIME);
618
619 break;
620 case EEH_RESET_DEACTIVATE:
621 eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
622 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
623 eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
624 msleep(EEH_PE_RST_SETTLE_TIME);
625
626 /* Continue reporting linkDown event */
627 if (aer) {
628 eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
629 4, &ctrl);
630 ctrl &= ~PCI_ERR_UNC_SURPDN;
631 eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
632 4, ctrl);
633 }
634
635 break;
636 }
637
638 return 0;
639}
640
641void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
642{
643 struct pci_controller *hose;
644
645 if (pci_is_root_bus(dev->bus)) {
646 hose = pci_bus_to_host(dev->bus);
647 ioda_eeh_root_reset(hose, EEH_RESET_HOT);
648 ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
649 } else {
650 ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
651 ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
652 }
653}
654
655/**
656 * ioda_eeh_reset - Reset the indicated PE
657 * @pe: EEH PE
658 * @option: reset option
659 *
660 * Do reset on the indicated PE. For PCI bus sensitive PE,
661 * we need to reset the parent p2p bridge. The PHB has to
662 * be reinitialized if the p2p bridge is root bridge. For
663 * PCI device sensitive PE, we will try to reset the device
664 * through FLR. For now, we don't have OPAL APIs to do HARD
665 * reset yet, so all reset would be SOFT (HOT) reset.
666 */
667static int ioda_eeh_reset(struct eeh_pe *pe, int option)
668{
669 struct pci_controller *hose = pe->phb;
670 struct pci_bus *bus;
671 int ret;
672
673 /*
674 * For PHB reset, we always have complete reset. For those PEs whose
675 * primary bus derived from root complex (root bus) or root port
676 * (usually bus#1), we apply hot or fundamental reset on the root port.
677 * For other PEs, we always have hot reset on the PE primary bus.
678 *
679 * Here, we have different design to pHyp, which always clear the
680 * frozen state during PE reset. However, the good idea here from
681 * benh is to keep frozen state before we get PE reset done completely
682 * (until BAR restore). With the frozen state, HW drops illegal IO
683 * or MMIO access, which can incur recrusive frozen PE during PE
684 * reset. The side effect is that EEH core has to clear the frozen
685 * state explicitly after BAR restore.
686 */
687 if (pe->type & EEH_PE_PHB) {
688 ret = ioda_eeh_phb_reset(hose, option);
689 } else {
690 struct pnv_phb *phb;
691 s64 rc;
692
693 /*
694 * The frozen PE might be caused by PAPR error injection
695 * registers, which are expected to be cleared after hitting
696 * frozen PE as stated in the hardware spec. Unfortunately,
697 * that's not true on P7IOC. So we have to clear it manually
698 * to avoid recursive EEH errors during recovery.
699 */
700 phb = hose->private_data;
701 if (phb->model == PNV_PHB_MODEL_P7IOC &&
702 (option == EEH_RESET_HOT ||
703 option == EEH_RESET_FUNDAMENTAL)) {
704 rc = opal_pci_reset(phb->opal_id,
705 OPAL_RESET_PHB_ERROR,
706 OPAL_ASSERT_RESET);
707 if (rc != OPAL_SUCCESS) {
708 pr_warn("%s: Failure %lld clearing "
709 "error injection registers\n",
710 __func__, rc);
711 return -EIO;
712 }
713 }
714
715 bus = eeh_pe_bus_get(pe);
716 if (pci_is_root_bus(bus) ||
717 pci_is_root_bus(bus->parent))
718 ret = ioda_eeh_root_reset(hose, option);
719 else
720 ret = ioda_eeh_bridge_reset(bus->self, option);
721 }
722
723 return ret;
724}
725
726/**
727 * ioda_eeh_get_log - Retrieve error log
728 * @pe: frozen PE
729 * @severity: permanent or temporary error
730 * @drv_log: device driver log
731 * @len: length of device driver log
732 *
733 * Retrieve error log, which contains log from device driver
734 * and firmware.
735 */
736static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
737 char *drv_log, unsigned long len)
738{
739 if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
740 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
741
742 return 0;
743}
744
745/**
746 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
747 * @pe: EEH PE
748 *
749 * For particular PE, it might have included PCI bridges. In order
750 * to make the PE work properly, those PCI bridges should be configured
751 * correctly. However, we need do nothing on P7IOC since the reset
752 * function will do everything that should be covered by the function.
753 */
754static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
755{
756 return 0;
757}
758
759static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func,
760 unsigned long addr, unsigned long mask)
761{
762 struct pci_controller *hose = pe->phb;
763 struct pnv_phb *phb = hose->private_data;
764 s64 ret;
765
766 /* Sanity check on error type */
767 if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
768 type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
769 pr_warn("%s: Invalid error type %d\n",
770 __func__, type);
771 return -ERANGE;
772 }
773
774 if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
775 func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
776 pr_warn("%s: Invalid error function %d\n",
777 __func__, func);
778 return -ERANGE;
779 }
780
781 /* Firmware supports error injection ? */
782 if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
783 pr_warn("%s: Firmware doesn't support error injection\n",
784 __func__);
785 return -ENXIO;
786 }
787
788 /* Do error injection */
789 ret = opal_pci_err_inject(phb->opal_id, pe->addr,
790 type, func, addr, mask);
791 if (ret != OPAL_SUCCESS) {
792 pr_warn("%s: Failure %lld injecting error "
793 "%d-%d to PHB#%x-PE#%x\n",
794 __func__, ret, type, func,
795 hose->global_number, pe->addr);
796 return -EIO;
797 }
798
799 return 0;
800}
801
802static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
803{
804 /* GEM */
805 if (data->gemXfir || data->gemRfir ||
806 data->gemRirqfir || data->gemMask || data->gemRwof)
807 pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
808 be64_to_cpu(data->gemXfir),
809 be64_to_cpu(data->gemRfir),
810 be64_to_cpu(data->gemRirqfir),
811 be64_to_cpu(data->gemMask),
812 be64_to_cpu(data->gemRwof));
813
814 /* LEM */
815 if (data->lemFir || data->lemErrMask ||
816 data->lemAction0 || data->lemAction1 || data->lemWof)
817 pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
818 be64_to_cpu(data->lemFir),
819 be64_to_cpu(data->lemErrMask),
820 be64_to_cpu(data->lemAction0),
821 be64_to_cpu(data->lemAction1),
822 be64_to_cpu(data->lemWof));
823}
824
825static void ioda_eeh_hub_diag(struct pci_controller *hose)
826{
827 struct pnv_phb *phb = hose->private_data;
828 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
829 long rc;
830
831 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
832 if (rc != OPAL_SUCCESS) {
833 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
834 __func__, phb->hub_id, rc);
835 return;
836 }
837
838 switch (data->type) {
839 case OPAL_P7IOC_DIAG_TYPE_RGC:
840 pr_info("P7IOC diag-data for RGC\n\n");
841 ioda_eeh_hub_diag_common(data);
842 if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
843 pr_info(" RGC: %016llx %016llx\n",
844 be64_to_cpu(data->rgc.rgcStatus),
845 be64_to_cpu(data->rgc.rgcLdcp));
846 break;
847 case OPAL_P7IOC_DIAG_TYPE_BI:
848 pr_info("P7IOC diag-data for BI %s\n\n",
849 data->bi.biDownbound ? "Downbound" : "Upbound");
850 ioda_eeh_hub_diag_common(data);
851 if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
852 data->bi.biLdcp2 || data->bi.biFenceStatus)
853 pr_info(" BI: %016llx %016llx %016llx %016llx\n",
854 be64_to_cpu(data->bi.biLdcp0),
855 be64_to_cpu(data->bi.biLdcp1),
856 be64_to_cpu(data->bi.biLdcp2),
857 be64_to_cpu(data->bi.biFenceStatus));
858 break;
859 case OPAL_P7IOC_DIAG_TYPE_CI:
860 pr_info("P7IOC diag-data for CI Port %d\n\n",
861 data->ci.ciPort);
862 ioda_eeh_hub_diag_common(data);
863 if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
864 pr_info(" CI: %016llx %016llx\n",
865 be64_to_cpu(data->ci.ciPortStatus),
866 be64_to_cpu(data->ci.ciPortLdcp));
867 break;
868 case OPAL_P7IOC_DIAG_TYPE_MISC:
869 pr_info("P7IOC diag-data for MISC\n\n");
870 ioda_eeh_hub_diag_common(data);
871 break;
872 case OPAL_P7IOC_DIAG_TYPE_I2C:
873 pr_info("P7IOC diag-data for I2C\n\n");
874 ioda_eeh_hub_diag_common(data);
875 break;
876 default:
877 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
878 __func__, phb->hub_id, data->type);
879 }
880}
881
882static int ioda_eeh_get_pe(struct pci_controller *hose,
883 u16 pe_no, struct eeh_pe **pe)
884{
885 struct pnv_phb *phb = hose->private_data;
886 struct pnv_ioda_pe *pnv_pe;
887 struct eeh_pe *dev_pe;
888 struct eeh_dev edev;
889
890 /*
891 * If PHB supports compound PE, to fetch
892 * the master PE because slave PE is invisible
893 * to EEH core.
894 */
895 pnv_pe = &phb->ioda.pe_array[pe_no];
896 if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
897 pnv_pe = pnv_pe->master;
898 WARN_ON(!pnv_pe ||
899 !(pnv_pe->flags & PNV_IODA_PE_MASTER));
900 pe_no = pnv_pe->pe_number;
901 }
902
903 /* Find the PE according to PE# */
904 memset(&edev, 0, sizeof(struct eeh_dev));
905 edev.phb = hose;
906 edev.pe_config_addr = pe_no;
907 dev_pe = eeh_pe_get(&edev);
908 if (!dev_pe)
909 return -EEXIST;
910
911 /* Freeze the (compound) PE */
912 *pe = dev_pe;
913 if (!(dev_pe->state & EEH_PE_ISOLATED))
914 phb->freeze_pe(phb, pe_no);
915
916 /*
917 * At this point, we're sure the (compound) PE should
918 * have been frozen. However, we still need poke until
919 * hitting the frozen PE on top level.
920 */
921 dev_pe = dev_pe->parent;
922 while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
923 int ret;
924 int active_flags = (EEH_STATE_MMIO_ACTIVE |
925 EEH_STATE_DMA_ACTIVE);
926
927 ret = eeh_ops->get_state(dev_pe, NULL);
928 if (ret <= 0 || (ret & active_flags) == active_flags) {
929 dev_pe = dev_pe->parent;
930 continue;
931 }
932
933 /* Frozen parent PE */
934 *pe = dev_pe;
935 if (!(dev_pe->state & EEH_PE_ISOLATED))
936 phb->freeze_pe(phb, dev_pe->addr);
937
938 /* Next one */
939 dev_pe = dev_pe->parent;
940 }
941
942 return 0;
943}
944
945/**
946 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
947 * @pe: The affected PE
948 *
949 * The function is expected to be called by EEH core while it gets
950 * special EEH event (without binding PE). The function calls to
951 * OPAL APIs for next error to handle. The informational error is
952 * handled internally by platform. However, the dead IOC, dead PHB,
953 * fenced PHB and frozen PE should be handled by EEH core eventually.
954 */
955static int ioda_eeh_next_error(struct eeh_pe **pe)
956{
957 struct pci_controller *hose;
958 struct pnv_phb *phb;
959 struct eeh_pe *phb_pe, *parent_pe;
960 __be64 frozen_pe_no;
961 __be16 err_type, severity;
962 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
963 long rc;
964 int state, ret = EEH_NEXT_ERR_NONE;
965
966 /*
967 * While running here, it's safe to purge the event queue.
968 * And we should keep the cached OPAL notifier event sychronized
969 * between the kernel and firmware.
970 */
971 eeh_remove_event(NULL, false);
972 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
973
974 list_for_each_entry(hose, &hose_list, list_node) {
975 /*
976 * If the subordinate PCI buses of the PHB has been
977 * removed or is exactly under error recovery, we
978 * needn't take care of it any more.
979 */
980 phb = hose->private_data;
981 phb_pe = eeh_phb_pe_get(hose);
982 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
983 continue;
984
985 rc = opal_pci_next_error(phb->opal_id,
986 &frozen_pe_no, &err_type, &severity);
987
988 /* If OPAL API returns error, we needn't proceed */
989 if (rc != OPAL_SUCCESS) {
990 pr_devel("%s: Invalid return value on "
991 "PHB#%x (0x%lx) from opal_pci_next_error",
992 __func__, hose->global_number, rc);
993 continue;
994 }
995
996 /* If the PHB doesn't have error, stop processing */
997 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
998 be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
999 pr_devel("%s: No error found on PHB#%x\n",
1000 __func__, hose->global_number);
1001 continue;
1002 }
1003
1004 /*
1005 * Processing the error. We're expecting the error with
1006 * highest priority reported upon multiple errors on the
1007 * specific PHB.
1008 */
1009 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
1010 __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
1011 be64_to_cpu(frozen_pe_no), hose->global_number);
1012 switch (be16_to_cpu(err_type)) {
1013 case OPAL_EEH_IOC_ERROR:
1014 if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
1015 pr_err("EEH: dead IOC detected\n");
1016 ret = EEH_NEXT_ERR_DEAD_IOC;
1017 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1018 pr_info("EEH: IOC informative error "
1019 "detected\n");
1020 ioda_eeh_hub_diag(hose);
1021 ret = EEH_NEXT_ERR_NONE;
1022 }
1023
1024 break;
1025 case OPAL_EEH_PHB_ERROR:
1026 if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
1027 *pe = phb_pe;
1028 pr_err("EEH: dead PHB#%x detected, "
1029 "location: %s\n",
1030 hose->global_number,
1031 eeh_pe_loc_get(phb_pe));
1032 ret = EEH_NEXT_ERR_DEAD_PHB;
1033 } else if (be16_to_cpu(severity) ==
1034 OPAL_EEH_SEV_PHB_FENCED) {
1035 *pe = phb_pe;
1036 pr_err("EEH: Fenced PHB#%x detected, "
1037 "location: %s\n",
1038 hose->global_number,
1039 eeh_pe_loc_get(phb_pe));
1040 ret = EEH_NEXT_ERR_FENCED_PHB;
1041 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1042 pr_info("EEH: PHB#%x informative error "
1043 "detected, location: %s\n",
1044 hose->global_number,
1045 eeh_pe_loc_get(phb_pe));
1046 ioda_eeh_phb_diag(phb_pe);
1047 pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
1048 ret = EEH_NEXT_ERR_NONE;
1049 }
1050
1051 break;
1052 case OPAL_EEH_PE_ERROR:
1053 /*
1054 * If we can't find the corresponding PE, we
1055 * just try to unfreeze.
1056 */
1057 if (ioda_eeh_get_pe(hose,
1058 be64_to_cpu(frozen_pe_no), pe)) {
1059 /* Try best to clear it */
1060 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
1061 hose->global_number, frozen_pe_no);
1062 pr_info("EEH: PHB location: %s\n",
1063 eeh_pe_loc_get(phb_pe));
1064 opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
1065 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
1066 ret = EEH_NEXT_ERR_NONE;
1067 } else if ((*pe)->state & EEH_PE_ISOLATED ||
1068 eeh_pe_passed(*pe)) {
1069 ret = EEH_NEXT_ERR_NONE;
1070 } else {
1071 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
1072 (*pe)->addr, (*pe)->phb->global_number);
1073 pr_err("EEH: PE location: %s, PHB location: %s\n",
1074 eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
1075 ret = EEH_NEXT_ERR_FROZEN_PE;
1076 }
1077
1078 break;
1079 default:
1080 pr_warn("%s: Unexpected error type %d\n",
1081 __func__, be16_to_cpu(err_type));
1082 }
1083
1084 /*
1085 * EEH core will try recover from fenced PHB or
1086 * frozen PE. In the time for frozen PE, EEH core
1087 * enable IO path for that before collecting logs,
1088 * but it ruins the site. So we have to dump the
1089 * log in advance here.
1090 */
1091 if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
1092 ret == EEH_NEXT_ERR_FENCED_PHB) &&
1093 !((*pe)->state & EEH_PE_ISOLATED)) {
1094 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1095 ioda_eeh_phb_diag(*pe);
1096
1097 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
1098 pnv_pci_dump_phb_diag_data((*pe)->phb,
1099 (*pe)->data);
1100 }
1101
1102 /*
1103 * We probably have the frozen parent PE out there and
1104 * we need have to handle frozen parent PE firstly.
1105 */
1106 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
1107 parent_pe = (*pe)->parent;
1108 while (parent_pe) {
1109 /* Hit the ceiling ? */
1110 if (parent_pe->type & EEH_PE_PHB)
1111 break;
1112
1113 /* Frozen parent PE ? */
1114 state = ioda_eeh_get_state(parent_pe);
1115 if (state > 0 &&
1116 (state & active_flags) != active_flags)
1117 *pe = parent_pe;
1118
1119 /* Next parent level */
1120 parent_pe = parent_pe->parent;
1121 }
1122
1123 /* We possibly migrate to another PE */
1124 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1125 }
1126
1127 /*
1128 * If we have no errors on the specific PHB or only
1129 * informative error there, we continue poking it.
1130 * Otherwise, we need actions to be taken by upper
1131 * layer.
1132 */
1133 if (ret > EEH_NEXT_ERR_INF)
1134 break;
1135 }
1136
1137 return ret;
1138}
1139
1140struct pnv_eeh_ops ioda_eeh_ops = {
1141 .post_init = ioda_eeh_post_init,
1142 .set_option = ioda_eeh_set_option,
1143 .get_state = ioda_eeh_get_state,
1144 .reset = ioda_eeh_reset,
1145 .get_log = ioda_eeh_get_log,
1146 .configure_bridge = ioda_eeh_configure_bridge,
1147 .err_inject = ioda_eeh_err_inject,
1148 .next_error = ioda_eeh_next_error
1149};
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index e261869adc86..ce738ab3d5a9 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/atomic.h> 14#include <linux/atomic.h>
15#include <linux/debugfs.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
16#include <linux/export.h> 17#include <linux/export.h>
17#include <linux/init.h> 18#include <linux/init.h>
@@ -38,12 +39,14 @@
38#include "powernv.h" 39#include "powernv.h"
39#include "pci.h" 40#include "pci.h"
40 41
42static bool pnv_eeh_nb_init = false;
43
41/** 44/**
42 * powernv_eeh_init - EEH platform dependent initialization 45 * pnv_eeh_init - EEH platform dependent initialization
43 * 46 *
44 * EEH platform dependent initialization on powernv 47 * EEH platform dependent initialization on powernv
45 */ 48 */
46static int powernv_eeh_init(void) 49static int pnv_eeh_init(void)
47{ 50{
48 struct pci_controller *hose; 51 struct pci_controller *hose;
49 struct pnv_phb *phb; 52 struct pnv_phb *phb;
@@ -85,37 +88,280 @@ static int powernv_eeh_init(void)
85 return 0; 88 return 0;
86} 89}
87 90
91static int pnv_eeh_event(struct notifier_block *nb,
92 unsigned long events, void *change)
93{
94 uint64_t changed_evts = (uint64_t)change;
95
96 /*
97 * We simply send special EEH event if EEH has
98 * been enabled, or clear pending events in
99 * case that we enable EEH soon
100 */
101 if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
102 !(events & OPAL_EVENT_PCI_ERROR))
103 return 0;
104
105 if (eeh_enabled())
106 eeh_send_failure_event(NULL);
107 else
108 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
109
110 return 0;
111}
112
113static struct notifier_block pnv_eeh_nb = {
114 .notifier_call = pnv_eeh_event,
115 .next = NULL,
116 .priority = 0
117};
118
119#ifdef CONFIG_DEBUG_FS
120static ssize_t pnv_eeh_ei_write(struct file *filp,
121 const char __user *user_buf,
122 size_t count, loff_t *ppos)
123{
124 struct pci_controller *hose = filp->private_data;
125 struct eeh_dev *edev;
126 struct eeh_pe *pe;
127 int pe_no, type, func;
128 unsigned long addr, mask;
129 char buf[50];
130 int ret;
131
132 if (!eeh_ops || !eeh_ops->err_inject)
133 return -ENXIO;
134
135 /* Copy over argument buffer */
136 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
137 if (!ret)
138 return -EFAULT;
139
140 /* Retrieve parameters */
141 ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
142 &pe_no, &type, &func, &addr, &mask);
143 if (ret != 5)
144 return -EINVAL;
145
146 /* Retrieve PE */
147 edev = kzalloc(sizeof(*edev), GFP_KERNEL);
148 if (!edev)
149 return -ENOMEM;
150 edev->phb = hose;
151 edev->pe_config_addr = pe_no;
152 pe = eeh_pe_get(edev);
153 kfree(edev);
154 if (!pe)
155 return -ENODEV;
156
157 /* Do error injection */
158 ret = eeh_ops->err_inject(pe, type, func, addr, mask);
159 return ret < 0 ? ret : count;
160}
161
162static const struct file_operations pnv_eeh_ei_fops = {
163 .open = simple_open,
164 .llseek = no_llseek,
165 .write = pnv_eeh_ei_write,
166};
167
168static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
169{
170 struct pci_controller *hose = data;
171 struct pnv_phb *phb = hose->private_data;
172
173 out_be64(phb->regs + offset, val);
174 return 0;
175}
176
177static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
178{
179 struct pci_controller *hose = data;
180 struct pnv_phb *phb = hose->private_data;
181
182 *val = in_be64(phb->regs + offset);
183 return 0;
184}
185
186static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
187{
188 return pnv_eeh_dbgfs_set(data, 0xD10, val);
189}
190
191static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
192{
193 return pnv_eeh_dbgfs_get(data, 0xD10, val);
194}
195
196static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
197{
198 return pnv_eeh_dbgfs_set(data, 0xD90, val);
199}
200
201static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
202{
203 return pnv_eeh_dbgfs_get(data, 0xD90, val);
204}
205
206static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
207{
208 return pnv_eeh_dbgfs_set(data, 0xE10, val);
209}
210
211static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
212{
213 return pnv_eeh_dbgfs_get(data, 0xE10, val);
214}
215
216DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
217 pnv_eeh_outb_dbgfs_set, "0x%llx\n");
218DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
219 pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
220DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
221 pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
222#endif /* CONFIG_DEBUG_FS */
223
88/** 224/**
89 * powernv_eeh_post_init - EEH platform dependent post initialization 225 * pnv_eeh_post_init - EEH platform dependent post initialization
90 * 226 *
91 * EEH platform dependent post initialization on powernv. When 227 * EEH platform dependent post initialization on powernv. When
92 * the function is called, the EEH PEs and devices should have 228 * the function is called, the EEH PEs and devices should have
93 * been built. If the I/O cache staff has been built, EEH is 229 * been built. If the I/O cache staff has been built, EEH is
94 * ready to supply service. 230 * ready to supply service.
95 */ 231 */
96static int powernv_eeh_post_init(void) 232static int pnv_eeh_post_init(void)
97{ 233{
98 struct pci_controller *hose; 234 struct pci_controller *hose;
99 struct pnv_phb *phb; 235 struct pnv_phb *phb;
100 int ret = 0; 236 int ret = 0;
101 237
238 /* Register OPAL event notifier */
239 if (!pnv_eeh_nb_init) {
240 ret = opal_notifier_register(&pnv_eeh_nb);
241 if (ret) {
242 pr_warn("%s: Can't register OPAL event notifier (%d)\n",
243 __func__, ret);
244 return ret;
245 }
246
247 pnv_eeh_nb_init = true;
248 }
249
102 list_for_each_entry(hose, &hose_list, list_node) { 250 list_for_each_entry(hose, &hose_list, list_node) {
103 phb = hose->private_data; 251 phb = hose->private_data;
104 252
105 if (phb->eeh_ops && phb->eeh_ops->post_init) { 253 /*
106 ret = phb->eeh_ops->post_init(hose); 254 * If EEH is enabled, we're going to rely on that.
107 if (ret) 255 * Otherwise, we restore to conventional mechanism
108 break; 256 * to clear frozen PE during PCI config access.
109 } 257 */
258 if (eeh_enabled())
259 phb->flags |= PNV_PHB_FLAG_EEH;
260 else
261 phb->flags &= ~PNV_PHB_FLAG_EEH;
262
263 /* Create debugfs entries */
264#ifdef CONFIG_DEBUG_FS
265 if (phb->has_dbgfs || !phb->dbgfs)
266 continue;
267
268 phb->has_dbgfs = 1;
269 debugfs_create_file("err_injct", 0200,
270 phb->dbgfs, hose,
271 &pnv_eeh_ei_fops);
272
273 debugfs_create_file("err_injct_outbound", 0600,
274 phb->dbgfs, hose,
275 &pnv_eeh_outb_dbgfs_ops);
276 debugfs_create_file("err_injct_inboundA", 0600,
277 phb->dbgfs, hose,
278 &pnv_eeh_inbA_dbgfs_ops);
279 debugfs_create_file("err_injct_inboundB", 0600,
280 phb->dbgfs, hose,
281 &pnv_eeh_inbB_dbgfs_ops);
282#endif /* CONFIG_DEBUG_FS */
110 } 283 }
111 284
285
112 return ret; 286 return ret;
113} 287}
114 288
289static int pnv_eeh_cap_start(struct pci_dn *pdn)
290{
291 u32 status;
292
293 if (!pdn)
294 return 0;
295
296 pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
297 if (!(status & PCI_STATUS_CAP_LIST))
298 return 0;
299
300 return PCI_CAPABILITY_LIST;
301}
302
303static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
304{
305 int pos = pnv_eeh_cap_start(pdn);
306 int cnt = 48; /* Maximal number of capabilities */
307 u32 id;
308
309 if (!pos)
310 return 0;
311
312 while (cnt--) {
313 pnv_pci_cfg_read(pdn, pos, 1, &pos);
314 if (pos < 0x40)
315 break;
316
317 pos &= ~3;
318 pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
319 if (id == 0xff)
320 break;
321
322 /* Found */
323 if (id == cap)
324 return pos;
325
326 /* Next one */
327 pos += PCI_CAP_LIST_NEXT;
328 }
329
330 return 0;
331}
332
333static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
334{
335 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
336 u32 header;
337 int pos = 256, ttl = (4096 - 256) / 8;
338
339 if (!edev || !edev->pcie_cap)
340 return 0;
341 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
342 return 0;
343 else if (!header)
344 return 0;
345
346 while (ttl-- > 0) {
347 if (PCI_EXT_CAP_ID(header) == cap && pos)
348 return pos;
349
350 pos = PCI_EXT_CAP_NEXT(header);
351 if (pos < 256)
352 break;
353
354 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
355 break;
356 }
357
358 return 0;
359}
360
115/** 361/**
116 * powernv_eeh_dev_probe - Do probe on PCI device 362 * pnv_eeh_probe - Do probe on PCI device
117 * @dev: PCI device 363 * @pdn: PCI device node
118 * @flag: unused 364 * @data: unused
119 * 365 *
120 * When EEH module is installed during system boot, all PCI devices 366 * When EEH module is installed during system boot, all PCI devices
121 * are checked one by one to see if it supports EEH. The function 367 * are checked one by one to see if it supports EEH. The function
@@ -129,12 +375,12 @@ static int powernv_eeh_post_init(void)
129 * was possiblly triggered by EEH core, the binding between EEH device 375 * was possiblly triggered by EEH core, the binding between EEH device
130 * and the PCI device isn't built yet. 376 * and the PCI device isn't built yet.
131 */ 377 */
132static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) 378static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
133{ 379{
134 struct pci_controller *hose = pci_bus_to_host(dev->bus); 380 struct pci_controller *hose = pdn->phb;
135 struct pnv_phb *phb = hose->private_data; 381 struct pnv_phb *phb = hose->private_data;
136 struct device_node *dn = pci_device_to_OF_node(dev); 382 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
137 struct eeh_dev *edev = of_node_to_eeh_dev(dn); 383 uint32_t pcie_flags;
138 int ret; 384 int ret;
139 385
140 /* 386 /*
@@ -143,40 +389,42 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
143 * the root bridge. So it's not reasonable to continue 389 * the root bridge. So it's not reasonable to continue
144 * the probing. 390 * the probing.
145 */ 391 */
146 if (!dn || !edev || edev->pe) 392 if (!edev || edev->pe)
147 return 0; 393 return NULL;
148 394
149 /* Skip for PCI-ISA bridge */ 395 /* Skip for PCI-ISA bridge */
150 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA) 396 if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
151 return 0; 397 return NULL;
152 398
153 /* Initialize eeh device */ 399 /* Initialize eeh device */
154 edev->class_code = dev->class; 400 edev->class_code = pdn->class_code;
155 edev->mode &= 0xFFFFFF00; 401 edev->mode &= 0xFFFFFF00;
156 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) 402 edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
403 edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
404 edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
405 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
157 edev->mode |= EEH_DEV_BRIDGE; 406 edev->mode |= EEH_DEV_BRIDGE;
158 edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 407 if (edev->pcie_cap) {
159 if (pci_is_pcie(dev)) { 408 pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
160 edev->pcie_cap = pci_pcie_cap(dev); 409 2, &pcie_flags);
161 410 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
162 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 411 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
163 edev->mode |= EEH_DEV_ROOT_PORT; 412 edev->mode |= EEH_DEV_ROOT_PORT;
164 else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) 413 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
165 edev->mode |= EEH_DEV_DS_PORT; 414 edev->mode |= EEH_DEV_DS_PORT;
166 415 }
167 edev->aer_cap = pci_find_ext_capability(dev,
168 PCI_EXT_CAP_ID_ERR);
169 } 416 }
170 417
171 edev->config_addr = ((dev->bus->number << 8) | dev->devfn); 418 edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
172 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff); 419 edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
173 420
174 /* Create PE */ 421 /* Create PE */
175 ret = eeh_add_to_parent_pe(edev); 422 ret = eeh_add_to_parent_pe(edev);
176 if (ret) { 423 if (ret) {
177 pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n", 424 pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
178 __func__, pci_name(dev), ret); 425 __func__, hose->global_number, pdn->busno,
179 return ret; 426 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
427 return NULL;
180 } 428 }
181 429
182 /* 430 /*
@@ -195,8 +443,10 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
195 * Broadcom Austin 4-ports NICs (14e4:1657) 443 * Broadcom Austin 4-ports NICs (14e4:1657)
196 * Broadcom Shiner 2-ports 10G NICs (14e4:168e) 444 * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
197 */ 445 */
198 if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) || 446 if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
199 (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e)) 447 pdn->device_id == 0x1657) ||
448 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
449 pdn->device_id == 0x168e))
200 edev->pe->state |= EEH_PE_CFG_RESTRICTED; 450 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
201 451
202 /* 452 /*
@@ -206,7 +456,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
206 * to PE reset. 456 * to PE reset.
207 */ 457 */
208 if (!edev->pe->bus) 458 if (!edev->pe->bus)
209 edev->pe->bus = dev->bus; 459 edev->pe->bus = pci_find_bus(hose->global_number,
460 pdn->busno);
210 461
211 /* 462 /*
212 * Enable EEH explicitly so that we will do EEH check 463 * Enable EEH explicitly so that we will do EEH check
@@ -217,11 +468,11 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
217 /* Save memory bars */ 468 /* Save memory bars */
218 eeh_save_bars(edev); 469 eeh_save_bars(edev);
219 470
220 return 0; 471 return NULL;
221} 472}
222 473
223/** 474/**
224 * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable 475 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
225 * @pe: EEH PE 476 * @pe: EEH PE
226 * @option: operation to be issued 477 * @option: operation to be issued
227 * 478 *
@@ -229,36 +480,236 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
229 * Currently, following options are support according to PAPR: 480 * Currently, following options are support according to PAPR:
230 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA 481 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
231 */ 482 */
232static int powernv_eeh_set_option(struct eeh_pe *pe, int option) 483static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
233{ 484{
234 struct pci_controller *hose = pe->phb; 485 struct pci_controller *hose = pe->phb;
235 struct pnv_phb *phb = hose->private_data; 486 struct pnv_phb *phb = hose->private_data;
236 int ret = -EEXIST; 487 bool freeze_pe = false;
488 int opt, ret = 0;
489 s64 rc;
490
491 /* Sanity check on option */
492 switch (option) {
493 case EEH_OPT_DISABLE:
494 return -EPERM;
495 case EEH_OPT_ENABLE:
496 return 0;
497 case EEH_OPT_THAW_MMIO:
498 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
499 break;
500 case EEH_OPT_THAW_DMA:
501 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
502 break;
503 case EEH_OPT_FREEZE_PE:
504 freeze_pe = true;
505 opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
506 break;
507 default:
508 pr_warn("%s: Invalid option %d\n", __func__, option);
509 return -EINVAL;
510 }
237 511
238 /* 512 /* If PHB supports compound PE, to handle it */
239 * What we need do is pass it down for hardware 513 if (freeze_pe) {
240 * implementation to handle it. 514 if (phb->freeze_pe) {
241 */ 515 phb->freeze_pe(phb, pe->addr);
242 if (phb->eeh_ops && phb->eeh_ops->set_option) 516 } else {
243 ret = phb->eeh_ops->set_option(pe, option); 517 rc = opal_pci_eeh_freeze_set(phb->opal_id,
518 pe->addr, opt);
519 if (rc != OPAL_SUCCESS) {
520 pr_warn("%s: Failure %lld freezing "
521 "PHB#%x-PE#%x\n",
522 __func__, rc,
523 phb->hose->global_number, pe->addr);
524 ret = -EIO;
525 }
526 }
527 } else {
528 if (phb->unfreeze_pe) {
529 ret = phb->unfreeze_pe(phb, pe->addr, opt);
530 } else {
531 rc = opal_pci_eeh_freeze_clear(phb->opal_id,
532 pe->addr, opt);
533 if (rc != OPAL_SUCCESS) {
534 pr_warn("%s: Failure %lld enable %d "
535 "for PHB#%x-PE#%x\n",
536 __func__, rc, option,
537 phb->hose->global_number, pe->addr);
538 ret = -EIO;
539 }
540 }
541 }
244 542
245 return ret; 543 return ret;
246} 544}
247 545
248/** 546/**
249 * powernv_eeh_get_pe_addr - Retrieve PE address 547 * pnv_eeh_get_pe_addr - Retrieve PE address
250 * @pe: EEH PE 548 * @pe: EEH PE
251 * 549 *
252 * Retrieve the PE address according to the given tranditional 550 * Retrieve the PE address according to the given tranditional
253 * PCI BDF (Bus/Device/Function) address. 551 * PCI BDF (Bus/Device/Function) address.
254 */ 552 */
255static int powernv_eeh_get_pe_addr(struct eeh_pe *pe) 553static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
256{ 554{
257 return pe->addr; 555 return pe->addr;
258} 556}
259 557
558static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
559{
560 struct pnv_phb *phb = pe->phb->private_data;
561 s64 rc;
562
563 rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
564 PNV_PCI_DIAG_BUF_SIZE);
565 if (rc != OPAL_SUCCESS)
566 pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
567 __func__, rc, pe->phb->global_number);
568}
569
570static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
571{
572 struct pnv_phb *phb = pe->phb->private_data;
573 u8 fstate;
574 __be16 pcierr;
575 s64 rc;
576 int result = 0;
577
578 rc = opal_pci_eeh_freeze_status(phb->opal_id,
579 pe->addr,
580 &fstate,
581 &pcierr,
582 NULL);
583 if (rc != OPAL_SUCCESS) {
584 pr_warn("%s: Failure %lld getting PHB#%x state\n",
585 __func__, rc, phb->hose->global_number);
586 return EEH_STATE_NOT_SUPPORT;
587 }
588
589 /*
590 * Check PHB state. If the PHB is frozen for the
591 * first time, to dump the PHB diag-data.
592 */
593 if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
594 result = (EEH_STATE_MMIO_ACTIVE |
595 EEH_STATE_DMA_ACTIVE |
596 EEH_STATE_MMIO_ENABLED |
597 EEH_STATE_DMA_ENABLED);
598 } else if (!(pe->state & EEH_PE_ISOLATED)) {
599 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
600 pnv_eeh_get_phb_diag(pe);
601
602 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
603 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
604 }
605
606 return result;
607}
608
609static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
610{
611 struct pnv_phb *phb = pe->phb->private_data;
612 u8 fstate;
613 __be16 pcierr;
614 s64 rc;
615 int result;
616
617 /*
618 * We don't clobber hardware frozen state until PE
619 * reset is completed. In order to keep EEH core
620 * moving forward, we have to return operational
621 * state during PE reset.
622 */
623 if (pe->state & EEH_PE_RESET) {
624 result = (EEH_STATE_MMIO_ACTIVE |
625 EEH_STATE_DMA_ACTIVE |
626 EEH_STATE_MMIO_ENABLED |
627 EEH_STATE_DMA_ENABLED);
628 return result;
629 }
630
631 /*
632 * Fetch PE state from hardware. If the PHB
633 * supports compound PE, let it handle that.
634 */
635 if (phb->get_pe_state) {
636 fstate = phb->get_pe_state(phb, pe->addr);
637 } else {
638 rc = opal_pci_eeh_freeze_status(phb->opal_id,
639 pe->addr,
640 &fstate,
641 &pcierr,
642 NULL);
643 if (rc != OPAL_SUCCESS) {
644 pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
645 __func__, rc, phb->hose->global_number,
646 pe->addr);
647 return EEH_STATE_NOT_SUPPORT;
648 }
649 }
650
651 /* Figure out state */
652 switch (fstate) {
653 case OPAL_EEH_STOPPED_NOT_FROZEN:
654 result = (EEH_STATE_MMIO_ACTIVE |
655 EEH_STATE_DMA_ACTIVE |
656 EEH_STATE_MMIO_ENABLED |
657 EEH_STATE_DMA_ENABLED);
658 break;
659 case OPAL_EEH_STOPPED_MMIO_FREEZE:
660 result = (EEH_STATE_DMA_ACTIVE |
661 EEH_STATE_DMA_ENABLED);
662 break;
663 case OPAL_EEH_STOPPED_DMA_FREEZE:
664 result = (EEH_STATE_MMIO_ACTIVE |
665 EEH_STATE_MMIO_ENABLED);
666 break;
667 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
668 result = 0;
669 break;
670 case OPAL_EEH_STOPPED_RESET:
671 result = EEH_STATE_RESET_ACTIVE;
672 break;
673 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
674 result = EEH_STATE_UNAVAILABLE;
675 break;
676 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
677 result = EEH_STATE_NOT_SUPPORT;
678 break;
679 default:
680 result = EEH_STATE_NOT_SUPPORT;
681 pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
682 __func__, phb->hose->global_number,
683 pe->addr, fstate);
684 }
685
686 /*
687 * If PHB supports compound PE, to freeze all
688 * slave PEs for consistency.
689 *
690 * If the PE is switching to frozen state for the
691 * first time, to dump the PHB diag-data.
692 */
693 if (!(result & EEH_STATE_NOT_SUPPORT) &&
694 !(result & EEH_STATE_UNAVAILABLE) &&
695 !(result & EEH_STATE_MMIO_ACTIVE) &&
696 !(result & EEH_STATE_DMA_ACTIVE) &&
697 !(pe->state & EEH_PE_ISOLATED)) {
698 if (phb->freeze_pe)
699 phb->freeze_pe(phb, pe->addr);
700
701 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
702 pnv_eeh_get_phb_diag(pe);
703
704 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
705 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
706 }
707
708 return result;
709}
710
260/** 711/**
261 * powernv_eeh_get_state - Retrieve PE state 712 * pnv_eeh_get_state - Retrieve PE state
262 * @pe: EEH PE 713 * @pe: EEH PE
263 * @delay: delay while PE state is temporarily unavailable 714 * @delay: delay while PE state is temporarily unavailable
264 * 715 *
@@ -267,64 +718,279 @@ static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
267 * we prefer passing down to hardware implementation to handle 718 * we prefer passing down to hardware implementation to handle
268 * it. 719 * it.
269 */ 720 */
270static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay) 721static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
722{
723 int ret;
724
725 if (pe->type & EEH_PE_PHB)
726 ret = pnv_eeh_get_phb_state(pe);
727 else
728 ret = pnv_eeh_get_pe_state(pe);
729
730 if (!delay)
731 return ret;
732
733 /*
734 * If the PE state is temporarily unavailable,
735 * to inform the EEH core delay for default
736 * period (1 second)
737 */
738 *delay = 0;
739 if (ret & EEH_STATE_UNAVAILABLE)
740 *delay = 1000;
741
742 return ret;
743}
744
745static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
746{
747 s64 rc = OPAL_HARDWARE;
748
749 while (1) {
750 rc = opal_pci_poll(phb->opal_id);
751 if (rc <= 0)
752 break;
753
754 if (system_state < SYSTEM_RUNNING)
755 udelay(1000 * rc);
756 else
757 msleep(rc);
758 }
759
760 return rc;
761}
762
763int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
271{ 764{
272 struct pci_controller *hose = pe->phb;
273 struct pnv_phb *phb = hose->private_data; 765 struct pnv_phb *phb = hose->private_data;
274 int ret = EEH_STATE_NOT_SUPPORT; 766 s64 rc = OPAL_HARDWARE;
767
768 pr_debug("%s: Reset PHB#%x, option=%d\n",
769 __func__, hose->global_number, option);
770
771 /* Issue PHB complete reset request */
772 if (option == EEH_RESET_FUNDAMENTAL ||
773 option == EEH_RESET_HOT)
774 rc = opal_pci_reset(phb->opal_id,
775 OPAL_RESET_PHB_COMPLETE,
776 OPAL_ASSERT_RESET);
777 else if (option == EEH_RESET_DEACTIVATE)
778 rc = opal_pci_reset(phb->opal_id,
779 OPAL_RESET_PHB_COMPLETE,
780 OPAL_DEASSERT_RESET);
781 if (rc < 0)
782 goto out;
275 783
276 if (phb->eeh_ops && phb->eeh_ops->get_state) { 784 /*
277 ret = phb->eeh_ops->get_state(pe); 785 * Poll state of the PHB until the request is done
786 * successfully. The PHB reset is usually PHB complete
787 * reset followed by hot reset on root bus. So we also
788 * need the PCI bus settlement delay.
789 */
790 rc = pnv_eeh_phb_poll(phb);
791 if (option == EEH_RESET_DEACTIVATE) {
792 if (system_state < SYSTEM_RUNNING)
793 udelay(1000 * EEH_PE_RST_SETTLE_TIME);
794 else
795 msleep(EEH_PE_RST_SETTLE_TIME);
796 }
797out:
798 if (rc != OPAL_SUCCESS)
799 return -EIO;
278 800
279 /* 801 return 0;
280 * If the PE state is temporarily unavailable, 802}
281 * to inform the EEH core delay for default 803
282 * period (1 second) 804static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
283 */ 805{
284 if (delay) { 806 struct pnv_phb *phb = hose->private_data;
285 *delay = 0; 807 s64 rc = OPAL_HARDWARE;
286 if (ret & EEH_STATE_UNAVAILABLE) 808
287 *delay = 1000; 809 pr_debug("%s: Reset PHB#%x, option=%d\n",
810 __func__, hose->global_number, option);
811
812 /*
813 * During the reset deassert time, we needn't care
814 * the reset scope because the firmware does nothing
815 * for fundamental or hot reset during deassert phase.
816 */
817 if (option == EEH_RESET_FUNDAMENTAL)
818 rc = opal_pci_reset(phb->opal_id,
819 OPAL_RESET_PCI_FUNDAMENTAL,
820 OPAL_ASSERT_RESET);
821 else if (option == EEH_RESET_HOT)
822 rc = opal_pci_reset(phb->opal_id,
823 OPAL_RESET_PCI_HOT,
824 OPAL_ASSERT_RESET);
825 else if (option == EEH_RESET_DEACTIVATE)
826 rc = opal_pci_reset(phb->opal_id,
827 OPAL_RESET_PCI_HOT,
828 OPAL_DEASSERT_RESET);
829 if (rc < 0)
830 goto out;
831
832 /* Poll state of the PHB until the request is done */
833 rc = pnv_eeh_phb_poll(phb);
834 if (option == EEH_RESET_DEACTIVATE)
835 msleep(EEH_PE_RST_SETTLE_TIME);
836out:
837 if (rc != OPAL_SUCCESS)
838 return -EIO;
839
840 return 0;
841}
842
843static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
844{
845 struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
846 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
847 int aer = edev ? edev->aer_cap : 0;
848 u32 ctrl;
849
850 pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
851 __func__, pci_domain_nr(dev->bus),
852 dev->bus->number, option);
853
854 switch (option) {
855 case EEH_RESET_FUNDAMENTAL:
856 case EEH_RESET_HOT:
857 /* Don't report linkDown event */
858 if (aer) {
859 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
860 4, &ctrl);
861 ctrl |= PCI_ERR_UNC_SURPDN;
862 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
863 4, ctrl);
288 } 864 }
865
866 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
867 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
868 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
869
870 msleep(EEH_PE_RST_HOLD_TIME);
871 break;
872 case EEH_RESET_DEACTIVATE:
873 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
874 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
875 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
876
877 msleep(EEH_PE_RST_SETTLE_TIME);
878
879 /* Continue reporting linkDown event */
880 if (aer) {
881 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
882 4, &ctrl);
883 ctrl &= ~PCI_ERR_UNC_SURPDN;
884 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
885 4, ctrl);
886 }
887
888 break;
289 } 889 }
290 890
291 return ret; 891 return 0;
892}
893
894void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
895{
896 struct pci_controller *hose;
897
898 if (pci_is_root_bus(dev->bus)) {
899 hose = pci_bus_to_host(dev->bus);
900 pnv_eeh_root_reset(hose, EEH_RESET_HOT);
901 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
902 } else {
903 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
904 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
905 }
292} 906}
293 907
294/** 908/**
295 * powernv_eeh_reset - Reset the specified PE 909 * pnv_eeh_reset - Reset the specified PE
296 * @pe: EEH PE 910 * @pe: EEH PE
297 * @option: reset option 911 * @option: reset option
298 * 912 *
299 * Reset the specified PE 913 * Do reset on the indicated PE. For PCI bus sensitive PE,
914 * we need to reset the parent p2p bridge. The PHB has to
915 * be reinitialized if the p2p bridge is root bridge. For
916 * PCI device sensitive PE, we will try to reset the device
917 * through FLR. For now, we don't have OPAL APIs to do HARD
918 * reset yet, so all reset would be SOFT (HOT) reset.
300 */ 919 */
301static int powernv_eeh_reset(struct eeh_pe *pe, int option) 920static int pnv_eeh_reset(struct eeh_pe *pe, int option)
302{ 921{
303 struct pci_controller *hose = pe->phb; 922 struct pci_controller *hose = pe->phb;
304 struct pnv_phb *phb = hose->private_data; 923 struct pci_bus *bus;
305 int ret = -EEXIST; 924 int ret;
925
926 /*
927 * For PHB reset, we always have complete reset. For those PEs whose
928 * primary bus derived from root complex (root bus) or root port
929 * (usually bus#1), we apply hot or fundamental reset on the root port.
930 * For other PEs, we always have hot reset on the PE primary bus.
931 *
932 * Here, we have different design to pHyp, which always clear the
933 * frozen state during PE reset. However, the good idea here from
934 * benh is to keep frozen state before we get PE reset done completely
935 * (until BAR restore). With the frozen state, HW drops illegal IO
936 * or MMIO access, which can incur recrusive frozen PE during PE
937 * reset. The side effect is that EEH core has to clear the frozen
938 * state explicitly after BAR restore.
939 */
940 if (pe->type & EEH_PE_PHB) {
941 ret = pnv_eeh_phb_reset(hose, option);
942 } else {
943 struct pnv_phb *phb;
944 s64 rc;
306 945
307 if (phb->eeh_ops && phb->eeh_ops->reset) 946 /*
308 ret = phb->eeh_ops->reset(pe, option); 947 * The frozen PE might be caused by PAPR error injection
948 * registers, which are expected to be cleared after hitting
949 * frozen PE as stated in the hardware spec. Unfortunately,
950 * that's not true on P7IOC. So we have to clear it manually
951 * to avoid recursive EEH errors during recovery.
952 */
953 phb = hose->private_data;
954 if (phb->model == PNV_PHB_MODEL_P7IOC &&
955 (option == EEH_RESET_HOT ||
956 option == EEH_RESET_FUNDAMENTAL)) {
957 rc = opal_pci_reset(phb->opal_id,
958 OPAL_RESET_PHB_ERROR,
959 OPAL_ASSERT_RESET);
960 if (rc != OPAL_SUCCESS) {
961 pr_warn("%s: Failure %lld clearing "
962 "error injection registers\n",
963 __func__, rc);
964 return -EIO;
965 }
966 }
967
968 bus = eeh_pe_bus_get(pe);
969 if (pci_is_root_bus(bus) ||
970 pci_is_root_bus(bus->parent))
971 ret = pnv_eeh_root_reset(hose, option);
972 else
973 ret = pnv_eeh_bridge_reset(bus->self, option);
974 }
309 975
310 return ret; 976 return ret;
311} 977}
312 978
313/** 979/**
314 * powernv_eeh_wait_state - Wait for PE state 980 * pnv_eeh_wait_state - Wait for PE state
315 * @pe: EEH PE 981 * @pe: EEH PE
316 * @max_wait: maximal period in microsecond 982 * @max_wait: maximal period in microsecond
317 * 983 *
318 * Wait for the state of associated PE. It might take some time 984 * Wait for the state of associated PE. It might take some time
319 * to retrieve the PE's state. 985 * to retrieve the PE's state.
320 */ 986 */
321static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait) 987static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
322{ 988{
323 int ret; 989 int ret;
324 int mwait; 990 int mwait;
325 991
326 while (1) { 992 while (1) {
327 ret = powernv_eeh_get_state(pe, &mwait); 993 ret = pnv_eeh_get_state(pe, &mwait);
328 994
329 /* 995 /*
330 * If the PE's state is temporarily unavailable, 996 * If the PE's state is temporarily unavailable,
@@ -348,7 +1014,7 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
348} 1014}
349 1015
350/** 1016/**
351 * powernv_eeh_get_log - Retrieve error log 1017 * pnv_eeh_get_log - Retrieve error log
352 * @pe: EEH PE 1018 * @pe: EEH PE
353 * @severity: temporary or permanent error log 1019 * @severity: temporary or permanent error log
354 * @drv_log: driver log to be combined with retrieved error log 1020 * @drv_log: driver log to be combined with retrieved error log
@@ -356,41 +1022,30 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
356 * 1022 *
357 * Retrieve the temporary or permanent error from the PE. 1023 * Retrieve the temporary or permanent error from the PE.
358 */ 1024 */
359static int powernv_eeh_get_log(struct eeh_pe *pe, int severity, 1025static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
360 char *drv_log, unsigned long len) 1026 char *drv_log, unsigned long len)
361{ 1027{
362 struct pci_controller *hose = pe->phb; 1028 if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
363 struct pnv_phb *phb = hose->private_data; 1029 pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
364 int ret = -EEXIST;
365 1030
366 if (phb->eeh_ops && phb->eeh_ops->get_log) 1031 return 0;
367 ret = phb->eeh_ops->get_log(pe, severity, drv_log, len);
368
369 return ret;
370} 1032}
371 1033
372/** 1034/**
373 * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE 1035 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
374 * @pe: EEH PE 1036 * @pe: EEH PE
375 * 1037 *
376 * The function will be called to reconfigure the bridges included 1038 * The function will be called to reconfigure the bridges included
377 * in the specified PE so that the mulfunctional PE would be recovered 1039 * in the specified PE so that the mulfunctional PE would be recovered
378 * again. 1040 * again.
379 */ 1041 */
380static int powernv_eeh_configure_bridge(struct eeh_pe *pe) 1042static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
381{ 1043{
382 struct pci_controller *hose = pe->phb; 1044 return 0;
383 struct pnv_phb *phb = hose->private_data;
384 int ret = 0;
385
386 if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
387 ret = phb->eeh_ops->configure_bridge(pe);
388
389 return ret;
390} 1045}
391 1046
392/** 1047/**
393 * powernv_pe_err_inject - Inject specified error to the indicated PE 1048 * pnv_pe_err_inject - Inject specified error to the indicated PE
394 * @pe: the indicated PE 1049 * @pe: the indicated PE
395 * @type: error type 1050 * @type: error type
396 * @func: specific error type 1051 * @func: specific error type
@@ -401,22 +1056,52 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
401 * determined by @type and @func, to the indicated PE for 1056 * determined by @type and @func, to the indicated PE for
402 * testing purpose. 1057 * testing purpose.
403 */ 1058 */
404static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func, 1059static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
405 unsigned long addr, unsigned long mask) 1060 unsigned long addr, unsigned long mask)
406{ 1061{
407 struct pci_controller *hose = pe->phb; 1062 struct pci_controller *hose = pe->phb;
408 struct pnv_phb *phb = hose->private_data; 1063 struct pnv_phb *phb = hose->private_data;
409 int ret = -EEXIST; 1064 s64 rc;
1065
1066 /* Sanity check on error type */
1067 if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
1068 type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
1069 pr_warn("%s: Invalid error type %d\n",
1070 __func__, type);
1071 return -ERANGE;
1072 }
410 1073
411 if (phb->eeh_ops && phb->eeh_ops->err_inject) 1074 if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
412 ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask); 1075 func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
1076 pr_warn("%s: Invalid error function %d\n",
1077 __func__, func);
1078 return -ERANGE;
1079 }
413 1080
414 return ret; 1081 /* Firmware supports error injection ? */
1082 if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
1083 pr_warn("%s: Firmware doesn't support error injection\n",
1084 __func__);
1085 return -ENXIO;
1086 }
1087
1088 /* Do error injection */
1089 rc = opal_pci_err_inject(phb->opal_id, pe->addr,
1090 type, func, addr, mask);
1091 if (rc != OPAL_SUCCESS) {
1092 pr_warn("%s: Failure %lld injecting error "
1093 "%d-%d to PHB#%x-PE#%x\n",
1094 __func__, rc, type, func,
1095 hose->global_number, pe->addr);
1096 return -EIO;
1097 }
1098
1099 return 0;
415} 1100}
416 1101
417static inline bool powernv_eeh_cfg_blocked(struct device_node *dn) 1102static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
418{ 1103{
419 struct eeh_dev *edev = of_node_to_eeh_dev(dn); 1104 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
420 1105
421 if (!edev || !edev->pe) 1106 if (!edev || !edev->pe)
422 return false; 1107 return false;
@@ -427,51 +1112,377 @@ static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
427 return false; 1112 return false;
428} 1113}
429 1114
430static int powernv_eeh_read_config(struct device_node *dn, 1115static int pnv_eeh_read_config(struct pci_dn *pdn,
431 int where, int size, u32 *val) 1116 int where, int size, u32 *val)
432{ 1117{
433 if (powernv_eeh_cfg_blocked(dn)) { 1118 if (!pdn)
1119 return PCIBIOS_DEVICE_NOT_FOUND;
1120
1121 if (pnv_eeh_cfg_blocked(pdn)) {
434 *val = 0xFFFFFFFF; 1122 *val = 0xFFFFFFFF;
435 return PCIBIOS_SET_FAILED; 1123 return PCIBIOS_SET_FAILED;
436 } 1124 }
437 1125
438 return pnv_pci_cfg_read(dn, where, size, val); 1126 return pnv_pci_cfg_read(pdn, where, size, val);
439} 1127}
440 1128
441static int powernv_eeh_write_config(struct device_node *dn, 1129static int pnv_eeh_write_config(struct pci_dn *pdn,
442 int where, int size, u32 val) 1130 int where, int size, u32 val)
443{ 1131{
444 if (powernv_eeh_cfg_blocked(dn)) 1132 if (!pdn)
1133 return PCIBIOS_DEVICE_NOT_FOUND;
1134
1135 if (pnv_eeh_cfg_blocked(pdn))
445 return PCIBIOS_SET_FAILED; 1136 return PCIBIOS_SET_FAILED;
446 1137
447 return pnv_pci_cfg_write(dn, where, size, val); 1138 return pnv_pci_cfg_write(pdn, where, size, val);
1139}
1140
1141static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
1142{
1143 /* GEM */
1144 if (data->gemXfir || data->gemRfir ||
1145 data->gemRirqfir || data->gemMask || data->gemRwof)
1146 pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
1147 be64_to_cpu(data->gemXfir),
1148 be64_to_cpu(data->gemRfir),
1149 be64_to_cpu(data->gemRirqfir),
1150 be64_to_cpu(data->gemMask),
1151 be64_to_cpu(data->gemRwof));
1152
1153 /* LEM */
1154 if (data->lemFir || data->lemErrMask ||
1155 data->lemAction0 || data->lemAction1 || data->lemWof)
1156 pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
1157 be64_to_cpu(data->lemFir),
1158 be64_to_cpu(data->lemErrMask),
1159 be64_to_cpu(data->lemAction0),
1160 be64_to_cpu(data->lemAction1),
1161 be64_to_cpu(data->lemWof));
1162}
1163
1164static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
1165{
1166 struct pnv_phb *phb = hose->private_data;
1167 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
1168 long rc;
1169
1170 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
1171 if (rc != OPAL_SUCCESS) {
1172 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
1173 __func__, phb->hub_id, rc);
1174 return;
1175 }
1176
1177 switch (data->type) {
1178 case OPAL_P7IOC_DIAG_TYPE_RGC:
1179 pr_info("P7IOC diag-data for RGC\n\n");
1180 pnv_eeh_dump_hub_diag_common(data);
1181 if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
1182 pr_info(" RGC: %016llx %016llx\n",
1183 be64_to_cpu(data->rgc.rgcStatus),
1184 be64_to_cpu(data->rgc.rgcLdcp));
1185 break;
1186 case OPAL_P7IOC_DIAG_TYPE_BI:
1187 pr_info("P7IOC diag-data for BI %s\n\n",
1188 data->bi.biDownbound ? "Downbound" : "Upbound");
1189 pnv_eeh_dump_hub_diag_common(data);
1190 if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
1191 data->bi.biLdcp2 || data->bi.biFenceStatus)
1192 pr_info(" BI: %016llx %016llx %016llx %016llx\n",
1193 be64_to_cpu(data->bi.biLdcp0),
1194 be64_to_cpu(data->bi.biLdcp1),
1195 be64_to_cpu(data->bi.biLdcp2),
1196 be64_to_cpu(data->bi.biFenceStatus));
1197 break;
1198 case OPAL_P7IOC_DIAG_TYPE_CI:
1199 pr_info("P7IOC diag-data for CI Port %d\n\n",
1200 data->ci.ciPort);
1201 pnv_eeh_dump_hub_diag_common(data);
1202 if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
1203 pr_info(" CI: %016llx %016llx\n",
1204 be64_to_cpu(data->ci.ciPortStatus),
1205 be64_to_cpu(data->ci.ciPortLdcp));
1206 break;
1207 case OPAL_P7IOC_DIAG_TYPE_MISC:
1208 pr_info("P7IOC diag-data for MISC\n\n");
1209 pnv_eeh_dump_hub_diag_common(data);
1210 break;
1211 case OPAL_P7IOC_DIAG_TYPE_I2C:
1212 pr_info("P7IOC diag-data for I2C\n\n");
1213 pnv_eeh_dump_hub_diag_common(data);
1214 break;
1215 default:
1216 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
1217 __func__, phb->hub_id, data->type);
1218 }
1219}
1220
1221static int pnv_eeh_get_pe(struct pci_controller *hose,
1222 u16 pe_no, struct eeh_pe **pe)
1223{
1224 struct pnv_phb *phb = hose->private_data;
1225 struct pnv_ioda_pe *pnv_pe;
1226 struct eeh_pe *dev_pe;
1227 struct eeh_dev edev;
1228
1229 /*
1230 * If PHB supports compound PE, to fetch
1231 * the master PE because slave PE is invisible
1232 * to EEH core.
1233 */
1234 pnv_pe = &phb->ioda.pe_array[pe_no];
1235 if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
1236 pnv_pe = pnv_pe->master;
1237 WARN_ON(!pnv_pe ||
1238 !(pnv_pe->flags & PNV_IODA_PE_MASTER));
1239 pe_no = pnv_pe->pe_number;
1240 }
1241
1242 /* Find the PE according to PE# */
1243 memset(&edev, 0, sizeof(struct eeh_dev));
1244 edev.phb = hose;
1245 edev.pe_config_addr = pe_no;
1246 dev_pe = eeh_pe_get(&edev);
1247 if (!dev_pe)
1248 return -EEXIST;
1249
1250 /* Freeze the (compound) PE */
1251 *pe = dev_pe;
1252 if (!(dev_pe->state & EEH_PE_ISOLATED))
1253 phb->freeze_pe(phb, pe_no);
1254
1255 /*
1256 * At this point, we're sure the (compound) PE should
1257 * have been frozen. However, we still need poke until
1258 * hitting the frozen PE on top level.
1259 */
1260 dev_pe = dev_pe->parent;
1261 while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
1262 int ret;
1263 int active_flags = (EEH_STATE_MMIO_ACTIVE |
1264 EEH_STATE_DMA_ACTIVE);
1265
1266 ret = eeh_ops->get_state(dev_pe, NULL);
1267 if (ret <= 0 || (ret & active_flags) == active_flags) {
1268 dev_pe = dev_pe->parent;
1269 continue;
1270 }
1271
1272 /* Frozen parent PE */
1273 *pe = dev_pe;
1274 if (!(dev_pe->state & EEH_PE_ISOLATED))
1275 phb->freeze_pe(phb, dev_pe->addr);
1276
1277 /* Next one */
1278 dev_pe = dev_pe->parent;
1279 }
1280
1281 return 0;
448} 1282}
449 1283
450/** 1284/**
451 * powernv_eeh_next_error - Retrieve next EEH error to handle 1285 * pnv_eeh_next_error - Retrieve next EEH error to handle
452 * @pe: Affected PE 1286 * @pe: Affected PE
453 * 1287 *
454 * Using OPAL API, to retrieve next EEH error for EEH core to handle 1288 * The function is expected to be called by EEH core while it gets
1289 * special EEH event (without binding PE). The function calls to
1290 * OPAL APIs for next error to handle. The informational error is
1291 * handled internally by platform. However, the dead IOC, dead PHB,
1292 * fenced PHB and frozen PE should be handled by EEH core eventually.
455 */ 1293 */
456static int powernv_eeh_next_error(struct eeh_pe **pe) 1294static int pnv_eeh_next_error(struct eeh_pe **pe)
457{ 1295{
458 struct pci_controller *hose; 1296 struct pci_controller *hose;
459 struct pnv_phb *phb = NULL; 1297 struct pnv_phb *phb;
1298 struct eeh_pe *phb_pe, *parent_pe;
1299 __be64 frozen_pe_no;
1300 __be16 err_type, severity;
1301 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
1302 long rc;
1303 int state, ret = EEH_NEXT_ERR_NONE;
1304
1305 /*
1306 * While running here, it's safe to purge the event queue.
1307 * And we should keep the cached OPAL notifier event sychronized
1308 * between the kernel and firmware.
1309 */
1310 eeh_remove_event(NULL, false);
1311 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
460 1312
461 list_for_each_entry(hose, &hose_list, list_node) { 1313 list_for_each_entry(hose, &hose_list, list_node) {
1314 /*
1315 * If the subordinate PCI buses of the PHB has been
1316 * removed or is exactly under error recovery, we
1317 * needn't take care of it any more.
1318 */
462 phb = hose->private_data; 1319 phb = hose->private_data;
463 break; 1320 phb_pe = eeh_phb_pe_get(hose);
464 } 1321 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
1322 continue;
1323
1324 rc = opal_pci_next_error(phb->opal_id,
1325 &frozen_pe_no, &err_type, &severity);
1326 if (rc != OPAL_SUCCESS) {
1327 pr_devel("%s: Invalid return value on "
1328 "PHB#%x (0x%lx) from opal_pci_next_error",
1329 __func__, hose->global_number, rc);
1330 continue;
1331 }
1332
1333 /* If the PHB doesn't have error, stop processing */
1334 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
1335 be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
1336 pr_devel("%s: No error found on PHB#%x\n",
1337 __func__, hose->global_number);
1338 continue;
1339 }
1340
1341 /*
1342 * Processing the error. We're expecting the error with
1343 * highest priority reported upon multiple errors on the
1344 * specific PHB.
1345 */
1346 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
1347 __func__, be16_to_cpu(err_type),
1348 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
1349 hose->global_number);
1350 switch (be16_to_cpu(err_type)) {
1351 case OPAL_EEH_IOC_ERROR:
1352 if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
1353 pr_err("EEH: dead IOC detected\n");
1354 ret = EEH_NEXT_ERR_DEAD_IOC;
1355 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1356 pr_info("EEH: IOC informative error "
1357 "detected\n");
1358 pnv_eeh_get_and_dump_hub_diag(hose);
1359 ret = EEH_NEXT_ERR_NONE;
1360 }
1361
1362 break;
1363 case OPAL_EEH_PHB_ERROR:
1364 if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
1365 *pe = phb_pe;
1366 pr_err("EEH: dead PHB#%x detected, "
1367 "location: %s\n",
1368 hose->global_number,
1369 eeh_pe_loc_get(phb_pe));
1370 ret = EEH_NEXT_ERR_DEAD_PHB;
1371 } else if (be16_to_cpu(severity) ==
1372 OPAL_EEH_SEV_PHB_FENCED) {
1373 *pe = phb_pe;
1374 pr_err("EEH: Fenced PHB#%x detected, "
1375 "location: %s\n",
1376 hose->global_number,
1377 eeh_pe_loc_get(phb_pe));
1378 ret = EEH_NEXT_ERR_FENCED_PHB;
1379 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
1380 pr_info("EEH: PHB#%x informative error "
1381 "detected, location: %s\n",
1382 hose->global_number,
1383 eeh_pe_loc_get(phb_pe));
1384 pnv_eeh_get_phb_diag(phb_pe);
1385 pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
1386 ret = EEH_NEXT_ERR_NONE;
1387 }
1388
1389 break;
1390 case OPAL_EEH_PE_ERROR:
1391 /*
1392 * If we can't find the corresponding PE, we
1393 * just try to unfreeze.
1394 */
1395 if (pnv_eeh_get_pe(hose,
1396 be64_to_cpu(frozen_pe_no), pe)) {
1397 /* Try best to clear it */
1398 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
1399 hose->global_number, frozen_pe_no);
1400 pr_info("EEH: PHB location: %s\n",
1401 eeh_pe_loc_get(phb_pe));
1402 opal_pci_eeh_freeze_clear(phb->opal_id,
1403 frozen_pe_no,
1404 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
1405 ret = EEH_NEXT_ERR_NONE;
1406 } else if ((*pe)->state & EEH_PE_ISOLATED ||
1407 eeh_pe_passed(*pe)) {
1408 ret = EEH_NEXT_ERR_NONE;
1409 } else {
1410 pr_err("EEH: Frozen PE#%x "
1411 "on PHB#%x detected\n",
1412 (*pe)->addr,
1413 (*pe)->phb->global_number);
1414 pr_err("EEH: PE location: %s, "
1415 "PHB location: %s\n",
1416 eeh_pe_loc_get(*pe),
1417 eeh_pe_loc_get(phb_pe));
1418 ret = EEH_NEXT_ERR_FROZEN_PE;
1419 }
1420
1421 break;
1422 default:
1423 pr_warn("%s: Unexpected error type %d\n",
1424 __func__, be16_to_cpu(err_type));
1425 }
465 1426
466 if (phb && phb->eeh_ops->next_error) 1427 /*
467 return phb->eeh_ops->next_error(pe); 1428 * EEH core will try recover from fenced PHB or
1429 * frozen PE. In the time for frozen PE, EEH core
1430 * enable IO path for that before collecting logs,
1431 * but it ruins the site. So we have to dump the
1432 * log in advance here.
1433 */
1434 if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
1435 ret == EEH_NEXT_ERR_FENCED_PHB) &&
1436 !((*pe)->state & EEH_PE_ISOLATED)) {
1437 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1438 pnv_eeh_get_phb_diag(*pe);
1439
1440 if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
1441 pnv_pci_dump_phb_diag_data((*pe)->phb,
1442 (*pe)->data);
1443 }
468 1444
469 return -EEXIST; 1445 /*
1446 * We probably have the frozen parent PE out there and
1447 * we need have to handle frozen parent PE firstly.
1448 */
1449 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
1450 parent_pe = (*pe)->parent;
1451 while (parent_pe) {
1452 /* Hit the ceiling ? */
1453 if (parent_pe->type & EEH_PE_PHB)
1454 break;
1455
1456 /* Frozen parent PE ? */
1457 state = eeh_ops->get_state(parent_pe, NULL);
1458 if (state > 0 &&
1459 (state & active_flags) != active_flags)
1460 *pe = parent_pe;
1461
1462 /* Next parent level */
1463 parent_pe = parent_pe->parent;
1464 }
1465
1466 /* We possibly migrate to another PE */
1467 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
1468 }
1469
1470 /*
1471 * If we have no errors on the specific PHB or only
1472 * informative error there, we continue poking it.
1473 * Otherwise, we need actions to be taken by upper
1474 * layer.
1475 */
1476 if (ret > EEH_NEXT_ERR_INF)
1477 break;
1478 }
1479
1480 return ret;
470} 1481}
471 1482
472static int powernv_eeh_restore_config(struct device_node *dn) 1483static int pnv_eeh_restore_config(struct pci_dn *pdn)
473{ 1484{
474 struct eeh_dev *edev = of_node_to_eeh_dev(dn); 1485 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
475 struct pnv_phb *phb; 1486 struct pnv_phb *phb;
476 s64 ret; 1487 s64 ret;
477 1488
@@ -490,24 +1501,23 @@ static int powernv_eeh_restore_config(struct device_node *dn)
490 return 0; 1501 return 0;
491} 1502}
492 1503
493static struct eeh_ops powernv_eeh_ops = { 1504static struct eeh_ops pnv_eeh_ops = {
494 .name = "powernv", 1505 .name = "powernv",
495 .init = powernv_eeh_init, 1506 .init = pnv_eeh_init,
496 .post_init = powernv_eeh_post_init, 1507 .post_init = pnv_eeh_post_init,
497 .of_probe = NULL, 1508 .probe = pnv_eeh_probe,
498 .dev_probe = powernv_eeh_dev_probe, 1509 .set_option = pnv_eeh_set_option,
499 .set_option = powernv_eeh_set_option, 1510 .get_pe_addr = pnv_eeh_get_pe_addr,
500 .get_pe_addr = powernv_eeh_get_pe_addr, 1511 .get_state = pnv_eeh_get_state,
501 .get_state = powernv_eeh_get_state, 1512 .reset = pnv_eeh_reset,
502 .reset = powernv_eeh_reset, 1513 .wait_state = pnv_eeh_wait_state,
503 .wait_state = powernv_eeh_wait_state, 1514 .get_log = pnv_eeh_get_log,
504 .get_log = powernv_eeh_get_log, 1515 .configure_bridge = pnv_eeh_configure_bridge,
505 .configure_bridge = powernv_eeh_configure_bridge, 1516 .err_inject = pnv_eeh_err_inject,
506 .err_inject = powernv_eeh_err_inject, 1517 .read_config = pnv_eeh_read_config,
507 .read_config = powernv_eeh_read_config, 1518 .write_config = pnv_eeh_write_config,
508 .write_config = powernv_eeh_write_config, 1519 .next_error = pnv_eeh_next_error,
509 .next_error = powernv_eeh_next_error, 1520 .restore_config = pnv_eeh_restore_config
510 .restore_config = powernv_eeh_restore_config
511}; 1521};
512 1522
513/** 1523/**
@@ -521,7 +1531,7 @@ static int __init eeh_powernv_init(void)
521 int ret = -EINVAL; 1531 int ret = -EINVAL;
522 1532
523 eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE); 1533 eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
524 ret = eeh_ops_register(&powernv_eeh_ops); 1534 ret = eeh_ops_register(&pnv_eeh_ops);
525 if (!ret) 1535 if (!ret)
526 pr_info("EEH: PowerNV platform initialized\n"); 1536 pr_info("EEH: PowerNV platform initialized\n");
527 else 1537 else
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 23260f7dfa7a..5aa9c1ce4de3 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -452,5 +452,6 @@ void __init opal_platform_dump_init(void)
452 return; 452 return;
453 } 453 }
454 454
455 opal_dump_resend_notification(); 455 if (opal_check_token(OPAL_DUMP_RESEND))
456 opal_dump_resend_notification();
456} 457}
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 518fe95dbf24..38ce757e5e2a 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -313,7 +313,8 @@ int __init opal_elog_init(void)
313 } 313 }
314 314
315 /* We are now ready to pull error logs from opal. */ 315 /* We are now ready to pull error logs from opal. */
316 opal_resend_pending_logs(); 316 if (opal_check_token(OPAL_ELOG_RESEND))
317 opal_resend_pending_logs();
317 318
318 return 0; 319 return 0;
319} 320}
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 5c21d9c07f45..4ec6219287fc 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -120,7 +120,11 @@ static struct image_header_t image_header;
120static struct image_data_t image_data; 120static struct image_data_t image_data;
121static struct validate_flash_t validate_flash_data; 121static struct validate_flash_t validate_flash_data;
122static struct manage_flash_t manage_flash_data; 122static struct manage_flash_t manage_flash_data;
123static struct update_flash_t update_flash_data; 123
124/* Initialize update_flash_data status to No Operation */
125static struct update_flash_t update_flash_data = {
126 .status = FLASH_NO_OP,
127};
124 128
125static DEFINE_MUTEX(image_data_mutex); 129static DEFINE_MUTEX(image_data_mutex);
126 130
@@ -542,7 +546,7 @@ static struct attribute_group image_op_attr_group = {
542 .attrs = image_op_attrs, 546 .attrs = image_op_attrs,
543}; 547};
544 548
545void __init opal_flash_init(void) 549void __init opal_flash_update_init(void)
546{ 550{
547 int ret; 551 int ret;
548 552
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index f9896fd5d04a..9db4398ded5d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -16,6 +16,7 @@
16#include <linux/of.h> 16#include <linux/of.h>
17 17
18#include <asm/opal.h> 18#include <asm/opal.h>
19#include <asm/nvram.h>
19#include <asm/machdep.h> 20#include <asm/machdep.h>
20 21
21static unsigned int nvram_size; 22static unsigned int nvram_size;
@@ -62,6 +63,15 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
62 return count; 63 return count;
63} 64}
64 65
66static int __init opal_nvram_init_log_partitions(void)
67{
68 /* Scan nvram for partitions */
69 nvram_scan_partitions();
70 nvram_init_oops_partition(0);
71 return 0;
72}
73machine_arch_initcall(powernv, opal_nvram_init_log_partitions);
74
65void __init opal_nvram_init(void) 75void __init opal_nvram_init(void)
66{ 76{
67 struct device_node *np; 77 struct device_node *np;
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 4ab67ef7abc9..655250499d18 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -46,18 +46,28 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
46 46
47 mutex_lock(&opal_sensor_mutex); 47 mutex_lock(&opal_sensor_mutex);
48 ret = opal_sensor_read(sensor_hndl, token, &data); 48 ret = opal_sensor_read(sensor_hndl, token, &data);
49 if (ret != OPAL_ASYNC_COMPLETION) 49 switch (ret) {
50 goto out_token; 50 case OPAL_ASYNC_COMPLETION:
51 ret = opal_async_wait_response(token, &msg);
52 if (ret) {
53 pr_err("%s: Failed to wait for the async response, %d\n",
54 __func__, ret);
55 goto out_token;
56 }
51 57
52 ret = opal_async_wait_response(token, &msg); 58 ret = opal_error_code(be64_to_cpu(msg.params[1]));
53 if (ret) { 59 *sensor_data = be32_to_cpu(data);
54 pr_err("%s: Failed to wait for the async response, %d\n", 60 break;
55 __func__, ret); 61
56 goto out_token; 62 case OPAL_SUCCESS:
57 } 63 ret = 0;
64 *sensor_data = be32_to_cpu(data);
65 break;
58 66
59 *sensor_data = be32_to_cpu(data); 67 default:
60 ret = be64_to_cpu(msg.params[1]); 68 ret = opal_error_code(ret);
69 break;
70 }
61 71
62out_token: 72out_token:
63 mutex_unlock(&opal_sensor_mutex); 73 mutex_unlock(&opal_sensor_mutex);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index fcbe899fe299..a7ade94cdf87 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -286,9 +286,12 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
286OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG); 286OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
287OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); 287OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
288OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); 288OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
289OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE); 289OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE);
290OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO); 290OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
291OPAL_CALL(opal_tpo_read, OPAL_READ_TPO); 291OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
292OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND); 292OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
293OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV); 293OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
294OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST); 294OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
295OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
296OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
297OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 18fd4e71c9c1..2241565b0739 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -23,6 +23,8 @@
23#include <linux/kobject.h> 23#include <linux/kobject.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/memblock.h> 25#include <linux/memblock.h>
26#include <linux/kthread.h>
27#include <linux/freezer.h>
26 28
27#include <asm/machdep.h> 29#include <asm/machdep.h>
28#include <asm/opal.h> 30#include <asm/opal.h>
@@ -58,6 +60,7 @@ static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
58static DEFINE_SPINLOCK(opal_notifier_lock); 60static DEFINE_SPINLOCK(opal_notifier_lock);
59static uint64_t last_notified_mask = 0x0ul; 61static uint64_t last_notified_mask = 0x0ul;
60static atomic_t opal_notifier_hold = ATOMIC_INIT(0); 62static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
63static uint32_t opal_heartbeat;
61 64
62static void opal_reinit_cores(void) 65static void opal_reinit_cores(void)
63{ 66{
@@ -302,23 +305,26 @@ void opal_notifier_disable(void)
302 * Opal message notifier based on message type. Allow subscribers to get 305 * Opal message notifier based on message type. Allow subscribers to get
303 * notified for specific messgae type. 306 * notified for specific messgae type.
304 */ 307 */
305int opal_message_notifier_register(enum OpalMessageType msg_type, 308int opal_message_notifier_register(enum opal_msg_type msg_type,
306 struct notifier_block *nb) 309 struct notifier_block *nb)
307{ 310{
308 if (!nb) { 311 if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
309 pr_warning("%s: Invalid argument (%p)\n", 312 pr_warning("%s: Invalid arguments, msg_type:%d\n",
310 __func__, nb);
311 return -EINVAL;
312 }
313 if (msg_type > OPAL_MSG_TYPE_MAX) {
314 pr_warning("%s: Invalid message type argument (%d)\n",
315 __func__, msg_type); 313 __func__, msg_type);
316 return -EINVAL; 314 return -EINVAL;
317 } 315 }
316
318 return atomic_notifier_chain_register( 317 return atomic_notifier_chain_register(
319 &opal_msg_notifier_head[msg_type], nb); 318 &opal_msg_notifier_head[msg_type], nb);
320} 319}
321 320
321int opal_message_notifier_unregister(enum opal_msg_type msg_type,
322 struct notifier_block *nb)
323{
324 return atomic_notifier_chain_unregister(
325 &opal_msg_notifier_head[msg_type], nb);
326}
327
322static void opal_message_do_notify(uint32_t msg_type, void *msg) 328static void opal_message_do_notify(uint32_t msg_type, void *msg)
323{ 329{
324 /* notify subscribers */ 330 /* notify subscribers */
@@ -351,7 +357,7 @@ static void opal_handle_message(void)
351 type = be32_to_cpu(msg.msg_type); 357 type = be32_to_cpu(msg.msg_type);
352 358
353 /* Sanity check */ 359 /* Sanity check */
354 if (type > OPAL_MSG_TYPE_MAX) { 360 if (type >= OPAL_MSG_TYPE_MAX) {
355 pr_warning("%s: Unknown message type: %u\n", __func__, type); 361 pr_warning("%s: Unknown message type: %u\n", __func__, type);
356 return; 362 return;
357 } 363 }
@@ -665,6 +671,9 @@ static void __init opal_dump_region_init(void)
665 uint64_t size; 671 uint64_t size;
666 int rc; 672 int rc;
667 673
674 if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
675 return;
676
668 /* Register kernel log buffer */ 677 /* Register kernel log buffer */
669 addr = log_buf_addr_get(); 678 addr = log_buf_addr_get();
670 if (addr == NULL) 679 if (addr == NULL)
@@ -684,6 +693,15 @@ static void __init opal_dump_region_init(void)
684 "rc = %d\n", rc); 693 "rc = %d\n", rc);
685} 694}
686 695
696static void opal_flash_init(struct device_node *opal_node)
697{
698 struct device_node *np;
699
700 for_each_child_of_node(opal_node, np)
701 if (of_device_is_compatible(np, "ibm,opal-flash"))
702 of_platform_device_create(np, NULL, NULL);
703}
704
687static void opal_ipmi_init(struct device_node *opal_node) 705static void opal_ipmi_init(struct device_node *opal_node)
688{ 706{
689 struct device_node *np; 707 struct device_node *np;
@@ -741,6 +759,29 @@ static void __init opal_irq_init(struct device_node *dn)
741 } 759 }
742} 760}
743 761
762static int kopald(void *unused)
763{
764 set_freezable();
765 do {
766 try_to_freeze();
767 opal_poll_events(NULL);
768 msleep_interruptible(opal_heartbeat);
769 } while (!kthread_should_stop());
770
771 return 0;
772}
773
774static void opal_init_heartbeat(void)
775{
776 /* Old firwmware, we assume the HVC heartbeat is sufficient */
777 if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
778 &opal_heartbeat) != 0)
779 opal_heartbeat = 0;
780
781 if (opal_heartbeat)
782 kthread_run(kopald, NULL, "kopald");
783}
784
744static int __init opal_init(void) 785static int __init opal_init(void)
745{ 786{
746 struct device_node *np, *consoles; 787 struct device_node *np, *consoles;
@@ -769,6 +810,9 @@ static int __init opal_init(void)
769 /* Create i2c platform devices */ 810 /* Create i2c platform devices */
770 opal_i2c_create_devs(); 811 opal_i2c_create_devs();
771 812
813 /* Setup a heatbeat thread if requested by OPAL */
814 opal_init_heartbeat();
815
772 /* Find all OPAL interrupts and request them */ 816 /* Find all OPAL interrupts and request them */
773 opal_irq_init(opal_node); 817 opal_irq_init(opal_node);
774 818
@@ -782,7 +826,7 @@ static int __init opal_init(void)
782 /* Setup error log interface */ 826 /* Setup error log interface */
783 rc = opal_elog_init(); 827 rc = opal_elog_init();
784 /* Setup code update interface */ 828 /* Setup code update interface */
785 opal_flash_init(); 829 opal_flash_update_init();
786 /* Setup platform dump extract interface */ 830 /* Setup platform dump extract interface */
787 opal_platform_dump_init(); 831 opal_platform_dump_init();
788 /* Setup system parameters interface */ 832 /* Setup system parameters interface */
@@ -791,8 +835,11 @@ static int __init opal_init(void)
791 opal_msglog_init(); 835 opal_msglog_init();
792 } 836 }
793 837
838 /* Initialize OPAL IPMI backend */
794 opal_ipmi_init(opal_node); 839 opal_ipmi_init(opal_node);
795 840
841 opal_flash_init(opal_node);
842
796 return 0; 843 return 0;
797} 844}
798machine_subsys_initcall(powernv, opal_init); 845machine_subsys_initcall(powernv, opal_init);
@@ -823,13 +870,17 @@ void opal_shutdown(void)
823 } 870 }
824 871
825 /* Unregister memory dump region */ 872 /* Unregister memory dump region */
826 opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF); 873 if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
874 opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
827} 875}
828 876
829/* Export this so that test modules can use it */ 877/* Export this so that test modules can use it */
830EXPORT_SYMBOL_GPL(opal_invalid_call); 878EXPORT_SYMBOL_GPL(opal_invalid_call);
831EXPORT_SYMBOL_GPL(opal_ipmi_send); 879EXPORT_SYMBOL_GPL(opal_ipmi_send);
832EXPORT_SYMBOL_GPL(opal_ipmi_recv); 880EXPORT_SYMBOL_GPL(opal_ipmi_recv);
881EXPORT_SYMBOL_GPL(opal_flash_read);
882EXPORT_SYMBOL_GPL(opal_flash_write);
883EXPORT_SYMBOL_GPL(opal_flash_erase);
833 884
834/* Convert a region of vmalloc memory to an opal sg list */ 885/* Convert a region of vmalloc memory to an opal sg list */
835struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, 886struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -894,6 +945,25 @@ void opal_free_sg_list(struct opal_sg_list *sg)
894 } 945 }
895} 946}
896 947
948int opal_error_code(int rc)
949{
950 switch (rc) {
951 case OPAL_SUCCESS: return 0;
952
953 case OPAL_PARAMETER: return -EINVAL;
954 case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
955 case OPAL_BUSY_EVENT: return -EBUSY;
956 case OPAL_NO_MEM: return -ENOMEM;
957
958 case OPAL_UNSUPPORTED: return -EIO;
959 case OPAL_HARDWARE: return -EIO;
960 case OPAL_INTERNAL_ERROR: return -EIO;
961 default:
962 pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
963 return -EIO;
964 }
965}
966
897EXPORT_SYMBOL_GPL(opal_poll_events); 967EXPORT_SYMBOL_GPL(opal_poll_events);
898EXPORT_SYMBOL_GPL(opal_rtc_read); 968EXPORT_SYMBOL_GPL(opal_rtc_read);
899EXPORT_SYMBOL_GPL(opal_rtc_write); 969EXPORT_SYMBOL_GPL(opal_rtc_write);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6c9ff2b95119..920c252d1f49 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -44,6 +44,9 @@
44#include "powernv.h" 44#include "powernv.h"
45#include "pci.h" 45#include "pci.h"
46 46
47/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
48#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
49
47static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, 50static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
48 const char *fmt, ...) 51 const char *fmt, ...)
49{ 52{
@@ -56,11 +59,18 @@ static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
56 vaf.fmt = fmt; 59 vaf.fmt = fmt;
57 vaf.va = &args; 60 vaf.va = &args;
58 61
59 if (pe->pdev) 62 if (pe->flags & PNV_IODA_PE_DEV)
60 strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); 63 strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
61 else 64 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
62 sprintf(pfix, "%04x:%02x ", 65 sprintf(pfix, "%04x:%02x ",
63 pci_domain_nr(pe->pbus), pe->pbus->number); 66 pci_domain_nr(pe->pbus), pe->pbus->number);
67#ifdef CONFIG_PCI_IOV
68 else if (pe->flags & PNV_IODA_PE_VF)
69 sprintf(pfix, "%04x:%02x:%2x.%d",
70 pci_domain_nr(pe->parent_dev->bus),
71 (pe->rid & 0xff00) >> 8,
72 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
73#endif /* CONFIG_PCI_IOV*/
64 74
65 printk("%spci %s: [PE# %.3d] %pV", 75 printk("%spci %s: [PE# %.3d] %pV",
66 level, pfix, pe->pe_number, &vaf); 76 level, pfix, pe->pe_number, &vaf);
@@ -591,7 +601,7 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
591 bool is_add) 601 bool is_add)
592{ 602{
593 struct pnv_ioda_pe *slave; 603 struct pnv_ioda_pe *slave;
594 struct pci_dev *pdev; 604 struct pci_dev *pdev = NULL;
595 int ret; 605 int ret;
596 606
597 /* 607 /*
@@ -630,8 +640,12 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
630 640
631 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) 641 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
632 pdev = pe->pbus->self; 642 pdev = pe->pbus->self;
633 else 643 else if (pe->flags & PNV_IODA_PE_DEV)
634 pdev = pe->pdev->bus->self; 644 pdev = pe->pdev->bus->self;
645#ifdef CONFIG_PCI_IOV
646 else if (pe->flags & PNV_IODA_PE_VF)
647 pdev = pe->parent_dev->bus->self;
648#endif /* CONFIG_PCI_IOV */
635 while (pdev) { 649 while (pdev) {
636 struct pci_dn *pdn = pci_get_pdn(pdev); 650 struct pci_dn *pdn = pci_get_pdn(pdev);
637 struct pnv_ioda_pe *parent; 651 struct pnv_ioda_pe *parent;
@@ -649,6 +663,87 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
649 return 0; 663 return 0;
650} 664}
651 665
666#ifdef CONFIG_PCI_IOV
667static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
668{
669 struct pci_dev *parent;
670 uint8_t bcomp, dcomp, fcomp;
671 int64_t rc;
672 long rid_end, rid;
673
674 /* Currently, we just deconfigure VF PE. Bus PE will always there.*/
675 if (pe->pbus) {
676 int count;
677
678 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
679 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
680 parent = pe->pbus->self;
681 if (pe->flags & PNV_IODA_PE_BUS_ALL)
682 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
683 else
684 count = 1;
685
686 switch(count) {
687 case 1: bcomp = OpalPciBusAll; break;
688 case 2: bcomp = OpalPciBus7Bits; break;
689 case 4: bcomp = OpalPciBus6Bits; break;
690 case 8: bcomp = OpalPciBus5Bits; break;
691 case 16: bcomp = OpalPciBus4Bits; break;
692 case 32: bcomp = OpalPciBus3Bits; break;
693 default:
694 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
695 count);
696 /* Do an exact match only */
697 bcomp = OpalPciBusAll;
698 }
699 rid_end = pe->rid + (count << 8);
700 } else {
701 if (pe->flags & PNV_IODA_PE_VF)
702 parent = pe->parent_dev;
703 else
704 parent = pe->pdev->bus->self;
705 bcomp = OpalPciBusAll;
706 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
707 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
708 rid_end = pe->rid + 1;
709 }
710
711 /* Clear the reverse map */
712 for (rid = pe->rid; rid < rid_end; rid++)
713 phb->ioda.pe_rmap[rid] = 0;
714
715 /* Release from all parents PELT-V */
716 while (parent) {
717 struct pci_dn *pdn = pci_get_pdn(parent);
718 if (pdn && pdn->pe_number != IODA_INVALID_PE) {
719 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
720 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
721 /* XXX What to do in case of error ? */
722 }
723 parent = parent->bus->self;
724 }
725
726 opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
727 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
728
729 /* Disassociate PE in PELT */
730 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
731 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
732 if (rc)
733 pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
734 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
735 bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
736 if (rc)
737 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
738
739 pe->pbus = NULL;
740 pe->pdev = NULL;
741 pe->parent_dev = NULL;
742
743 return 0;
744}
745#endif /* CONFIG_PCI_IOV */
746
652static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) 747static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
653{ 748{
654 struct pci_dev *parent; 749 struct pci_dev *parent;
@@ -675,15 +770,19 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
675 case 16: bcomp = OpalPciBus4Bits; break; 770 case 16: bcomp = OpalPciBus4Bits; break;
676 case 32: bcomp = OpalPciBus3Bits; break; 771 case 32: bcomp = OpalPciBus3Bits; break;
677 default: 772 default:
678 pr_err("%s: Number of subordinate busses %d" 773 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
679 " unsupported\n", 774 count);
680 pci_name(pe->pbus->self), count);
681 /* Do an exact match only */ 775 /* Do an exact match only */
682 bcomp = OpalPciBusAll; 776 bcomp = OpalPciBusAll;
683 } 777 }
684 rid_end = pe->rid + (count << 8); 778 rid_end = pe->rid + (count << 8);
685 } else { 779 } else {
686 parent = pe->pdev->bus->self; 780#ifdef CONFIG_PCI_IOV
781 if (pe->flags & PNV_IODA_PE_VF)
782 parent = pe->parent_dev;
783 else
784#endif /* CONFIG_PCI_IOV */
785 parent = pe->pdev->bus->self;
687 bcomp = OpalPciBusAll; 786 bcomp = OpalPciBusAll;
688 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; 787 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
689 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; 788 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
@@ -774,6 +873,78 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
774 return 10; 873 return 10;
775} 874}
776 875
876#ifdef CONFIG_PCI_IOV
877static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
878{
879 struct pci_dn *pdn = pci_get_pdn(dev);
880 int i;
881 struct resource *res, res2;
882 resource_size_t size;
883 u16 num_vfs;
884
885 if (!dev->is_physfn)
886 return -EINVAL;
887
888 /*
889 * "offset" is in VFs. The M64 windows are sized so that when they
890 * are segmented, each segment is the same size as the IOV BAR.
891 * Each segment is in a separate PE, and the high order bits of the
892 * address are the PE number. Therefore, each VF's BAR is in a
893 * separate PE, and changing the IOV BAR start address changes the
894 * range of PEs the VFs are in.
895 */
896 num_vfs = pdn->num_vfs;
897 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
898 res = &dev->resource[i + PCI_IOV_RESOURCES];
899 if (!res->flags || !res->parent)
900 continue;
901
902 if (!pnv_pci_is_mem_pref_64(res->flags))
903 continue;
904
905 /*
906 * The actual IOV BAR range is determined by the start address
907 * and the actual size for num_vfs VFs BAR. This check is to
908 * make sure that after shifting, the range will not overlap
909 * with another device.
910 */
911 size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
912 res2.flags = res->flags;
913 res2.start = res->start + (size * offset);
914 res2.end = res2.start + (size * num_vfs) - 1;
915
916 if (res2.end > res->end) {
917 dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
918 i, &res2, res, num_vfs, offset);
919 return -EBUSY;
920 }
921 }
922
923 /*
924 * After doing so, there would be a "hole" in the /proc/iomem when
925 * offset is a positive value. It looks like the device return some
926 * mmio back to the system, which actually no one could use it.
927 */
928 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
929 res = &dev->resource[i + PCI_IOV_RESOURCES];
930 if (!res->flags || !res->parent)
931 continue;
932
933 if (!pnv_pci_is_mem_pref_64(res->flags))
934 continue;
935
936 size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
937 res2 = *res;
938 res->start += size * offset;
939
940 dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
941 i, &res2, res, num_vfs, offset);
942 pci_update_resource(dev, i + PCI_IOV_RESOURCES);
943 }
944 return 0;
945}
946#endif /* CONFIG_PCI_IOV */
947
777#if 0 948#if 0
778static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) 949static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
779{ 950{
@@ -857,7 +1028,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
857 pci_name(dev)); 1028 pci_name(dev));
858 continue; 1029 continue;
859 } 1030 }
860 pdn->pcidev = dev;
861 pdn->pe_number = pe->pe_number; 1031 pdn->pe_number = pe->pe_number;
862 pe->dma_weight += pnv_ioda_dma_weight(dev); 1032 pe->dma_weight += pnv_ioda_dma_weight(dev);
863 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) 1033 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -916,6 +1086,10 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
916 return; 1086 return;
917 } 1087 }
918 1088
1089 pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
1090 GFP_KERNEL, hose->node);
1091 pe->tce32_table->data = pe;
1092
919 /* Associate it with all child devices */ 1093 /* Associate it with all child devices */
920 pnv_ioda_setup_same_PE(bus, pe); 1094 pnv_ioda_setup_same_PE(bus, pe);
921 1095
@@ -974,6 +1148,441 @@ static void pnv_pci_ioda_setup_PEs(void)
974 } 1148 }
975} 1149}
976 1150
1151#ifdef CONFIG_PCI_IOV
1152static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
1153{
1154 struct pci_bus *bus;
1155 struct pci_controller *hose;
1156 struct pnv_phb *phb;
1157 struct pci_dn *pdn;
1158 int i, j;
1159
1160 bus = pdev->bus;
1161 hose = pci_bus_to_host(bus);
1162 phb = hose->private_data;
1163 pdn = pci_get_pdn(pdev);
1164
1165 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
1166 for (j = 0; j < M64_PER_IOV; j++) {
1167 if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
1168 continue;
1169 opal_pci_phb_mmio_enable(phb->opal_id,
1170 OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
1171 clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
1172 pdn->m64_wins[i][j] = IODA_INVALID_M64;
1173 }
1174
1175 return 0;
1176}
1177
1178static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
1179{
1180 struct pci_bus *bus;
1181 struct pci_controller *hose;
1182 struct pnv_phb *phb;
1183 struct pci_dn *pdn;
1184 unsigned int win;
1185 struct resource *res;
1186 int i, j;
1187 int64_t rc;
1188 int total_vfs;
1189 resource_size_t size, start;
1190 int pe_num;
1191 int vf_groups;
1192 int vf_per_group;
1193
1194 bus = pdev->bus;
1195 hose = pci_bus_to_host(bus);
1196 phb = hose->private_data;
1197 pdn = pci_get_pdn(pdev);
1198 total_vfs = pci_sriov_get_totalvfs(pdev);
1199
1200 /* Initialize the m64_wins to IODA_INVALID_M64 */
1201 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
1202 for (j = 0; j < M64_PER_IOV; j++)
1203 pdn->m64_wins[i][j] = IODA_INVALID_M64;
1204
1205 if (pdn->m64_per_iov == M64_PER_IOV) {
1206 vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs: M64_PER_IOV;
1207 vf_per_group = (num_vfs <= M64_PER_IOV)? 1:
1208 roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
1209 } else {
1210 vf_groups = 1;
1211 vf_per_group = 1;
1212 }
1213
1214 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1215 res = &pdev->resource[i + PCI_IOV_RESOURCES];
1216 if (!res->flags || !res->parent)
1217 continue;
1218
1219 if (!pnv_pci_is_mem_pref_64(res->flags))
1220 continue;
1221
1222 for (j = 0; j < vf_groups; j++) {
1223 do {
1224 win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
1225 phb->ioda.m64_bar_idx + 1, 0);
1226
1227 if (win >= phb->ioda.m64_bar_idx + 1)
1228 goto m64_failed;
1229 } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
1230
1231 pdn->m64_wins[i][j] = win;
1232
1233 if (pdn->m64_per_iov == M64_PER_IOV) {
1234 size = pci_iov_resource_size(pdev,
1235 PCI_IOV_RESOURCES + i);
1236 size = size * vf_per_group;
1237 start = res->start + size * j;
1238 } else {
1239 size = resource_size(res);
1240 start = res->start;
1241 }
1242
1243 /* Map the M64 here */
1244 if (pdn->m64_per_iov == M64_PER_IOV) {
1245 pe_num = pdn->offset + j;
1246 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
1247 pe_num, OPAL_M64_WINDOW_TYPE,
1248 pdn->m64_wins[i][j], 0);
1249 }
1250
1251 rc = opal_pci_set_phb_mem_window(phb->opal_id,
1252 OPAL_M64_WINDOW_TYPE,
1253 pdn->m64_wins[i][j],
1254 start,
1255 0, /* unused */
1256 size);
1257
1258
1259 if (rc != OPAL_SUCCESS) {
1260 dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
1261 win, rc);
1262 goto m64_failed;
1263 }
1264
1265 if (pdn->m64_per_iov == M64_PER_IOV)
1266 rc = opal_pci_phb_mmio_enable(phb->opal_id,
1267 OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
1268 else
1269 rc = opal_pci_phb_mmio_enable(phb->opal_id,
1270 OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);
1271
1272 if (rc != OPAL_SUCCESS) {
1273 dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
1274 win, rc);
1275 goto m64_failed;
1276 }
1277 }
1278 }
1279 return 0;
1280
1281m64_failed:
1282 pnv_pci_vf_release_m64(pdev);
1283 return -EBUSY;
1284}
1285
1286static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
1287{
1288 struct pci_bus *bus;
1289 struct pci_controller *hose;
1290 struct pnv_phb *phb;
1291 struct iommu_table *tbl;
1292 unsigned long addr;
1293 int64_t rc;
1294
1295 bus = dev->bus;
1296 hose = pci_bus_to_host(bus);
1297 phb = hose->private_data;
1298 tbl = pe->tce32_table;
1299 addr = tbl->it_base;
1300
1301 opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
1302 pe->pe_number << 1, 1, __pa(addr),
1303 0, 0x1000);
1304
1305 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
1306 pe->pe_number,
1307 (pe->pe_number << 1) + 1,
1308 pe->tce_bypass_base,
1309 0);
1310 if (rc)
1311 pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
1312
1313 iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
1314 free_pages(addr, get_order(TCE32_TABLE_SIZE));
1315 pe->tce32_table = NULL;
1316}
1317
1318static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1319{
1320 struct pci_bus *bus;
1321 struct pci_controller *hose;
1322 struct pnv_phb *phb;
1323 struct pnv_ioda_pe *pe, *pe_n;
1324 struct pci_dn *pdn;
1325 u16 vf_index;
1326 int64_t rc;
1327
1328 bus = pdev->bus;
1329 hose = pci_bus_to_host(bus);
1330 phb = hose->private_data;
1331 pdn = pci_get_pdn(pdev);
1332
1333 if (!pdev->is_physfn)
1334 return;
1335
1336 if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
1337 int vf_group;
1338 int vf_per_group;
1339 int vf_index1;
1340
1341 vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
1342
1343 for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
1344 for (vf_index = vf_group * vf_per_group;
1345 vf_index < (vf_group + 1) * vf_per_group &&
1346 vf_index < num_vfs;
1347 vf_index++)
1348 for (vf_index1 = vf_group * vf_per_group;
1349 vf_index1 < (vf_group + 1) * vf_per_group &&
1350 vf_index1 < num_vfs;
1351 vf_index1++){
1352
1353 rc = opal_pci_set_peltv(phb->opal_id,
1354 pdn->offset + vf_index,
1355 pdn->offset + vf_index1,
1356 OPAL_REMOVE_PE_FROM_DOMAIN);
1357
1358 if (rc)
1359 dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
1360 __func__,
1361 pdn->offset + vf_index1, rc);
1362 }
1363 }
1364
1365 list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
1366 if (pe->parent_dev != pdev)
1367 continue;
1368
1369 pnv_pci_ioda2_release_dma_pe(pdev, pe);
1370
1371 /* Remove from list */
1372 mutex_lock(&phb->ioda.pe_list_mutex);
1373 list_del(&pe->list);
1374 mutex_unlock(&phb->ioda.pe_list_mutex);
1375
1376 pnv_ioda_deconfigure_pe(phb, pe);
1377
1378 pnv_ioda_free_pe(phb, pe->pe_number);
1379 }
1380}
1381
1382void pnv_pci_sriov_disable(struct pci_dev *pdev)
1383{
1384 struct pci_bus *bus;
1385 struct pci_controller *hose;
1386 struct pnv_phb *phb;
1387 struct pci_dn *pdn;
1388 struct pci_sriov *iov;
1389 u16 num_vfs;
1390
1391 bus = pdev->bus;
1392 hose = pci_bus_to_host(bus);
1393 phb = hose->private_data;
1394 pdn = pci_get_pdn(pdev);
1395 iov = pdev->sriov;
1396 num_vfs = pdn->num_vfs;
1397
1398 /* Release VF PEs */
1399 pnv_ioda_release_vf_PE(pdev, num_vfs);
1400
1401 if (phb->type == PNV_PHB_IODA2) {
1402 if (pdn->m64_per_iov == 1)
1403 pnv_pci_vf_resource_shift(pdev, -pdn->offset);
1404
1405 /* Release M64 windows */
1406 pnv_pci_vf_release_m64(pdev);
1407
1408 /* Release PE numbers */
1409 bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
1410 pdn->offset = 0;
1411 }
1412}
1413
1414static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1415 struct pnv_ioda_pe *pe);
1416static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
1417{
1418 struct pci_bus *bus;
1419 struct pci_controller *hose;
1420 struct pnv_phb *phb;
1421 struct pnv_ioda_pe *pe;
1422 int pe_num;
1423 u16 vf_index;
1424 struct pci_dn *pdn;
1425 int64_t rc;
1426
1427 bus = pdev->bus;
1428 hose = pci_bus_to_host(bus);
1429 phb = hose->private_data;
1430 pdn = pci_get_pdn(pdev);
1431
1432 if (!pdev->is_physfn)
1433 return;
1434
1435 /* Reserve PE for each VF */
1436 for (vf_index = 0; vf_index < num_vfs; vf_index++) {
1437 pe_num = pdn->offset + vf_index;
1438
1439 pe = &phb->ioda.pe_array[pe_num];
1440 pe->pe_number = pe_num;
1441 pe->phb = phb;
1442 pe->flags = PNV_IODA_PE_VF;
1443 pe->pbus = NULL;
1444 pe->parent_dev = pdev;
1445 pe->tce32_seg = -1;
1446 pe->mve_number = -1;
1447 pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
1448 pci_iov_virtfn_devfn(pdev, vf_index);
1449
1450 pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
1451 hose->global_number, pdev->bus->number,
1452 PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
1453 PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
1454
1455 if (pnv_ioda_configure_pe(phb, pe)) {
1456 /* XXX What do we do here ? */
1457 if (pe_num)
1458 pnv_ioda_free_pe(phb, pe_num);
1459 pe->pdev = NULL;
1460 continue;
1461 }
1462
1463 pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
1464 GFP_KERNEL, hose->node);
1465 pe->tce32_table->data = pe;
1466
1467 /* Put PE to the list */
1468 mutex_lock(&phb->ioda.pe_list_mutex);
1469 list_add_tail(&pe->list, &phb->ioda.pe_list);
1470 mutex_unlock(&phb->ioda.pe_list_mutex);
1471
1472 pnv_pci_ioda2_setup_dma_pe(phb, pe);
1473 }
1474
1475 if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
1476 int vf_group;
1477 int vf_per_group;
1478 int vf_index1;
1479
1480 vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
1481
1482 for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
1483 for (vf_index = vf_group * vf_per_group;
1484 vf_index < (vf_group + 1) * vf_per_group &&
1485 vf_index < num_vfs;
1486 vf_index++) {
1487 for (vf_index1 = vf_group * vf_per_group;
1488 vf_index1 < (vf_group + 1) * vf_per_group &&
1489 vf_index1 < num_vfs;
1490 vf_index1++) {
1491
1492 rc = opal_pci_set_peltv(phb->opal_id,
1493 pdn->offset + vf_index,
1494 pdn->offset + vf_index1,
1495 OPAL_ADD_PE_TO_DOMAIN);
1496
1497 if (rc)
1498 dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
1499 __func__,
1500 pdn->offset + vf_index1, rc);
1501 }
1502 }
1503 }
1504 }
1505}
1506
1507int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1508{
1509 struct pci_bus *bus;
1510 struct pci_controller *hose;
1511 struct pnv_phb *phb;
1512 struct pci_dn *pdn;
1513 int ret;
1514
1515 bus = pdev->bus;
1516 hose = pci_bus_to_host(bus);
1517 phb = hose->private_data;
1518 pdn = pci_get_pdn(pdev);
1519
1520 if (phb->type == PNV_PHB_IODA2) {
1521 /* Calculate available PE for required VFs */
1522 mutex_lock(&phb->ioda.pe_alloc_mutex);
1523 pdn->offset = bitmap_find_next_zero_area(
1524 phb->ioda.pe_alloc, phb->ioda.total_pe,
1525 0, num_vfs, 0);
1526 if (pdn->offset >= phb->ioda.total_pe) {
1527 mutex_unlock(&phb->ioda.pe_alloc_mutex);
1528 dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
1529 pdn->offset = 0;
1530 return -EBUSY;
1531 }
1532 bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
1533 pdn->num_vfs = num_vfs;
1534 mutex_unlock(&phb->ioda.pe_alloc_mutex);
1535
1536 /* Assign M64 window accordingly */
1537 ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
1538 if (ret) {
1539 dev_info(&pdev->dev, "Not enough M64 window resources\n");
1540 goto m64_failed;
1541 }
1542
1543 /*
1544 * When using one M64 BAR to map one IOV BAR, we need to shift
1545 * the IOV BAR according to the PE# allocated to the VFs.
1546 * Otherwise, the PE# for the VF will conflict with others.
1547 */
1548 if (pdn->m64_per_iov == 1) {
1549 ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
1550 if (ret)
1551 goto m64_failed;
1552 }
1553 }
1554
1555 /* Setup VF PEs */
1556 pnv_ioda_setup_vf_PE(pdev, num_vfs);
1557
1558 return 0;
1559
1560m64_failed:
1561 bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
1562 pdn->offset = 0;
1563
1564 return ret;
1565}
1566
1567int pcibios_sriov_disable(struct pci_dev *pdev)
1568{
1569 pnv_pci_sriov_disable(pdev);
1570
1571 /* Release PCI data */
1572 remove_dev_pci_data(pdev);
1573 return 0;
1574}
1575
1576int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
1577{
1578 /* Allocate PCI data */
1579 add_dev_pci_data(pdev);
1580
1581 pnv_pci_sriov_enable(pdev, num_vfs);
1582 return 0;
1583}
1584#endif /* CONFIG_PCI_IOV */
1585
977static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) 1586static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
978{ 1587{
979 struct pci_dn *pdn = pci_get_pdn(pdev); 1588 struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -989,7 +1598,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
989 1598
990 pe = &phb->ioda.pe_array[pdn->pe_number]; 1599 pe = &phb->ioda.pe_array[pdn->pe_number];
991 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); 1600 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
992 set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table); 1601 set_iommu_table_base_and_group(&pdev->dev, pe->tce32_table);
993} 1602}
994 1603
995static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, 1604static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
@@ -1016,7 +1625,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
1016 } else { 1625 } else {
1017 dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); 1626 dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
1018 set_dma_ops(&pdev->dev, &dma_iommu_ops); 1627 set_dma_ops(&pdev->dev, &dma_iommu_ops);
1019 set_iommu_table_base(&pdev->dev, &pe->tce32_table); 1628 set_iommu_table_base(&pdev->dev, pe->tce32_table);
1020 } 1629 }
1021 *pdev->dev.dma_mask = dma_mask; 1630 *pdev->dev.dma_mask = dma_mask;
1022 return 0; 1631 return 0;
@@ -1053,9 +1662,9 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
1053 list_for_each_entry(dev, &bus->devices, bus_list) { 1662 list_for_each_entry(dev, &bus->devices, bus_list) {
1054 if (add_to_iommu_group) 1663 if (add_to_iommu_group)
1055 set_iommu_table_base_and_group(&dev->dev, 1664 set_iommu_table_base_and_group(&dev->dev,
1056 &pe->tce32_table); 1665 pe->tce32_table);
1057 else 1666 else
1058 set_iommu_table_base(&dev->dev, &pe->tce32_table); 1667 set_iommu_table_base(&dev->dev, pe->tce32_table);
1059 1668
1060 if (dev->subordinate) 1669 if (dev->subordinate)
1061 pnv_ioda_setup_bus_dma(pe, dev->subordinate, 1670 pnv_ioda_setup_bus_dma(pe, dev->subordinate,
@@ -1145,8 +1754,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
1145void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, 1754void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
1146 __be64 *startp, __be64 *endp, bool rm) 1755 __be64 *startp, __be64 *endp, bool rm)
1147{ 1756{
1148 struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, 1757 struct pnv_ioda_pe *pe = tbl->data;
1149 tce32_table);
1150 struct pnv_phb *phb = pe->phb; 1758 struct pnv_phb *phb = pe->phb;
1151 1759
1152 if (phb->type == PNV_PHB_IODA1) 1760 if (phb->type == PNV_PHB_IODA1)
@@ -1167,9 +1775,6 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1167 int64_t rc; 1775 int64_t rc;
1168 void *addr; 1776 void *addr;
1169 1777
1170 /* 256M DMA window, 4K TCE pages, 8 bytes TCE */
1171#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
1172
1173 /* XXX FIXME: Handle 64-bit only DMA devices */ 1778 /* XXX FIXME: Handle 64-bit only DMA devices */
1174 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */ 1779 /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
1175 /* XXX FIXME: Allocate multi-level tables on PHB3 */ 1780 /* XXX FIXME: Allocate multi-level tables on PHB3 */
@@ -1212,7 +1817,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1212 } 1817 }
1213 1818
1214 /* Setup linux iommu table */ 1819 /* Setup linux iommu table */
1215 tbl = &pe->tce32_table; 1820 tbl = pe->tce32_table;
1216 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs, 1821 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
1217 base << 28, IOMMU_PAGE_SHIFT_4K); 1822 base << 28, IOMMU_PAGE_SHIFT_4K);
1218 1823
@@ -1232,12 +1837,19 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1232 TCE_PCI_SWINV_PAIR); 1837 TCE_PCI_SWINV_PAIR);
1233 } 1838 }
1234 iommu_init_table(tbl, phb->hose->node); 1839 iommu_init_table(tbl, phb->hose->node);
1235 iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
1236 1840
1237 if (pe->pdev) 1841 if (pe->flags & PNV_IODA_PE_DEV) {
1842 iommu_register_group(tbl, phb->hose->global_number,
1843 pe->pe_number);
1238 set_iommu_table_base_and_group(&pe->pdev->dev, tbl); 1844 set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
1239 else 1845 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
1846 iommu_register_group(tbl, phb->hose->global_number,
1847 pe->pe_number);
1240 pnv_ioda_setup_bus_dma(pe, pe->pbus, true); 1848 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
1849 } else if (pe->flags & PNV_IODA_PE_VF) {
1850 iommu_register_group(tbl, phb->hose->global_number,
1851 pe->pe_number);
1852 }
1241 1853
1242 return; 1854 return;
1243 fail: 1855 fail:
@@ -1250,8 +1862,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
1250 1862
1251static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) 1863static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
1252{ 1864{
1253 struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, 1865 struct pnv_ioda_pe *pe = tbl->data;
1254 tce32_table);
1255 uint16_t window_id = (pe->pe_number << 1 ) + 1; 1866 uint16_t window_id = (pe->pe_number << 1 ) + 1;
1256 int64_t rc; 1867 int64_t rc;
1257 1868
@@ -1296,10 +1907,10 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
1296 pe->tce_bypass_base = 1ull << 59; 1907 pe->tce_bypass_base = 1ull << 59;
1297 1908
1298 /* Install set_bypass callback for VFIO */ 1909 /* Install set_bypass callback for VFIO */
1299 pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; 1910 pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;
1300 1911
1301 /* Enable bypass by default */ 1912 /* Enable bypass by default */
1302 pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); 1913 pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
1303} 1914}
1304 1915
1305static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, 1916static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -1347,7 +1958,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1347 } 1958 }
1348 1959
1349 /* Setup linux iommu table */ 1960 /* Setup linux iommu table */
1350 tbl = &pe->tce32_table; 1961 tbl = pe->tce32_table;
1351 pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0, 1962 pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
1352 IOMMU_PAGE_SHIFT_4K); 1963 IOMMU_PAGE_SHIFT_4K);
1353 1964
@@ -1365,12 +1976,19 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
1365 tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE); 1976 tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
1366 } 1977 }
1367 iommu_init_table(tbl, phb->hose->node); 1978 iommu_init_table(tbl, phb->hose->node);
1368 iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
1369 1979
1370 if (pe->pdev) 1980 if (pe->flags & PNV_IODA_PE_DEV) {
1981 iommu_register_group(tbl, phb->hose->global_number,
1982 pe->pe_number);
1371 set_iommu_table_base_and_group(&pe->pdev->dev, tbl); 1983 set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
1372 else 1984 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
1985 iommu_register_group(tbl, phb->hose->global_number,
1986 pe->pe_number);
1373 pnv_ioda_setup_bus_dma(pe, pe->pbus, true); 1987 pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
1988 } else if (pe->flags & PNV_IODA_PE_VF) {
1989 iommu_register_group(tbl, phb->hose->global_number,
1990 pe->pe_number);
1991 }
1374 1992
1375 /* Also create a bypass window */ 1993 /* Also create a bypass window */
1376 if (!pnv_iommu_bypass_disabled) 1994 if (!pnv_iommu_bypass_disabled)
@@ -1731,6 +2349,73 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
1731static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } 2349static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
1732#endif /* CONFIG_PCI_MSI */ 2350#endif /* CONFIG_PCI_MSI */
1733 2351
2352#ifdef CONFIG_PCI_IOV
2353static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2354{
2355 struct pci_controller *hose;
2356 struct pnv_phb *phb;
2357 struct resource *res;
2358 int i;
2359 resource_size_t size;
2360 struct pci_dn *pdn;
2361 int mul, total_vfs;
2362
2363 if (!pdev->is_physfn || pdev->is_added)
2364 return;
2365
2366 hose = pci_bus_to_host(pdev->bus);
2367 phb = hose->private_data;
2368
2369 pdn = pci_get_pdn(pdev);
2370 pdn->vfs_expanded = 0;
2371
2372 total_vfs = pci_sriov_get_totalvfs(pdev);
2373 pdn->m64_per_iov = 1;
2374 mul = phb->ioda.total_pe;
2375
2376 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2377 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2378 if (!res->flags || res->parent)
2379 continue;
2380 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2381 dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
2382 i, res);
2383 continue;
2384 }
2385
2386 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2387
2388 /* bigger than 64M */
2389 if (size > (1 << 26)) {
2390 dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, roundup power2\n",
2391 i, res);
2392 pdn->m64_per_iov = M64_PER_IOV;
2393 mul = roundup_pow_of_two(total_vfs);
2394 break;
2395 }
2396 }
2397
2398 for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
2399 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2400 if (!res->flags || res->parent)
2401 continue;
2402 if (!pnv_pci_is_mem_pref_64(res->flags)) {
2403 dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
2404 i, res);
2405 continue;
2406 }
2407
2408 dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
2409 size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
2410 res->end = res->start + size * mul - 1;
2411 dev_dbg(&pdev->dev, " %pR\n", res);
2412 dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
2413 i, res, mul);
2414 }
2415 pdn->vfs_expanded = mul;
2416}
2417#endif /* CONFIG_PCI_IOV */
2418
1734/* 2419/*
1735 * This function is supposed to be called on basis of PE from top 2420 * This function is supposed to be called on basis of PE from top
1736 * to bottom style. So the the I/O or MMIO segment assigned to 2421 * to bottom style. So the the I/O or MMIO segment assigned to
@@ -1777,7 +2462,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
1777 region.start += phb->ioda.io_segsize; 2462 region.start += phb->ioda.io_segsize;
1778 index++; 2463 index++;
1779 } 2464 }
1780 } else if (res->flags & IORESOURCE_MEM) { 2465 } else if ((res->flags & IORESOURCE_MEM) &&
2466 !pnv_pci_is_mem_pref_64(res->flags)) {
1781 region.start = res->start - 2467 region.start = res->start -
1782 hose->mem_offset[0] - 2468 hose->mem_offset[0] -
1783 phb->ioda.m32_pci_base; 2469 phb->ioda.m32_pci_base;
@@ -1907,10 +2593,29 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
1907 return phb->ioda.io_segsize; 2593 return phb->ioda.io_segsize;
1908} 2594}
1909 2595
2596#ifdef CONFIG_PCI_IOV
2597static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
2598 int resno)
2599{
2600 struct pci_dn *pdn = pci_get_pdn(pdev);
2601 resource_size_t align, iov_align;
2602
2603 iov_align = resource_size(&pdev->resource[resno]);
2604 if (iov_align)
2605 return iov_align;
2606
2607 align = pci_iov_resource_size(pdev, resno);
2608 if (pdn->vfs_expanded)
2609 return pdn->vfs_expanded * align;
2610
2611 return align;
2612}
2613#endif /* CONFIG_PCI_IOV */
2614
1910/* Prevent enabling devices for which we couldn't properly 2615/* Prevent enabling devices for which we couldn't properly
1911 * assign a PE 2616 * assign a PE
1912 */ 2617 */
1913static int pnv_pci_enable_device_hook(struct pci_dev *dev) 2618static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
1914{ 2619{
1915 struct pci_controller *hose = pci_bus_to_host(dev->bus); 2620 struct pci_controller *hose = pci_bus_to_host(dev->bus);
1916 struct pnv_phb *phb = hose->private_data; 2621 struct pnv_phb *phb = hose->private_data;
@@ -1922,13 +2627,13 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
1922 * PEs isn't ready. 2627 * PEs isn't ready.
1923 */ 2628 */
1924 if (!phb->initialized) 2629 if (!phb->initialized)
1925 return 0; 2630 return true;
1926 2631
1927 pdn = pci_get_pdn(dev); 2632 pdn = pci_get_pdn(dev);
1928 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 2633 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1929 return -EINVAL; 2634 return false;
1930 2635
1931 return 0; 2636 return true;
1932} 2637}
1933 2638
1934static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, 2639static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
@@ -1988,9 +2693,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
1988 hose->last_busno = 0xff; 2693 hose->last_busno = 0xff;
1989 } 2694 }
1990 hose->private_data = phb; 2695 hose->private_data = phb;
2696 hose->controller_ops = pnv_pci_controller_ops;
1991 phb->hub_id = hub_id; 2697 phb->hub_id = hub_id;
1992 phb->opal_id = phb_id; 2698 phb->opal_id = phb_id;
1993 phb->type = ioda_type; 2699 phb->type = ioda_type;
2700 mutex_init(&phb->ioda.pe_alloc_mutex);
1994 2701
1995 /* Detect specific models for error handling */ 2702 /* Detect specific models for error handling */
1996 if (of_device_is_compatible(np, "ibm,p7ioc-pciex")) 2703 if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
@@ -2050,6 +2757,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2050 2757
2051 INIT_LIST_HEAD(&phb->ioda.pe_dma_list); 2758 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
2052 INIT_LIST_HEAD(&phb->ioda.pe_list); 2759 INIT_LIST_HEAD(&phb->ioda.pe_list);
2760 mutex_init(&phb->ioda.pe_list_mutex);
2053 2761
2054 /* Calculate how many 32-bit TCE segments we have */ 2762 /* Calculate how many 32-bit TCE segments we have */
2055 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28; 2763 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
@@ -2078,9 +2786,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2078 phb->get_pe_state = pnv_ioda_get_pe_state; 2786 phb->get_pe_state = pnv_ioda_get_pe_state;
2079 phb->freeze_pe = pnv_ioda_freeze_pe; 2787 phb->freeze_pe = pnv_ioda_freeze_pe;
2080 phb->unfreeze_pe = pnv_ioda_unfreeze_pe; 2788 phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
2081#ifdef CONFIG_EEH
2082 phb->eeh_ops = &ioda_eeh_ops;
2083#endif
2084 2789
2085 /* Setup RID -> PE mapping function */ 2790 /* Setup RID -> PE mapping function */
2086 phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe; 2791 phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
@@ -2104,9 +2809,15 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2104 * the child P2P bridges) can form individual PE. 2809 * the child P2P bridges) can form individual PE.
2105 */ 2810 */
2106 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; 2811 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
2107 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; 2812 pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
2108 ppc_md.pcibios_window_alignment = pnv_pci_window_alignment; 2813 pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
2109 ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus; 2814 pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
2815
2816#ifdef CONFIG_PCI_IOV
2817 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
2818 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
2819#endif
2820
2110 pci_add_flags(PCI_REASSIGN_ALL_RSRC); 2821 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
2111 2822
2112 /* Reset IODA tables to a clean state */ 2823 /* Reset IODA tables to a clean state */
@@ -2121,8 +2832,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
2121 */ 2832 */
2122 if (is_kdump_kernel()) { 2833 if (is_kdump_kernel()) {
2123 pr_info(" Issue PHB reset ...\n"); 2834 pr_info(" Issue PHB reset ...\n");
2124 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); 2835 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
2125 ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); 2836 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
2126 } 2837 }
2127 2838
2128 /* Remove M64 resource if we can't configure it successfully */ 2839 /* Remove M64 resource if we can't configure it successfully */
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 6ef6d4d8e7e2..4729ca793813 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -133,6 +133,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
133 phb->hose->first_busno = 0; 133 phb->hose->first_busno = 0;
134 phb->hose->last_busno = 0xff; 134 phb->hose->last_busno = 0xff;
135 phb->hose->private_data = phb; 135 phb->hose->private_data = phb;
136 phb->hose->controller_ops = pnv_pci_controller_ops;
136 phb->hub_id = hub_id; 137 phb->hub_id = hub_id;
137 phb->opal_id = phb_id; 138 phb->opal_id = phb_id;
138 phb->type = PNV_PHB_P5IOC2; 139 phb->type = PNV_PHB_P5IOC2;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 54323d6b5166..bca2aeb6e4b6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -366,9 +366,9 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
366 spin_unlock_irqrestore(&phb->lock, flags); 366 spin_unlock_irqrestore(&phb->lock, flags);
367} 367}
368 368
369static void pnv_pci_config_check_eeh(struct pnv_phb *phb, 369static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
370 struct device_node *dn)
371{ 370{
371 struct pnv_phb *phb = pdn->phb->private_data;
372 u8 fstate; 372 u8 fstate;
373 __be16 pcierr; 373 __be16 pcierr;
374 int pe_no; 374 int pe_no;
@@ -379,7 +379,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
379 * setup that yet. So all ER errors should be mapped to 379 * setup that yet. So all ER errors should be mapped to
380 * reserved PE. 380 * reserved PE.
381 */ 381 */
382 pe_no = PCI_DN(dn)->pe_number; 382 pe_no = pdn->pe_number;
383 if (pe_no == IODA_INVALID_PE) { 383 if (pe_no == IODA_INVALID_PE) {
384 if (phb->type == PNV_PHB_P5IOC2) 384 if (phb->type == PNV_PHB_P5IOC2)
385 pe_no = 0; 385 pe_no = 0;
@@ -407,8 +407,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
407 } 407 }
408 408
409 cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n", 409 cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
410 (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn), 410 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
411 pe_no, fstate);
412 411
413 /* Clear the frozen state if applicable */ 412 /* Clear the frozen state if applicable */
414 if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || 413 if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
@@ -425,10 +424,9 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
425 } 424 }
426} 425}
427 426
428int pnv_pci_cfg_read(struct device_node *dn, 427int pnv_pci_cfg_read(struct pci_dn *pdn,
429 int where, int size, u32 *val) 428 int where, int size, u32 *val)
430{ 429{
431 struct pci_dn *pdn = PCI_DN(dn);
432 struct pnv_phb *phb = pdn->phb->private_data; 430 struct pnv_phb *phb = pdn->phb->private_data;
433 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 431 u32 bdfn = (pdn->busno << 8) | pdn->devfn;
434 s64 rc; 432 s64 rc;
@@ -462,10 +460,9 @@ int pnv_pci_cfg_read(struct device_node *dn,
462 return PCIBIOS_SUCCESSFUL; 460 return PCIBIOS_SUCCESSFUL;
463} 461}
464 462
465int pnv_pci_cfg_write(struct device_node *dn, 463int pnv_pci_cfg_write(struct pci_dn *pdn,
466 int where, int size, u32 val) 464 int where, int size, u32 val)
467{ 465{
468 struct pci_dn *pdn = PCI_DN(dn);
469 struct pnv_phb *phb = pdn->phb->private_data; 466 struct pnv_phb *phb = pdn->phb->private_data;
470 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 467 u32 bdfn = (pdn->busno << 8) | pdn->devfn;
471 468
@@ -489,18 +486,17 @@ int pnv_pci_cfg_write(struct device_node *dn,
489} 486}
490 487
491#if CONFIG_EEH 488#if CONFIG_EEH
492static bool pnv_pci_cfg_check(struct pci_controller *hose, 489static bool pnv_pci_cfg_check(struct pci_dn *pdn)
493 struct device_node *dn)
494{ 490{
495 struct eeh_dev *edev = NULL; 491 struct eeh_dev *edev = NULL;
496 struct pnv_phb *phb = hose->private_data; 492 struct pnv_phb *phb = pdn->phb->private_data;
497 493
498 /* EEH not enabled ? */ 494 /* EEH not enabled ? */
499 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 495 if (!(phb->flags & PNV_PHB_FLAG_EEH))
500 return true; 496 return true;
501 497
502 /* PE reset or device removed ? */ 498 /* PE reset or device removed ? */
503 edev = of_node_to_eeh_dev(dn); 499 edev = pdn->edev;
504 if (edev) { 500 if (edev) {
505 if (edev->pe && 501 if (edev->pe &&
506 (edev->pe->state & EEH_PE_CFG_BLOCKED)) 502 (edev->pe->state & EEH_PE_CFG_BLOCKED))
@@ -513,8 +509,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose,
513 return true; 509 return true;
514} 510}
515#else 511#else
516static inline pnv_pci_cfg_check(struct pci_controller *hose, 512static inline pnv_pci_cfg_check(struct pci_dn *pdn)
517 struct device_node *dn)
518{ 513{
519 return true; 514 return true;
520} 515}
@@ -524,32 +519,26 @@ static int pnv_pci_read_config(struct pci_bus *bus,
524 unsigned int devfn, 519 unsigned int devfn,
525 int where, int size, u32 *val) 520 int where, int size, u32 *val)
526{ 521{
527 struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
528 struct pci_dn *pdn; 522 struct pci_dn *pdn;
529 struct pnv_phb *phb; 523 struct pnv_phb *phb;
530 bool found = false;
531 int ret; 524 int ret;
532 525
533 *val = 0xFFFFFFFF; 526 *val = 0xFFFFFFFF;
534 for (dn = busdn->child; dn; dn = dn->sibling) { 527 pdn = pci_get_pdn_by_devfn(bus, devfn);
535 pdn = PCI_DN(dn); 528 if (!pdn)
536 if (pdn && pdn->devfn == devfn) { 529 return PCIBIOS_DEVICE_NOT_FOUND;
537 phb = pdn->phb->private_data;
538 found = true;
539 break;
540 }
541 }
542 530
543 if (!found || !pnv_pci_cfg_check(pdn->phb, dn)) 531 if (!pnv_pci_cfg_check(pdn))
544 return PCIBIOS_DEVICE_NOT_FOUND; 532 return PCIBIOS_DEVICE_NOT_FOUND;
545 533
546 ret = pnv_pci_cfg_read(dn, where, size, val); 534 ret = pnv_pci_cfg_read(pdn, where, size, val);
547 if (phb->flags & PNV_PHB_FLAG_EEH) { 535 phb = pdn->phb->private_data;
536 if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
548 if (*val == EEH_IO_ERROR_VALUE(size) && 537 if (*val == EEH_IO_ERROR_VALUE(size) &&
549 eeh_dev_check_failure(of_node_to_eeh_dev(dn))) 538 eeh_dev_check_failure(pdn->edev))
550 return PCIBIOS_DEVICE_NOT_FOUND; 539 return PCIBIOS_DEVICE_NOT_FOUND;
551 } else { 540 } else {
552 pnv_pci_config_check_eeh(phb, dn); 541 pnv_pci_config_check_eeh(pdn);
553 } 542 }
554 543
555 return ret; 544 return ret;
@@ -559,27 +548,21 @@ static int pnv_pci_write_config(struct pci_bus *bus,
559 unsigned int devfn, 548 unsigned int devfn,
560 int where, int size, u32 val) 549 int where, int size, u32 val)
561{ 550{
562 struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
563 struct pci_dn *pdn; 551 struct pci_dn *pdn;
564 struct pnv_phb *phb; 552 struct pnv_phb *phb;
565 bool found = false;
566 int ret; 553 int ret;
567 554
568 for (dn = busdn->child; dn; dn = dn->sibling) { 555 pdn = pci_get_pdn_by_devfn(bus, devfn);
569 pdn = PCI_DN(dn); 556 if (!pdn)
570 if (pdn && pdn->devfn == devfn) { 557 return PCIBIOS_DEVICE_NOT_FOUND;
571 phb = pdn->phb->private_data;
572 found = true;
573 break;
574 }
575 }
576 558
577 if (!found || !pnv_pci_cfg_check(pdn->phb, dn)) 559 if (!pnv_pci_cfg_check(pdn))
578 return PCIBIOS_DEVICE_NOT_FOUND; 560 return PCIBIOS_DEVICE_NOT_FOUND;
579 561
580 ret = pnv_pci_cfg_write(dn, where, size, val); 562 ret = pnv_pci_cfg_write(pdn, where, size, val);
563 phb = pdn->phb->private_data;
581 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 564 if (!(phb->flags & PNV_PHB_FLAG_EEH))
582 pnv_pci_config_check_eeh(phb, dn); 565 pnv_pci_config_check_eeh(pdn);
583 566
584 return ret; 567 return ret;
585} 568}
@@ -679,66 +662,31 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
679 tbl->it_type = TCE_PCI; 662 tbl->it_type = TCE_PCI;
680} 663}
681 664
682static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
683{
684 struct iommu_table *tbl;
685 const __be64 *basep, *swinvp;
686 const __be32 *sizep;
687
688 basep = of_get_property(hose->dn, "linux,tce-base", NULL);
689 sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
690 if (basep == NULL || sizep == NULL) {
691 pr_err("PCI: %s has missing tce entries !\n",
692 hose->dn->full_name);
693 return NULL;
694 }
695 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
696 if (WARN_ON(!tbl))
697 return NULL;
698 pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
699 be32_to_cpup(sizep), 0, IOMMU_PAGE_SHIFT_4K);
700 iommu_init_table(tbl, hose->node);
701 iommu_register_group(tbl, pci_domain_nr(hose->bus), 0);
702
703 /* Deal with SW invalidated TCEs when needed (BML way) */
704 swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
705 NULL);
706 if (swinvp) {
707 tbl->it_busno = be64_to_cpu(swinvp[1]);
708 tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
709 tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
710 }
711 return tbl;
712}
713
714static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
715 struct pci_dev *pdev)
716{
717 struct device_node *np = pci_bus_to_OF_node(hose->bus);
718 struct pci_dn *pdn;
719
720 if (np == NULL)
721 return;
722 pdn = PCI_DN(np);
723 if (!pdn->iommu_table)
724 pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
725 if (!pdn->iommu_table)
726 return;
727 set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
728}
729
730static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) 665static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
731{ 666{
732 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 667 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
733 struct pnv_phb *phb = hose->private_data; 668 struct pnv_phb *phb = hose->private_data;
669#ifdef CONFIG_PCI_IOV
670 struct pnv_ioda_pe *pe;
671 struct pci_dn *pdn;
672
673 /* Fix the VF pdn PE number */
674 if (pdev->is_virtfn) {
675 pdn = pci_get_pdn(pdev);
676 WARN_ON(pdn->pe_number != IODA_INVALID_PE);
677 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
678 if (pe->rid == ((pdev->bus->number << 8) |
679 (pdev->devfn & 0xff))) {
680 pdn->pe_number = pe->pe_number;
681 pe->pdev = pdev;
682 break;
683 }
684 }
685 }
686#endif /* CONFIG_PCI_IOV */
734 687
735 /* If we have no phb structure, try to setup a fallback based on
736 * the device-tree (RTAS PCI for example)
737 */
738 if (phb && phb->dma_dev_setup) 688 if (phb && phb->dma_dev_setup)
739 phb->dma_dev_setup(phb, pdev); 689 phb->dma_dev_setup(phb, pdev);
740 else
741 pnv_pci_dma_fallback_setup(hose, pdev);
742} 690}
743 691
744int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) 692int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -784,44 +732,36 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
784void __init pnv_pci_init(void) 732void __init pnv_pci_init(void)
785{ 733{
786 struct device_node *np; 734 struct device_node *np;
735 bool found_ioda = false;
787 736
788 pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN); 737 pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
789 738
790 /* OPAL absent, try POPAL first then RTAS detection of PHBs */ 739 /* If we don't have OPAL, eg. in sim, just skip PCI probe */
791 if (!firmware_has_feature(FW_FEATURE_OPAL)) { 740 if (!firmware_has_feature(FW_FEATURE_OPAL))
792#ifdef CONFIG_PPC_POWERNV_RTAS 741 return;
793 init_pci_config_tokens();
794 find_and_init_phbs();
795#endif /* CONFIG_PPC_POWERNV_RTAS */
796 }
797 /* OPAL is here, do our normal stuff */
798 else {
799 int found_ioda = 0;
800 742
801 /* Look for IODA IO-Hubs. We don't support mixing IODA 743 /* Look for IODA IO-Hubs. We don't support mixing IODA
802 * and p5ioc2 due to the need to change some global 744 * and p5ioc2 due to the need to change some global
803 * probing flags 745 * probing flags
804 */ 746 */
805 for_each_compatible_node(np, NULL, "ibm,ioda-hub") { 747 for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
806 pnv_pci_init_ioda_hub(np); 748 pnv_pci_init_ioda_hub(np);
807 found_ioda = 1; 749 found_ioda = true;
808 } 750 }
809 751
810 /* Look for p5ioc2 IO-Hubs */ 752 /* Look for p5ioc2 IO-Hubs */
811 if (!found_ioda) 753 if (!found_ioda)
812 for_each_compatible_node(np, NULL, "ibm,p5ioc2") 754 for_each_compatible_node(np, NULL, "ibm,p5ioc2")
813 pnv_pci_init_p5ioc2_hub(np); 755 pnv_pci_init_p5ioc2_hub(np);
814 756
815 /* Look for ioda2 built-in PHB3's */ 757 /* Look for ioda2 built-in PHB3's */
816 for_each_compatible_node(np, NULL, "ibm,ioda2-phb") 758 for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
817 pnv_pci_init_ioda2_phb(np); 759 pnv_pci_init_ioda2_phb(np);
818 }
819 760
820 /* Setup the linkage between OF nodes and PHBs */ 761 /* Setup the linkage between OF nodes and PHBs */
821 pci_devs_phb_init(); 762 pci_devs_phb_init();
822 763
823 /* Configure IOMMU DMA hooks */ 764 /* Configure IOMMU DMA hooks */
824 ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
825 ppc_md.tce_build = pnv_tce_build_vm; 765 ppc_md.tce_build = pnv_tce_build_vm;
826 ppc_md.tce_free = pnv_tce_free_vm; 766 ppc_md.tce_free = pnv_tce_free_vm;
827 ppc_md.tce_build_rm = pnv_tce_build_rm; 767 ppc_md.tce_build_rm = pnv_tce_build_rm;
@@ -837,3 +777,7 @@ void __init pnv_pci_init(void)
837} 777}
838 778
839machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); 779machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
780
781struct pci_controller_ops pnv_pci_controller_ops = {
782 .dma_dev_setup = pnv_pci_dma_dev_setup,
783};
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 6c02ff8dd69f..070ee888fc95 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -23,6 +23,7 @@ enum pnv_phb_model {
23#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */ 23#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
24#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */ 24#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
25#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */ 25#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
26#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
26 27
27/* Data associated with a PE, including IOMMU tracking etc.. */ 28/* Data associated with a PE, including IOMMU tracking etc.. */
28struct pnv_phb; 29struct pnv_phb;
@@ -34,6 +35,9 @@ struct pnv_ioda_pe {
34 * entire bus (& children). In the former case, pdev 35 * entire bus (& children). In the former case, pdev
35 * is populated, in the later case, pbus is. 36 * is populated, in the later case, pbus is.
36 */ 37 */
38#ifdef CONFIG_PCI_IOV
39 struct pci_dev *parent_dev;
40#endif
37 struct pci_dev *pdev; 41 struct pci_dev *pdev;
38 struct pci_bus *pbus; 42 struct pci_bus *pbus;
39 43
@@ -53,7 +57,7 @@ struct pnv_ioda_pe {
53 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ 57 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
54 int tce32_seg; 58 int tce32_seg;
55 int tce32_segcount; 59 int tce32_segcount;
56 struct iommu_table tce32_table; 60 struct iommu_table *tce32_table;
57 phys_addr_t tce_inval_reg_phys; 61 phys_addr_t tce_inval_reg_phys;
58 62
59 /* 64-bit TCE bypass region */ 63 /* 64-bit TCE bypass region */
@@ -75,22 +79,6 @@ struct pnv_ioda_pe {
75 struct list_head list; 79 struct list_head list;
76}; 80};
77 81
78/* IOC dependent EEH operations */
79#ifdef CONFIG_EEH
80struct pnv_eeh_ops {
81 int (*post_init)(struct pci_controller *hose);
82 int (*set_option)(struct eeh_pe *pe, int option);
83 int (*get_state)(struct eeh_pe *pe);
84 int (*reset)(struct eeh_pe *pe, int option);
85 int (*get_log)(struct eeh_pe *pe, int severity,
86 char *drv_log, unsigned long len);
87 int (*configure_bridge)(struct eeh_pe *pe);
88 int (*err_inject)(struct eeh_pe *pe, int type, int func,
89 unsigned long addr, unsigned long mask);
90 int (*next_error)(struct eeh_pe **pe);
91};
92#endif /* CONFIG_EEH */
93
94#define PNV_PHB_FLAG_EEH (1 << 0) 82#define PNV_PHB_FLAG_EEH (1 << 0)
95 83
96struct pnv_phb { 84struct pnv_phb {
@@ -104,10 +92,6 @@ struct pnv_phb {
104 int initialized; 92 int initialized;
105 spinlock_t lock; 93 spinlock_t lock;
106 94
107#ifdef CONFIG_EEH
108 struct pnv_eeh_ops *eeh_ops;
109#endif
110
111#ifdef CONFIG_DEBUG_FS 95#ifdef CONFIG_DEBUG_FS
112 int has_dbgfs; 96 int has_dbgfs;
113 struct dentry *dbgfs; 97 struct dentry *dbgfs;
@@ -165,6 +149,8 @@ struct pnv_phb {
165 149
166 /* PE allocation bitmap */ 150 /* PE allocation bitmap */
167 unsigned long *pe_alloc; 151 unsigned long *pe_alloc;
152 /* PE allocation mutex */
153 struct mutex pe_alloc_mutex;
168 154
169 /* M32 & IO segment maps */ 155 /* M32 & IO segment maps */
170 unsigned int *m32_segmap; 156 unsigned int *m32_segmap;
@@ -179,6 +165,7 @@ struct pnv_phb {
179 * on the sequence of creation 165 * on the sequence of creation
180 */ 166 */
181 struct list_head pe_list; 167 struct list_head pe_list;
168 struct mutex pe_list_mutex;
182 169
183 /* Reverse map of PEs, will have to extend if 170 /* Reverse map of PEs, will have to extend if
184 * we are to support more than 256 PEs, indexed 171 * we are to support more than 256 PEs, indexed
@@ -213,15 +200,12 @@ struct pnv_phb {
213}; 200};
214 201
215extern struct pci_ops pnv_pci_ops; 202extern struct pci_ops pnv_pci_ops;
216#ifdef CONFIG_EEH
217extern struct pnv_eeh_ops ioda_eeh_ops;
218#endif
219 203
220void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, 204void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
221 unsigned char *log_buff); 205 unsigned char *log_buff);
222int pnv_pci_cfg_read(struct device_node *dn, 206int pnv_pci_cfg_read(struct pci_dn *pdn,
223 int where, int size, u32 *val); 207 int where, int size, u32 *val);
224int pnv_pci_cfg_write(struct device_node *dn, 208int pnv_pci_cfg_write(struct pci_dn *pdn,
225 int where, int size, u32 val); 209 int where, int size, u32 val);
226extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, 210extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
227 void *tce_mem, u64 tce_size, 211 void *tce_mem, u64 tce_size,
@@ -232,6 +216,6 @@ extern void pnv_pci_init_ioda2_phb(struct device_node *np);
232extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, 216extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
233 __be64 *startp, __be64 *endp, bool rm); 217 __be64 *startp, __be64 *endp, bool rm);
234extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); 218extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
235extern int ioda_eeh_phb_reset(struct pci_controller *hose, int option); 219extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
236 220
237#endif /* __POWERNV_PCI_H */ 221#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 604c48e7879a..826d2c9bea56 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
29} 29}
30#endif 30#endif
31 31
32extern struct pci_controller_ops pnv_pci_controller_ops;
33
32extern u32 pnv_get_supported_cpuidle_states(void); 34extern u32 pnv_get_supported_cpuidle_states(void);
33 35
34extern void pnv_lpc_init(void); 36extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index d2de7d5d7574..16fdcb23f4c3 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -32,7 +32,6 @@
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/firmware.h> 33#include <asm/firmware.h>
34#include <asm/xics.h> 34#include <asm/xics.h>
35#include <asm/rtas.h>
36#include <asm/opal.h> 35#include <asm/opal.h>
37#include <asm/kexec.h> 36#include <asm/kexec.h>
38#include <asm/smp.h> 37#include <asm/smp.h>
@@ -278,20 +277,6 @@ static void __init pnv_setup_machdep_opal(void)
278 ppc_md.handle_hmi_exception = opal_handle_hmi_exception; 277 ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
279} 278}
280 279
281#ifdef CONFIG_PPC_POWERNV_RTAS
282static void __init pnv_setup_machdep_rtas(void)
283{
284 if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
285 ppc_md.get_boot_time = rtas_get_boot_time;
286 ppc_md.get_rtc_time = rtas_get_rtc_time;
287 ppc_md.set_rtc_time = rtas_set_rtc_time;
288 }
289 ppc_md.restart = rtas_restart;
290 pm_power_off = rtas_power_off;
291 ppc_md.halt = rtas_halt;
292}
293#endif /* CONFIG_PPC_POWERNV_RTAS */
294
295static u32 supported_cpuidle_states; 280static u32 supported_cpuidle_states;
296 281
297int pnv_save_sprs_for_winkle(void) 282int pnv_save_sprs_for_winkle(void)
@@ -409,37 +394,39 @@ static int __init pnv_init_idle_states(void)
409{ 394{
410 struct device_node *power_mgt; 395 struct device_node *power_mgt;
411 int dt_idle_states; 396 int dt_idle_states;
412 const __be32 *idle_state_flags; 397 u32 *flags;
413 u32 len_flags, flags;
414 int i; 398 int i;
415 399
416 supported_cpuidle_states = 0; 400 supported_cpuidle_states = 0;
417 401
418 if (cpuidle_disable != IDLE_NO_OVERRIDE) 402 if (cpuidle_disable != IDLE_NO_OVERRIDE)
419 return 0; 403 goto out;
420 404
421 if (!firmware_has_feature(FW_FEATURE_OPALv3)) 405 if (!firmware_has_feature(FW_FEATURE_OPALv3))
422 return 0; 406 goto out;
423 407
424 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); 408 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
425 if (!power_mgt) { 409 if (!power_mgt) {
426 pr_warn("opal: PowerMgmt Node not found\n"); 410 pr_warn("opal: PowerMgmt Node not found\n");
427 return 0; 411 goto out;
412 }
413 dt_idle_states = of_property_count_u32_elems(power_mgt,
414 "ibm,cpu-idle-state-flags");
415 if (dt_idle_states < 0) {
416 pr_warn("cpuidle-powernv: no idle states found in the DT\n");
417 goto out;
428 } 418 }
429 419
430 idle_state_flags = of_get_property(power_mgt, 420 flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
431 "ibm,cpu-idle-state-flags", &len_flags); 421 if (of_property_read_u32_array(power_mgt,
432 if (!idle_state_flags) { 422 "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
433 pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n"); 423 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
434 return 0; 424 goto out_free;
435 } 425 }
436 426
437 dt_idle_states = len_flags / sizeof(u32); 427 for (i = 0; i < dt_idle_states; i++)
428 supported_cpuidle_states |= flags[i];
438 429
439 for (i = 0; i < dt_idle_states; i++) {
440 flags = be32_to_cpu(idle_state_flags[i]);
441 supported_cpuidle_states |= flags;
442 }
443 if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { 430 if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
444 patch_instruction( 431 patch_instruction(
445 (unsigned int *)pnv_fastsleep_workaround_at_entry, 432 (unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -449,6 +436,9 @@ static int __init pnv_init_idle_states(void)
449 PPC_INST_NOP); 436 PPC_INST_NOP);
450 } 437 }
451 pnv_alloc_idle_core_states(); 438 pnv_alloc_idle_core_states();
439out_free:
440 kfree(flags);
441out:
452 return 0; 442 return 0;
453} 443}
454 444
@@ -465,10 +455,6 @@ static int __init pnv_probe(void)
465 455
466 if (firmware_has_feature(FW_FEATURE_OPAL)) 456 if (firmware_has_feature(FW_FEATURE_OPAL))
467 pnv_setup_machdep_opal(); 457 pnv_setup_machdep_opal();
468#ifdef CONFIG_PPC_POWERNV_RTAS
469 else if (rtas.base)
470 pnv_setup_machdep_rtas();
471#endif /* CONFIG_PPC_POWERNV_RTAS */
472 458
473 pr_debug("PowerNV detected !\n"); 459 pr_debug("PowerNV detected !\n");
474 460
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 38a45088f633..8f70ba681a78 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -25,7 +25,6 @@
25#include <asm/machdep.h> 25#include <asm/machdep.h>
26#include <asm/cputable.h> 26#include <asm/cputable.h>
27#include <asm/firmware.h> 27#include <asm/firmware.h>
28#include <asm/rtas.h>
29#include <asm/vdso_datapage.h> 28#include <asm/vdso_datapage.h>
30#include <asm/cputhreads.h> 29#include <asm/cputhreads.h>
31#include <asm/xics.h> 30#include <asm/xics.h>
@@ -251,18 +250,6 @@ void __init pnv_smp_init(void)
251{ 250{
252 smp_ops = &pnv_smp_ops; 251 smp_ops = &pnv_smp_ops;
253 252
254 /* XXX We don't yet have a proper entry point from HAL, for
255 * now we rely on kexec-style entry from BML
256 */
257
258#ifdef CONFIG_PPC_RTAS
259 /* Non-lpar has additional take/give timebase */
260 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
261 smp_ops->give_timebase = rtas_give_timebase;
262 smp_ops->take_timebase = rtas_take_timebase;
263 }
264#endif /* CONFIG_PPC_RTAS */
265
266#ifdef CONFIG_HOTPLUG_CPU 253#ifdef CONFIG_HOTPLUG_CPU
267 ppc_md.cpu_die = pnv_smp_cpu_kill_self; 254 ppc_md.cpu_die = pnv_smp_cpu_kill_self;
268#endif 255#endif
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index b358bec6c8cb..3c7707af3384 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -57,7 +57,7 @@ static void ps3_smp_message_pass(int cpu, int msg)
57 " (%d)\n", __func__, __LINE__, cpu, msg, result); 57 " (%d)\n", __func__, __LINE__, cpu, msg, result);
58} 58}
59 59
60static int __init ps3_smp_probe(void) 60static void __init ps3_smp_probe(void)
61{ 61{
62 int cpu; 62 int cpu;
63 63
@@ -100,8 +100,6 @@ static int __init ps3_smp_probe(void)
100 100
101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu); 101 DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
102 } 102 }
103
104 return 2;
105} 103}
106 104
107void ps3_smp_cleanup_cpu(int cpu) 105void ps3_smp_cleanup_cpu(int cpu)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index a758a9c3bbba..54c87d5d349d 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -16,7 +16,6 @@ config PPC_PSERIES
16 select PPC_UDBG_16550 16 select PPC_UDBG_16550
17 select PPC_NATIVE 17 select PPC_NATIVE
18 select PPC_PCI_CHOICE if EXPERT 18 select PPC_PCI_CHOICE if EXPERT
19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 19 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING 20 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG_CPU if SMP 21 select HOTPLUG_CPU if SMP
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index c22bb1b4beb8..b4b11096ea8b 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -10,6 +10,8 @@
10 * 2 as published by the Free Software Foundation. 10 * 2 as published by the Free Software Foundation.
11 */ 11 */
12 12
13#define pr_fmt(fmt) "dlpar: " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/notifier.h> 16#include <linux/notifier.h>
15#include <linux/spinlock.h> 17#include <linux/spinlock.h>
@@ -535,13 +537,125 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
535 return count; 537 return count;
536} 538}
537 539
540#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
541
542static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
543{
544 int rc;
545
546 /* pseries error logs are in BE format, convert to cpu type */
547 switch (hp_elog->id_type) {
548 case PSERIES_HP_ELOG_ID_DRC_COUNT:
549 hp_elog->_drc_u.drc_count =
550 be32_to_cpu(hp_elog->_drc_u.drc_count);
551 break;
552 case PSERIES_HP_ELOG_ID_DRC_INDEX:
553 hp_elog->_drc_u.drc_index =
554 be32_to_cpu(hp_elog->_drc_u.drc_index);
555 }
556
557 switch (hp_elog->resource) {
558 case PSERIES_HP_ELOG_RESOURCE_MEM:
559 rc = dlpar_memory(hp_elog);
560 break;
561 default:
562 pr_warn_ratelimited("Invalid resource (%d) specified\n",
563 hp_elog->resource);
564 rc = -EINVAL;
565 }
566
567 return rc;
568}
569
570static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
571 const char *buf, size_t count)
572{
573 struct pseries_hp_errorlog *hp_elog;
574 const char *arg;
575 int rc;
576
577 hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
578 if (!hp_elog) {
579 rc = -ENOMEM;
580 goto dlpar_store_out;
581 }
582
583 /* Parse out the request from the user, this will be in the form
584 * <resource> <action> <id_type> <id>
585 */
586 arg = buf;
587 if (!strncmp(arg, "memory", 6)) {
588 hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
589 arg += strlen("memory ");
590 } else {
591 pr_err("Invalid resource specified: \"%s\"\n", buf);
592 rc = -EINVAL;
593 goto dlpar_store_out;
594 }
595
596 if (!strncmp(arg, "add", 3)) {
597 hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
598 arg += strlen("add ");
599 } else if (!strncmp(arg, "remove", 6)) {
600 hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
601 arg += strlen("remove ");
602 } else {
603 pr_err("Invalid action specified: \"%s\"\n", buf);
604 rc = -EINVAL;
605 goto dlpar_store_out;
606 }
607
608 if (!strncmp(arg, "index", 5)) {
609 u32 index;
610
611 hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
612 arg += strlen("index ");
613 if (kstrtou32(arg, 0, &index)) {
614 rc = -EINVAL;
615 pr_err("Invalid drc_index specified: \"%s\"\n", buf);
616 goto dlpar_store_out;
617 }
618
619 hp_elog->_drc_u.drc_index = cpu_to_be32(index);
620 } else if (!strncmp(arg, "count", 5)) {
621 u32 count;
622
623 hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
624 arg += strlen("count ");
625 if (kstrtou32(arg, 0, &count)) {
626 rc = -EINVAL;
627 pr_err("Invalid count specified: \"%s\"\n", buf);
628 goto dlpar_store_out;
629 }
630
631 hp_elog->_drc_u.drc_count = cpu_to_be32(count);
632 } else {
633 pr_err("Invalid id_type specified: \"%s\"\n", buf);
634 rc = -EINVAL;
635 goto dlpar_store_out;
636 }
637
638 rc = handle_dlpar_errorlog(hp_elog);
639
640dlpar_store_out:
641 kfree(hp_elog);
642 return rc ? rc : count;
643}
644
645static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
646
538static int __init pseries_dlpar_init(void) 647static int __init pseries_dlpar_init(void)
539{ 648{
649 int rc;
650
651#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
540 ppc_md.cpu_probe = dlpar_cpu_probe; 652 ppc_md.cpu_probe = dlpar_cpu_probe;
541 ppc_md.cpu_release = dlpar_cpu_release; 653 ppc_md.cpu_release = dlpar_cpu_release;
654#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
542 655
543 return 0; 656 rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
657
658 return rc;
544} 659}
545machine_device_initcall(pseries, pseries_dlpar_init); 660machine_device_initcall(pseries, pseries_dlpar_init);
546 661
547#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index a6c7e19f5eb3..2039397cc75d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -118,9 +118,8 @@ static int pseries_eeh_init(void)
118 return 0; 118 return 0;
119} 119}
120 120
121static int pseries_eeh_cap_start(struct device_node *dn) 121static int pseries_eeh_cap_start(struct pci_dn *pdn)
122{ 122{
123 struct pci_dn *pdn = PCI_DN(dn);
124 u32 status; 123 u32 status;
125 124
126 if (!pdn) 125 if (!pdn)
@@ -134,10 +133,9 @@ static int pseries_eeh_cap_start(struct device_node *dn)
134} 133}
135 134
136 135
137static int pseries_eeh_find_cap(struct device_node *dn, int cap) 136static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
138{ 137{
139 struct pci_dn *pdn = PCI_DN(dn); 138 int pos = pseries_eeh_cap_start(pdn);
140 int pos = pseries_eeh_cap_start(dn);
141 int cnt = 48; /* Maximal number of capabilities */ 139 int cnt = 48; /* Maximal number of capabilities */
142 u32 id; 140 u32 id;
143 141
@@ -160,10 +158,9 @@ static int pseries_eeh_find_cap(struct device_node *dn, int cap)
160 return 0; 158 return 0;
161} 159}
162 160
163static int pseries_eeh_find_ecap(struct device_node *dn, int cap) 161static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
164{ 162{
165 struct pci_dn *pdn = PCI_DN(dn); 163 struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
166 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
167 u32 header; 164 u32 header;
168 int pos = 256; 165 int pos = 256;
169 int ttl = (4096 - 256) / 8; 166 int ttl = (4096 - 256) / 8;
@@ -191,53 +188,44 @@ static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
191} 188}
192 189
193/** 190/**
194 * pseries_eeh_of_probe - EEH probe on the given device 191 * pseries_eeh_probe - EEH probe on the given device
195 * @dn: OF node 192 * @pdn: PCI device node
196 * @flag: Unused 193 * @data: Unused
197 * 194 *
198 * When EEH module is installed during system boot, all PCI devices 195 * When EEH module is installed during system boot, all PCI devices
199 * are checked one by one to see if it supports EEH. The function 196 * are checked one by one to see if it supports EEH. The function
200 * is introduced for the purpose. 197 * is introduced for the purpose.
201 */ 198 */
202static void *pseries_eeh_of_probe(struct device_node *dn, void *flag) 199static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
203{ 200{
204 struct eeh_dev *edev; 201 struct eeh_dev *edev;
205 struct eeh_pe pe; 202 struct eeh_pe pe;
206 struct pci_dn *pdn = PCI_DN(dn);
207 const __be32 *classp, *vendorp, *devicep;
208 u32 class_code;
209 const __be32 *regs;
210 u32 pcie_flags; 203 u32 pcie_flags;
211 int enable = 0; 204 int enable = 0;
212 int ret; 205 int ret;
213 206
214 /* Retrieve OF node and eeh device */ 207 /* Retrieve OF node and eeh device */
215 edev = of_node_to_eeh_dev(dn); 208 edev = pdn_to_eeh_dev(pdn);
216 if (edev->pe || !of_device_is_available(dn)) 209 if (!edev || edev->pe)
217 return NULL; 210 return NULL;
218 211
219 /* Retrieve class/vendor/device IDs */ 212 /* Check class/vendor/device IDs */
220 classp = of_get_property(dn, "class-code", NULL); 213 if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
221 vendorp = of_get_property(dn, "vendor-id", NULL);
222 devicep = of_get_property(dn, "device-id", NULL);
223
224 /* Skip for bad OF node or PCI-ISA bridge */
225 if (!classp || !vendorp || !devicep)
226 return NULL;
227 if (dn->type && !strcmp(dn->type, "isa"))
228 return NULL; 214 return NULL;
229 215
230 class_code = of_read_number(classp, 1); 216 /* Skip for PCI-ISA bridge */
217 if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
218 return NULL;
231 219
232 /* 220 /*
233 * Update class code and mode of eeh device. We need 221 * Update class code and mode of eeh device. We need
234 * correctly reflects that current device is root port 222 * correctly reflects that current device is root port
235 * or PCIe switch downstream port. 223 * or PCIe switch downstream port.
236 */ 224 */
237 edev->class_code = class_code; 225 edev->class_code = pdn->class_code;
238 edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX); 226 edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
239 edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP); 227 edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
240 edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR); 228 edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
241 edev->mode &= 0xFFFFFF00; 229 edev->mode &= 0xFFFFFF00;
242 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { 230 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
243 edev->mode |= EEH_DEV_BRIDGE; 231 edev->mode |= EEH_DEV_BRIDGE;
@@ -252,24 +240,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
252 } 240 }
253 } 241 }
254 242
255 /* Retrieve the device address */
256 regs = of_get_property(dn, "reg", NULL);
257 if (!regs) {
258 pr_warn("%s: OF node property %s::reg not found\n",
259 __func__, dn->full_name);
260 return NULL;
261 }
262
263 /* Initialize the fake PE */ 243 /* Initialize the fake PE */
264 memset(&pe, 0, sizeof(struct eeh_pe)); 244 memset(&pe, 0, sizeof(struct eeh_pe));
265 pe.phb = edev->phb; 245 pe.phb = edev->phb;
266 pe.config_addr = of_read_number(regs, 1); 246 pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
267 247
268 /* Enable EEH on the device */ 248 /* Enable EEH on the device */
269 ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE); 249 ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
270 if (!ret) { 250 if (!ret) {
271 edev->config_addr = of_read_number(regs, 1);
272 /* Retrieve PE address */ 251 /* Retrieve PE address */
252 edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
273 edev->pe_config_addr = eeh_ops->get_pe_addr(&pe); 253 edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
274 pe.addr = edev->pe_config_addr; 254 pe.addr = edev->pe_config_addr;
275 255
@@ -285,16 +265,17 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
285 eeh_add_flag(EEH_ENABLED); 265 eeh_add_flag(EEH_ENABLED);
286 eeh_add_to_parent_pe(edev); 266 eeh_add_to_parent_pe(edev);
287 267
288 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n", 268 pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
289 __func__, dn->full_name, pe.phb->global_number, 269 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
290 pe.addr, pe.config_addr); 270 PCI_FUNC(pdn->devfn), pe.phb->global_number,
291 } else if (dn->parent && of_node_to_eeh_dev(dn->parent) && 271 pe.addr);
292 (of_node_to_eeh_dev(dn->parent))->pe) { 272 } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
273 (pdn_to_eeh_dev(pdn->parent))->pe) {
293 /* This device doesn't support EEH, but it may have an 274 /* This device doesn't support EEH, but it may have an
294 * EEH parent, in which case we mark it as supported. 275 * EEH parent, in which case we mark it as supported.
295 */ 276 */
296 edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr; 277 edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
297 edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr; 278 edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
298 eeh_add_to_parent_pe(edev); 279 eeh_add_to_parent_pe(edev);
299 } 280 }
300 } 281 }
@@ -670,45 +651,36 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
670 651
671/** 652/**
672 * pseries_eeh_read_config - Read PCI config space 653 * pseries_eeh_read_config - Read PCI config space
673 * @dn: device node 654 * @pdn: PCI device node
674 * @where: PCI address 655 * @where: PCI address
675 * @size: size to read 656 * @size: size to read
676 * @val: return value 657 * @val: return value
677 * 658 *
678 * Read config space from the speicifed device 659 * Read config space from the speicifed device
679 */ 660 */
680static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val) 661static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
681{ 662{
682 struct pci_dn *pdn;
683
684 pdn = PCI_DN(dn);
685
686 return rtas_read_config(pdn, where, size, val); 663 return rtas_read_config(pdn, where, size, val);
687} 664}
688 665
689/** 666/**
690 * pseries_eeh_write_config - Write PCI config space 667 * pseries_eeh_write_config - Write PCI config space
691 * @dn: device node 668 * @pdn: PCI device node
692 * @where: PCI address 669 * @where: PCI address
693 * @size: size to write 670 * @size: size to write
694 * @val: value to be written 671 * @val: value to be written
695 * 672 *
696 * Write config space to the specified device 673 * Write config space to the specified device
697 */ 674 */
698static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val) 675static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
699{ 676{
700 struct pci_dn *pdn;
701
702 pdn = PCI_DN(dn);
703
704 return rtas_write_config(pdn, where, size, val); 677 return rtas_write_config(pdn, where, size, val);
705} 678}
706 679
707static struct eeh_ops pseries_eeh_ops = { 680static struct eeh_ops pseries_eeh_ops = {
708 .name = "pseries", 681 .name = "pseries",
709 .init = pseries_eeh_init, 682 .init = pseries_eeh_init,
710 .of_probe = pseries_eeh_of_probe, 683 .probe = pseries_eeh_probe,
711 .dev_probe = NULL,
712 .set_option = pseries_eeh_set_option, 684 .set_option = pseries_eeh_set_option,
713 .get_pe_addr = pseries_eeh_get_pe_addr, 685 .get_pe_addr = pseries_eeh_get_pe_addr,
714 .get_state = pseries_eeh_get_state, 686 .get_state = pseries_eeh_get_state,
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index fa41f0da5b6f..0ced387e1463 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -9,11 +9,14 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
13
12#include <linux/of.h> 14#include <linux/of.h>
13#include <linux/of_address.h> 15#include <linux/of_address.h>
14#include <linux/memblock.h> 16#include <linux/memblock.h>
15#include <linux/memory.h> 17#include <linux/memory.h>
16#include <linux/memory_hotplug.h> 18#include <linux/memory_hotplug.h>
19#include <linux/slab.h>
17 20
18#include <asm/firmware.h> 21#include <asm/firmware.h>
19#include <asm/machdep.h> 22#include <asm/machdep.h>
@@ -21,6 +24,8 @@
21#include <asm/sparsemem.h> 24#include <asm/sparsemem.h>
22#include "pseries.h" 25#include "pseries.h"
23 26
27static bool rtas_hp_event;
28
24unsigned long pseries_memory_block_size(void) 29unsigned long pseries_memory_block_size(void)
25{ 30{
26 struct device_node *np; 31 struct device_node *np;
@@ -64,6 +69,67 @@ unsigned long pseries_memory_block_size(void)
64 return memblock_size; 69 return memblock_size;
65} 70}
66 71
72static void dlpar_free_drconf_property(struct property *prop)
73{
74 kfree(prop->name);
75 kfree(prop->value);
76 kfree(prop);
77}
78
79static struct property *dlpar_clone_drconf_property(struct device_node *dn)
80{
81 struct property *prop, *new_prop;
82 struct of_drconf_cell *lmbs;
83 u32 num_lmbs, *p;
84 int i;
85
86 prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
87 if (!prop)
88 return NULL;
89
90 new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
91 if (!new_prop)
92 return NULL;
93
94 new_prop->name = kstrdup(prop->name, GFP_KERNEL);
95 new_prop->value = kmalloc(prop->length, GFP_KERNEL);
96 if (!new_prop->name || !new_prop->value) {
97 dlpar_free_drconf_property(new_prop);
98 return NULL;
99 }
100
101 memcpy(new_prop->value, prop->value, prop->length);
102 new_prop->length = prop->length;
103
104 /* Convert the property to cpu endian-ness */
105 p = new_prop->value;
106 *p = be32_to_cpu(*p);
107
108 num_lmbs = *p++;
109 lmbs = (struct of_drconf_cell *)p;
110
111 for (i = 0; i < num_lmbs; i++) {
112 lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
113 lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
114 lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
115 }
116
117 return new_prop;
118}
119
120static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
121{
122 unsigned long section_nr;
123 struct mem_section *mem_sect;
124 struct memory_block *mem_block;
125
126 section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
127 mem_sect = __nr_to_section(section_nr);
128
129 mem_block = find_memory_block(mem_sect);
130 return mem_block;
131}
132
67#ifdef CONFIG_MEMORY_HOTREMOVE 133#ifdef CONFIG_MEMORY_HOTREMOVE
68static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) 134static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
69{ 135{
@@ -122,6 +188,173 @@ static int pseries_remove_mem_node(struct device_node *np)
122 pseries_remove_memblock(base, lmb_size); 188 pseries_remove_memblock(base, lmb_size);
123 return 0; 189 return 0;
124} 190}
191
192static bool lmb_is_removable(struct of_drconf_cell *lmb)
193{
194 int i, scns_per_block;
195 int rc = 1;
196 unsigned long pfn, block_sz;
197 u64 phys_addr;
198
199 if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
200 return false;
201
202 block_sz = memory_block_size_bytes();
203 scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
204 phys_addr = lmb->base_addr;
205
206 for (i = 0; i < scns_per_block; i++) {
207 pfn = PFN_DOWN(phys_addr);
208 if (!pfn_present(pfn))
209 continue;
210
211 rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
212 phys_addr += MIN_MEMORY_BLOCK_SIZE;
213 }
214
215 return rc ? true : false;
216}
217
218static int dlpar_add_lmb(struct of_drconf_cell *);
219
220static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
221{
222 struct memory_block *mem_block;
223 unsigned long block_sz;
224 int nid, rc;
225
226 if (!lmb_is_removable(lmb))
227 return -EINVAL;
228
229 mem_block = lmb_to_memblock(lmb);
230 if (!mem_block)
231 return -EINVAL;
232
233 rc = device_offline(&mem_block->dev);
234 put_device(&mem_block->dev);
235 if (rc)
236 return rc;
237
238 block_sz = pseries_memory_block_size();
239 nid = memory_add_physaddr_to_nid(lmb->base_addr);
240
241 remove_memory(nid, lmb->base_addr, block_sz);
242
243 /* Update memory regions for memory remove */
244 memblock_remove(lmb->base_addr, block_sz);
245
246 dlpar_release_drc(lmb->drc_index);
247
248 lmb->flags &= ~DRCONF_MEM_ASSIGNED;
249 return 0;
250}
251
252static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
253 struct property *prop)
254{
255 struct of_drconf_cell *lmbs;
256 int lmbs_removed = 0;
257 int lmbs_available = 0;
258 u32 num_lmbs, *p;
259 int i, rc;
260
261 pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
262
263 if (lmbs_to_remove == 0)
264 return -EINVAL;
265
266 p = prop->value;
267 num_lmbs = *p++;
268 lmbs = (struct of_drconf_cell *)p;
269
270 /* Validate that there are enough LMBs to satisfy the request */
271 for (i = 0; i < num_lmbs; i++) {
272 if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
273 lmbs_available++;
274 }
275
276 if (lmbs_available < lmbs_to_remove)
277 return -EINVAL;
278
279 for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
280 rc = dlpar_remove_lmb(&lmbs[i]);
281 if (rc)
282 continue;
283
284 lmbs_removed++;
285
286 /* Mark this lmb so we can add it later if all of the
287 * requested LMBs cannot be removed.
288 */
289 lmbs[i].reserved = 1;
290 }
291
292 if (lmbs_removed != lmbs_to_remove) {
293 pr_err("Memory hot-remove failed, adding LMB's back\n");
294
295 for (i = 0; i < num_lmbs; i++) {
296 if (!lmbs[i].reserved)
297 continue;
298
299 rc = dlpar_add_lmb(&lmbs[i]);
300 if (rc)
301 pr_err("Failed to add LMB back, drc index %x\n",
302 lmbs[i].drc_index);
303
304 lmbs[i].reserved = 0;
305 }
306
307 rc = -EINVAL;
308 } else {
309 for (i = 0; i < num_lmbs; i++) {
310 if (!lmbs[i].reserved)
311 continue;
312
313 pr_info("Memory at %llx was hot-removed\n",
314 lmbs[i].base_addr);
315
316 lmbs[i].reserved = 0;
317 }
318 rc = 0;
319 }
320
321 return rc;
322}
323
324static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
325{
326 struct of_drconf_cell *lmbs;
327 u32 num_lmbs, *p;
328 int lmb_found;
329 int i, rc;
330
331 pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
332
333 p = prop->value;
334 num_lmbs = *p++;
335 lmbs = (struct of_drconf_cell *)p;
336
337 lmb_found = 0;
338 for (i = 0; i < num_lmbs; i++) {
339 if (lmbs[i].drc_index == drc_index) {
340 lmb_found = 1;
341 rc = dlpar_remove_lmb(&lmbs[i]);
342 break;
343 }
344 }
345
346 if (!lmb_found)
347 rc = -EINVAL;
348
349 if (rc)
350 pr_info("Failed to hot-remove memory at %llx\n",
351 lmbs[i].base_addr);
352 else
353 pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
354
355 return rc;
356}
357
125#else 358#else
126static inline int pseries_remove_memblock(unsigned long base, 359static inline int pseries_remove_memblock(unsigned long base,
127 unsigned int memblock_size) 360 unsigned int memblock_size)
@@ -132,8 +365,261 @@ static inline int pseries_remove_mem_node(struct device_node *np)
132{ 365{
133 return 0; 366 return 0;
134} 367}
368static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
369{
370 return -EOPNOTSUPP;
371}
372static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
373{
374 return -EOPNOTSUPP;
375}
376static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
377 struct property *prop)
378{
379 return -EOPNOTSUPP;
380}
381static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
382{
383 return -EOPNOTSUPP;
384}
385
135#endif /* CONFIG_MEMORY_HOTREMOVE */ 386#endif /* CONFIG_MEMORY_HOTREMOVE */
136 387
388static int dlpar_add_lmb(struct of_drconf_cell *lmb)
389{
390 struct memory_block *mem_block;
391 unsigned long block_sz;
392 int nid, rc;
393
394 if (lmb->flags & DRCONF_MEM_ASSIGNED)
395 return -EINVAL;
396
397 block_sz = memory_block_size_bytes();
398
399 rc = dlpar_acquire_drc(lmb->drc_index);
400 if (rc)
401 return rc;
402
403 /* Find the node id for this address */
404 nid = memory_add_physaddr_to_nid(lmb->base_addr);
405
406 /* Add the memory */
407 rc = add_memory(nid, lmb->base_addr, block_sz);
408 if (rc) {
409 dlpar_release_drc(lmb->drc_index);
410 return rc;
411 }
412
413 /* Register this block of memory */
414 rc = memblock_add(lmb->base_addr, block_sz);
415 if (rc) {
416 remove_memory(nid, lmb->base_addr, block_sz);
417 dlpar_release_drc(lmb->drc_index);
418 return rc;
419 }
420
421 mem_block = lmb_to_memblock(lmb);
422 if (!mem_block) {
423 remove_memory(nid, lmb->base_addr, block_sz);
424 dlpar_release_drc(lmb->drc_index);
425 return -EINVAL;
426 }
427
428 rc = device_online(&mem_block->dev);
429 put_device(&mem_block->dev);
430 if (rc) {
431 remove_memory(nid, lmb->base_addr, block_sz);
432 dlpar_release_drc(lmb->drc_index);
433 return rc;
434 }
435
436 lmb->flags |= DRCONF_MEM_ASSIGNED;
437 return 0;
438}
439
440static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
441{
442 struct of_drconf_cell *lmbs;
443 u32 num_lmbs, *p;
444 int lmbs_available = 0;
445 int lmbs_added = 0;
446 int i, rc;
447
448 pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
449
450 if (lmbs_to_add == 0)
451 return -EINVAL;
452
453 p = prop->value;
454 num_lmbs = *p++;
455 lmbs = (struct of_drconf_cell *)p;
456
457 /* Validate that there are enough LMBs to satisfy the request */
458 for (i = 0; i < num_lmbs; i++) {
459 if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
460 lmbs_available++;
461 }
462
463 if (lmbs_available < lmbs_to_add)
464 return -EINVAL;
465
466 for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
467 rc = dlpar_add_lmb(&lmbs[i]);
468 if (rc)
469 continue;
470
471 lmbs_added++;
472
473 /* Mark this lmb so we can remove it later if all of the
474 * requested LMBs cannot be added.
475 */
476 lmbs[i].reserved = 1;
477 }
478
479 if (lmbs_added != lmbs_to_add) {
480 pr_err("Memory hot-add failed, removing any added LMBs\n");
481
482 for (i = 0; i < num_lmbs; i++) {
483 if (!lmbs[i].reserved)
484 continue;
485
486 rc = dlpar_remove_lmb(&lmbs[i]);
487 if (rc)
488 pr_err("Failed to remove LMB, drc index %x\n",
489 be32_to_cpu(lmbs[i].drc_index));
490 }
491 rc = -EINVAL;
492 } else {
493 for (i = 0; i < num_lmbs; i++) {
494 if (!lmbs[i].reserved)
495 continue;
496
497 pr_info("Memory at %llx (drc index %x) was hot-added\n",
498 lmbs[i].base_addr, lmbs[i].drc_index);
499 lmbs[i].reserved = 0;
500 }
501 }
502
503 return rc;
504}
505
506static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
507{
508 struct of_drconf_cell *lmbs;
509 u32 num_lmbs, *p;
510 int i, lmb_found;
511 int rc;
512
513 pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
514
515 p = prop->value;
516 num_lmbs = *p++;
517 lmbs = (struct of_drconf_cell *)p;
518
519 lmb_found = 0;
520 for (i = 0; i < num_lmbs; i++) {
521 if (lmbs[i].drc_index == drc_index) {
522 lmb_found = 1;
523 rc = dlpar_add_lmb(&lmbs[i]);
524 break;
525 }
526 }
527
528 if (!lmb_found)
529 rc = -EINVAL;
530
531 if (rc)
532 pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
533 else
534 pr_info("Memory at %llx (drc index %x) was hot-added\n",
535 lmbs[i].base_addr, drc_index);
536
537 return rc;
538}
539
540static void dlpar_update_drconf_property(struct device_node *dn,
541 struct property *prop)
542{
543 struct of_drconf_cell *lmbs;
544 u32 num_lmbs, *p;
545 int i;
546
547 /* Convert the property back to BE */
548 p = prop->value;
549 num_lmbs = *p;
550 *p = cpu_to_be32(*p);
551 p++;
552
553 lmbs = (struct of_drconf_cell *)p;
554 for (i = 0; i < num_lmbs; i++) {
555 lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
556 lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
557 lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
558 }
559
560 rtas_hp_event = true;
561 of_update_property(dn, prop);
562 rtas_hp_event = false;
563}
564
565int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
566{
567 struct device_node *dn;
568 struct property *prop;
569 u32 count, drc_index;
570 int rc;
571
572 count = hp_elog->_drc_u.drc_count;
573 drc_index = hp_elog->_drc_u.drc_index;
574
575 lock_device_hotplug();
576
577 dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
578 if (!dn) {
579 rc = -EINVAL;
580 goto dlpar_memory_out;
581 }
582
583 prop = dlpar_clone_drconf_property(dn);
584 if (!prop) {
585 rc = -EINVAL;
586 goto dlpar_memory_out;
587 }
588
589 switch (hp_elog->action) {
590 case PSERIES_HP_ELOG_ACTION_ADD:
591 if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
592 rc = dlpar_memory_add_by_count(count, prop);
593 else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
594 rc = dlpar_memory_add_by_index(drc_index, prop);
595 else
596 rc = -EINVAL;
597 break;
598 case PSERIES_HP_ELOG_ACTION_REMOVE:
599 if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
600 rc = dlpar_memory_remove_by_count(count, prop);
601 else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
602 rc = dlpar_memory_remove_by_index(drc_index, prop);
603 else
604 rc = -EINVAL;
605 break;
606 default:
607 pr_err("Invalid action (%d) specified\n", hp_elog->action);
608 rc = -EINVAL;
609 break;
610 }
611
612 if (rc)
613 dlpar_free_drconf_property(prop);
614 else
615 dlpar_update_drconf_property(dn, prop);
616
617dlpar_memory_out:
618 of_node_put(dn);
619 unlock_device_hotplug();
620 return rc;
621}
622
137static int pseries_add_mem_node(struct device_node *np) 623static int pseries_add_mem_node(struct device_node *np)
138{ 624{
139 const char *type; 625 const char *type;
@@ -174,6 +660,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
174 __be32 *p; 660 __be32 *p;
175 int i, rc = -EINVAL; 661 int i, rc = -EINVAL;
176 662
663 if (rtas_hp_event)
664 return 0;
665
177 memblock_size = pseries_memory_block_size(); 666 memblock_size = pseries_memory_block_size();
178 if (!memblock_size) 667 if (!memblock_size)
179 return -EINVAL; 668 return -EINVAL;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 7803a19adb31..61d5a17f45c0 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -49,6 +49,7 @@
49#include <asm/mmzone.h> 49#include <asm/mmzone.h>
50#include <asm/plpar_wrappers.h> 50#include <asm/plpar_wrappers.h>
51 51
52#include "pseries.h"
52 53
53static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, 54static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
54 __be64 *startp, __be64 *endp) 55 __be64 *startp, __be64 *endp)
@@ -1307,16 +1308,16 @@ void iommu_init_early_pSeries(void)
1307 ppc_md.tce_free = tce_free_pSeriesLP; 1308 ppc_md.tce_free = tce_free_pSeriesLP;
1308 } 1309 }
1309 ppc_md.tce_get = tce_get_pSeriesLP; 1310 ppc_md.tce_get = tce_get_pSeriesLP;
1310 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; 1311 pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
1311 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; 1312 pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
1312 ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; 1313 ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
1313 ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP; 1314 ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
1314 } else { 1315 } else {
1315 ppc_md.tce_build = tce_build_pSeries; 1316 ppc_md.tce_build = tce_build_pSeries;
1316 ppc_md.tce_free = tce_free_pSeries; 1317 ppc_md.tce_free = tce_free_pSeries;
1317 ppc_md.tce_get = tce_get_pseries; 1318 ppc_md.tce_get = tce_get_pseries;
1318 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries; 1319 pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
1319 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries; 1320 pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
1320 } 1321 }
1321 1322
1322 1323
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 8f35d525cede..ceb18d349459 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -320,28 +320,34 @@ static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
320{ 320{
321 u64 streamid; 321 u64 streamid;
322 int rc; 322 int rc;
323 int vasi_rc = 0;
324 323
325 rc = kstrtou64(buf, 0, &streamid); 324 rc = kstrtou64(buf, 0, &streamid);
326 if (rc) 325 if (rc)
327 return rc; 326 return rc;
328 327
329 do { 328 do {
330 rc = rtas_ibm_suspend_me(streamid, &vasi_rc); 329 rc = rtas_ibm_suspend_me(streamid);
331 if (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE) 330 if (rc == -EAGAIN)
332 ssleep(1); 331 ssleep(1);
333 } while (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE); 332 } while (rc == -EAGAIN);
334 333
335 if (rc) 334 if (rc)
336 return rc; 335 return rc;
337 if (vasi_rc)
338 return vasi_rc;
339 336
340 post_mobility_fixup(); 337 post_mobility_fixup();
341 return count; 338 return count;
342} 339}
343 340
341/*
342 * Used by drmgr to determine the kernel behavior of the migration interface.
343 *
344 * Version 1: Performs all PAPR requirements for migration including
345 * firmware activation and device tree update.
346 */
347#define MIGRATION_API_VERSION 1
348
344static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store); 349static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store);
350static CLASS_ATTR_STRING(api_version, S_IRUGO, __stringify(MIGRATION_API_VERSION));
345 351
346static int __init mobility_sysfs_init(void) 352static int __init mobility_sysfs_init(void)
347{ 353{
@@ -352,7 +358,13 @@ static int __init mobility_sysfs_init(void)
352 return -ENOMEM; 358 return -ENOMEM;
353 359
354 rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr); 360 rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
361 if (rc)
362 pr_err("mobility: unable to create migration sysfs file (%d)\n", rc);
355 363
356 return rc; 364 rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
365 if (rc)
366 pr_err("mobility: unable to create api_version sysfs file (%d)\n", rc);
367
368 return 0;
357} 369}
358machine_device_initcall(pseries, mobility_sysfs_init); 370machine_device_initcall(pseries, mobility_sysfs_init);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 691a154c286d..c8d24f9a6948 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -195,6 +195,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
195static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) 195static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
196{ 196{
197 struct device_node *dn; 197 struct device_node *dn;
198 struct pci_dn *pdn;
198 struct eeh_dev *edev; 199 struct eeh_dev *edev;
199 200
200 /* Found our PE and assume 8 at that point. */ 201 /* Found our PE and assume 8 at that point. */
@@ -204,10 +205,11 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
204 return NULL; 205 return NULL;
205 206
206 /* Get the top level device in the PE */ 207 /* Get the top level device in the PE */
207 edev = of_node_to_eeh_dev(dn); 208 edev = pdn_to_eeh_dev(PCI_DN(dn));
208 if (edev->pe) 209 if (edev->pe)
209 edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); 210 edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
210 dn = eeh_dev_to_of_node(edev); 211 pdn = eeh_dev_to_pdn(edev);
212 dn = pdn ? pdn->node : NULL;
211 if (!dn) 213 if (!dn)
212 return NULL; 214 return NULL;
213 215
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 054a0ed5c7ee..9f8184175c86 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -20,7 +20,6 @@
20#include <linux/kmsg_dump.h> 20#include <linux/kmsg_dump.h>
21#include <linux/pstore.h> 21#include <linux/pstore.h>
22#include <linux/ctype.h> 22#include <linux/ctype.h>
23#include <linux/zlib.h>
24#include <asm/uaccess.h> 23#include <asm/uaccess.h>
25#include <asm/nvram.h> 24#include <asm/nvram.h>
26#include <asm/rtas.h> 25#include <asm/rtas.h>
@@ -30,129 +29,17 @@
30/* Max bytes to read/write in one go */ 29/* Max bytes to read/write in one go */
31#define NVRW_CNT 0x20 30#define NVRW_CNT 0x20
32 31
33/*
34 * Set oops header version to distinguish between old and new format header.
35 * lnx,oops-log partition max size is 4000, header version > 4000 will
36 * help in identifying new header.
37 */
38#define OOPS_HDR_VERSION 5000
39
40static unsigned int nvram_size; 32static unsigned int nvram_size;
41static int nvram_fetch, nvram_store; 33static int nvram_fetch, nvram_store;
42static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ 34static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
43static DEFINE_SPINLOCK(nvram_lock); 35static DEFINE_SPINLOCK(nvram_lock);
44 36
45struct err_log_info {
46 __be32 error_type;
47 __be32 seq_num;
48};
49
50struct nvram_os_partition {
51 const char *name;
52 int req_size; /* desired size, in bytes */
53 int min_size; /* minimum acceptable size (0 means req_size) */
54 long size; /* size of data portion (excluding err_log_info) */
55 long index; /* offset of data portion of partition */
56 bool os_partition; /* partition initialized by OS, not FW */
57};
58
59static struct nvram_os_partition rtas_log_partition = {
60 .name = "ibm,rtas-log",
61 .req_size = 2079,
62 .min_size = 1055,
63 .index = -1,
64 .os_partition = true
65};
66
67static struct nvram_os_partition oops_log_partition = {
68 .name = "lnx,oops-log",
69 .req_size = 4000,
70 .min_size = 2000,
71 .index = -1,
72 .os_partition = true
73};
74
75static const char *pseries_nvram_os_partitions[] = {
76 "ibm,rtas-log",
77 "lnx,oops-log",
78 NULL
79};
80
81struct oops_log_info {
82 __be16 version;
83 __be16 report_length;
84 __be64 timestamp;
85} __attribute__((packed));
86
87static void oops_to_nvram(struct kmsg_dumper *dumper,
88 enum kmsg_dump_reason reason);
89
90static struct kmsg_dumper nvram_kmsg_dumper = {
91 .dump = oops_to_nvram
92};
93
94/* See clobbering_unread_rtas_event() */ 37/* See clobbering_unread_rtas_event() */
95#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */ 38#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
96static unsigned long last_unread_rtas_event; /* timestamp */ 39static time64_t last_unread_rtas_event; /* timestamp */
97
98/*
99 * For capturing and compressing an oops or panic report...
100
101 * big_oops_buf[] holds the uncompressed text we're capturing.
102 *
103 * oops_buf[] holds the compressed text, preceded by a oops header.
104 * oops header has u16 holding the version of oops header (to differentiate
105 * between old and new format header) followed by u16 holding the length of
106 * the compressed* text (*Or uncompressed, if compression fails.) and u64
107 * holding the timestamp. oops_buf[] gets written to NVRAM.
108 *
109 * oops_log_info points to the header. oops_data points to the compressed text.
110 *
111 * +- oops_buf
112 * | +- oops_data
113 * v v
114 * +-----------+-----------+-----------+------------------------+
115 * | version | length | timestamp | text |
116 * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
117 * +-----------+-----------+-----------+------------------------+
118 * ^
119 * +- oops_log_info
120 *
121 * We preallocate these buffers during init to avoid kmalloc during oops/panic.
122 */
123static size_t big_oops_buf_sz;
124static char *big_oops_buf, *oops_buf;
125static char *oops_data;
126static size_t oops_data_sz;
127
128/* Compression parameters */
129#define COMPR_LEVEL 6
130#define WINDOW_BITS 12
131#define MEM_LEVEL 4
132static struct z_stream_s stream;
133 40
134#ifdef CONFIG_PSTORE 41#ifdef CONFIG_PSTORE
135static struct nvram_os_partition of_config_partition = { 42time64_t last_rtas_event;
136 .name = "of-config",
137 .index = -1,
138 .os_partition = false
139};
140
141static struct nvram_os_partition common_partition = {
142 .name = "common",
143 .index = -1,
144 .os_partition = false
145};
146
147static enum pstore_type_id nvram_type_ids[] = {
148 PSTORE_TYPE_DMESG,
149 PSTORE_TYPE_PPC_RTAS,
150 PSTORE_TYPE_PPC_OF,
151 PSTORE_TYPE_PPC_COMMON,
152 -1
153};
154static int read_type;
155static unsigned long last_rtas_event;
156#endif 43#endif
157 44
158static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) 45static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
@@ -246,132 +133,26 @@ static ssize_t pSeries_nvram_get_size(void)
246 return nvram_size ? nvram_size : -ENODEV; 133 return nvram_size ? nvram_size : -ENODEV;
247} 134}
248 135
249 136/* nvram_write_error_log
250/* nvram_write_os_partition, nvram_write_error_log
251 * 137 *
252 * We need to buffer the error logs into nvram to ensure that we have 138 * We need to buffer the error logs into nvram to ensure that we have
253 * the failure information to decode. If we have a severe error there 139 * the failure information to decode.
254 * is no way to guarantee that the OS or the machine is in a state to
255 * get back to user land and write the error to disk. For example if
256 * the SCSI device driver causes a Machine Check by writing to a bad
257 * IO address, there is no way of guaranteeing that the device driver
258 * is in any state that is would also be able to write the error data
259 * captured to disk, thus we buffer it in NVRAM for analysis on the
260 * next boot.
261 *
262 * In NVRAM the partition containing the error log buffer will looks like:
263 * Header (in bytes):
264 * +-----------+----------+--------+------------+------------------+
265 * | signature | checksum | length | name | data |
266 * |0 |1 |2 3|4 15|16 length-1|
267 * +-----------+----------+--------+------------+------------------+
268 *
269 * The 'data' section would look like (in bytes):
270 * +--------------+------------+-----------------------------------+
271 * | event_logged | sequence # | error log |
272 * |0 3|4 7|8 error_log_size-1|
273 * +--------------+------------+-----------------------------------+
274 *
275 * event_logged: 0 if event has not been logged to syslog, 1 if it has
276 * sequence #: The unique sequence # for each event. (until it wraps)
277 * error log: The error log from event_scan
278 */ 140 */
279static int nvram_write_os_partition(struct nvram_os_partition *part,
280 char *buff, int length,
281 unsigned int err_type,
282 unsigned int error_log_cnt)
283{
284 int rc;
285 loff_t tmp_index;
286 struct err_log_info info;
287
288 if (part->index == -1) {
289 return -ESPIPE;
290 }
291
292 if (length > part->size) {
293 length = part->size;
294 }
295
296 info.error_type = cpu_to_be32(err_type);
297 info.seq_num = cpu_to_be32(error_log_cnt);
298
299 tmp_index = part->index;
300
301 rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
302 if (rc <= 0) {
303 pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
304 return rc;
305 }
306
307 rc = ppc_md.nvram_write(buff, length, &tmp_index);
308 if (rc <= 0) {
309 pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
310 return rc;
311 }
312
313 return 0;
314}
315
316int nvram_write_error_log(char * buff, int length, 141int nvram_write_error_log(char * buff, int length,
317 unsigned int err_type, unsigned int error_log_cnt) 142 unsigned int err_type, unsigned int error_log_cnt)
318{ 143{
319 int rc = nvram_write_os_partition(&rtas_log_partition, buff, length, 144 int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
320 err_type, error_log_cnt); 145 err_type, error_log_cnt);
321 if (!rc) { 146 if (!rc) {
322 last_unread_rtas_event = get_seconds(); 147 last_unread_rtas_event = ktime_get_real_seconds();
323#ifdef CONFIG_PSTORE 148#ifdef CONFIG_PSTORE
324 last_rtas_event = get_seconds(); 149 last_rtas_event = ktime_get_real_seconds();
325#endif 150#endif
326 } 151 }
327 152
328 return rc; 153 return rc;
329} 154}
330 155
331/* nvram_read_partition
332 *
333 * Reads nvram partition for at most 'length'
334 */
335static int nvram_read_partition(struct nvram_os_partition *part, char *buff,
336 int length, unsigned int *err_type,
337 unsigned int *error_log_cnt)
338{
339 int rc;
340 loff_t tmp_index;
341 struct err_log_info info;
342
343 if (part->index == -1)
344 return -1;
345
346 if (length > part->size)
347 length = part->size;
348
349 tmp_index = part->index;
350
351 if (part->os_partition) {
352 rc = ppc_md.nvram_read((char *)&info,
353 sizeof(struct err_log_info),
354 &tmp_index);
355 if (rc <= 0) {
356 pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
357 return rc;
358 }
359 }
360
361 rc = ppc_md.nvram_read(buff, length, &tmp_index);
362 if (rc <= 0) {
363 pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
364 return rc;
365 }
366
367 if (part->os_partition) {
368 *error_log_cnt = be32_to_cpu(info.seq_num);
369 *err_type = be32_to_cpu(info.error_type);
370 }
371
372 return 0;
373}
374
375/* nvram_read_error_log 156/* nvram_read_error_log
376 * 157 *
377 * Reads nvram for error log for at most 'length' 158 * Reads nvram for error log for at most 'length'
@@ -407,67 +188,6 @@ int nvram_clear_error_log(void)
407 return 0; 188 return 0;
408} 189}
409 190
410/* pseries_nvram_init_os_partition
411 *
412 * This sets up a partition with an "OS" signature.
413 *
414 * The general strategy is the following:
415 * 1.) If a partition with the indicated name already exists...
416 * - If it's large enough, use it.
417 * - Otherwise, recycle it and keep going.
418 * 2.) Search for a free partition that is large enough.
419 * 3.) If there's not a free partition large enough, recycle any obsolete
420 * OS partitions and try again.
421 * 4.) Will first try getting a chunk that will satisfy the requested size.
422 * 5.) If a chunk of the requested size cannot be allocated, then try finding
423 * a chunk that will satisfy the minum needed.
424 *
425 * Returns 0 on success, else -1.
426 */
427static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
428 *part)
429{
430 loff_t p;
431 int size;
432
433 /* Look for ours */
434 p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
435
436 /* Found one but too small, remove it */
437 if (p && size < part->min_size) {
438 pr_info("nvram: Found too small %s partition,"
439 " removing it...\n", part->name);
440 nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
441 p = 0;
442 }
443
444 /* Create one if we didn't find */
445 if (!p) {
446 p = nvram_create_partition(part->name, NVRAM_SIG_OS,
447 part->req_size, part->min_size);
448 if (p == -ENOSPC) {
449 pr_info("nvram: No room to create %s partition, "
450 "deleting any obsolete OS partitions...\n",
451 part->name);
452 nvram_remove_partition(NULL, NVRAM_SIG_OS,
453 pseries_nvram_os_partitions);
454 p = nvram_create_partition(part->name, NVRAM_SIG_OS,
455 part->req_size, part->min_size);
456 }
457 }
458
459 if (p <= 0) {
460 pr_err("nvram: Failed to find or create %s"
461 " partition, err %d\n", part->name, (int)p);
462 return -1;
463 }
464
465 part->index = p;
466 part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
467
468 return 0;
469}
470
471/* 191/*
472 * Are we using the ibm,rtas-log for oops/panic reports? And if so, 192 * Are we using the ibm,rtas-log for oops/panic reports? And if so,
473 * would logging this oops/panic overwrite an RTAS event that rtas_errd 193 * would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -476,321 +196,14 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
476 * We assume that if rtas_errd hasn't read the RTAS event in 196 * We assume that if rtas_errd hasn't read the RTAS event in
477 * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. 197 * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
478 */ 198 */
479static int clobbering_unread_rtas_event(void) 199int clobbering_unread_rtas_event(void)
480{ 200{
481 return (oops_log_partition.index == rtas_log_partition.index 201 return (oops_log_partition.index == rtas_log_partition.index
482 && last_unread_rtas_event 202 && last_unread_rtas_event
483 && get_seconds() - last_unread_rtas_event <= 203 && ktime_get_real_seconds() - last_unread_rtas_event <=
484 NVRAM_RTAS_READ_TIMEOUT); 204 NVRAM_RTAS_READ_TIMEOUT);
485} 205}
486 206
487/* Derived from logfs_compress() */
488static int nvram_compress(const void *in, void *out, size_t inlen,
489 size_t outlen)
490{
491 int err, ret;
492
493 ret = -EIO;
494 err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
495 MEM_LEVEL, Z_DEFAULT_STRATEGY);
496 if (err != Z_OK)
497 goto error;
498
499 stream.next_in = in;
500 stream.avail_in = inlen;
501 stream.total_in = 0;
502 stream.next_out = out;
503 stream.avail_out = outlen;
504 stream.total_out = 0;
505
506 err = zlib_deflate(&stream, Z_FINISH);
507 if (err != Z_STREAM_END)
508 goto error;
509
510 err = zlib_deflateEnd(&stream);
511 if (err != Z_OK)
512 goto error;
513
514 if (stream.total_out >= stream.total_in)
515 goto error;
516
517 ret = stream.total_out;
518error:
519 return ret;
520}
521
522/* Compress the text from big_oops_buf into oops_buf. */
523static int zip_oops(size_t text_len)
524{
525 struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
526 int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
527 oops_data_sz);
528 if (zipped_len < 0) {
529 pr_err("nvram: compression failed; returned %d\n", zipped_len);
530 pr_err("nvram: logging uncompressed oops/panic report\n");
531 return -1;
532 }
533 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
534 oops_hdr->report_length = cpu_to_be16(zipped_len);
535 oops_hdr->timestamp = cpu_to_be64(get_seconds());
536 return 0;
537}
538
539#ifdef CONFIG_PSTORE
540static int nvram_pstore_open(struct pstore_info *psi)
541{
542 /* Reset the iterator to start reading partitions again */
543 read_type = -1;
544 return 0;
545}
546
547/**
548 * nvram_pstore_write - pstore write callback for nvram
549 * @type: Type of message logged
550 * @reason: reason behind dump (oops/panic)
551 * @id: identifier to indicate the write performed
552 * @part: pstore writes data to registered buffer in parts,
553 * part number will indicate the same.
554 * @count: Indicates oops count
555 * @compressed: Flag to indicate the log is compressed
556 * @size: number of bytes written to the registered buffer
557 * @psi: registered pstore_info structure
558 *
559 * Called by pstore_dump() when an oops or panic report is logged in the
560 * printk buffer.
561 * Returns 0 on successful write.
562 */
563static int nvram_pstore_write(enum pstore_type_id type,
564 enum kmsg_dump_reason reason,
565 u64 *id, unsigned int part, int count,
566 bool compressed, size_t size,
567 struct pstore_info *psi)
568{
569 int rc;
570 unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
571 struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
572
573 /* part 1 has the recent messages from printk buffer */
574 if (part > 1 || type != PSTORE_TYPE_DMESG ||
575 clobbering_unread_rtas_event())
576 return -1;
577
578 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
579 oops_hdr->report_length = cpu_to_be16(size);
580 oops_hdr->timestamp = cpu_to_be64(get_seconds());
581
582 if (compressed)
583 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
584
585 rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
586 (int) (sizeof(*oops_hdr) + size), err_type, count);
587
588 if (rc != 0)
589 return rc;
590
591 *id = part;
592 return 0;
593}
594
595/*
596 * Reads the oops/panic report, rtas, of-config and common partition.
597 * Returns the length of the data we read from each partition.
598 * Returns 0 if we've been called before.
599 */
600static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
601 int *count, struct timespec *time, char **buf,
602 bool *compressed, struct pstore_info *psi)
603{
604 struct oops_log_info *oops_hdr;
605 unsigned int err_type, id_no, size = 0;
606 struct nvram_os_partition *part = NULL;
607 char *buff = NULL;
608 int sig = 0;
609 loff_t p;
610
611 read_type++;
612
613 switch (nvram_type_ids[read_type]) {
614 case PSTORE_TYPE_DMESG:
615 part = &oops_log_partition;
616 *type = PSTORE_TYPE_DMESG;
617 break;
618 case PSTORE_TYPE_PPC_RTAS:
619 part = &rtas_log_partition;
620 *type = PSTORE_TYPE_PPC_RTAS;
621 time->tv_sec = last_rtas_event;
622 time->tv_nsec = 0;
623 break;
624 case PSTORE_TYPE_PPC_OF:
625 sig = NVRAM_SIG_OF;
626 part = &of_config_partition;
627 *type = PSTORE_TYPE_PPC_OF;
628 *id = PSTORE_TYPE_PPC_OF;
629 time->tv_sec = 0;
630 time->tv_nsec = 0;
631 break;
632 case PSTORE_TYPE_PPC_COMMON:
633 sig = NVRAM_SIG_SYS;
634 part = &common_partition;
635 *type = PSTORE_TYPE_PPC_COMMON;
636 *id = PSTORE_TYPE_PPC_COMMON;
637 time->tv_sec = 0;
638 time->tv_nsec = 0;
639 break;
640 default:
641 return 0;
642 }
643
644 if (!part->os_partition) {
645 p = nvram_find_partition(part->name, sig, &size);
646 if (p <= 0) {
647 pr_err("nvram: Failed to find partition %s, "
648 "err %d\n", part->name, (int)p);
649 return 0;
650 }
651 part->index = p;
652 part->size = size;
653 }
654
655 buff = kmalloc(part->size, GFP_KERNEL);
656
657 if (!buff)
658 return -ENOMEM;
659
660 if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
661 kfree(buff);
662 return 0;
663 }
664
665 *count = 0;
666
667 if (part->os_partition)
668 *id = id_no;
669
670 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
671 size_t length, hdr_size;
672
673 oops_hdr = (struct oops_log_info *)buff;
674 if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
675 /* Old format oops header had 2-byte record size */
676 hdr_size = sizeof(u16);
677 length = be16_to_cpu(oops_hdr->version);
678 time->tv_sec = 0;
679 time->tv_nsec = 0;
680 } else {
681 hdr_size = sizeof(*oops_hdr);
682 length = be16_to_cpu(oops_hdr->report_length);
683 time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
684 time->tv_nsec = 0;
685 }
686 *buf = kmalloc(length, GFP_KERNEL);
687 if (*buf == NULL)
688 return -ENOMEM;
689 memcpy(*buf, buff + hdr_size, length);
690 kfree(buff);
691
692 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
693 *compressed = true;
694 else
695 *compressed = false;
696 return length;
697 }
698
699 *buf = buff;
700 return part->size;
701}
702
703static struct pstore_info nvram_pstore_info = {
704 .owner = THIS_MODULE,
705 .name = "nvram",
706 .open = nvram_pstore_open,
707 .read = nvram_pstore_read,
708 .write = nvram_pstore_write,
709};
710
711static int nvram_pstore_init(void)
712{
713 int rc = 0;
714
715 nvram_pstore_info.buf = oops_data;
716 nvram_pstore_info.bufsize = oops_data_sz;
717
718 spin_lock_init(&nvram_pstore_info.buf_lock);
719
720 rc = pstore_register(&nvram_pstore_info);
721 if (rc != 0)
722 pr_err("nvram: pstore_register() failed, defaults to "
723 "kmsg_dump; returned %d\n", rc);
724
725 return rc;
726}
727#else
728static int nvram_pstore_init(void)
729{
730 return -1;
731}
732#endif
733
734static void __init nvram_init_oops_partition(int rtas_partition_exists)
735{
736 int rc;
737
738 rc = pseries_nvram_init_os_partition(&oops_log_partition);
739 if (rc != 0) {
740 if (!rtas_partition_exists)
741 return;
742 pr_notice("nvram: Using %s partition to log both"
743 " RTAS errors and oops/panic reports\n",
744 rtas_log_partition.name);
745 memcpy(&oops_log_partition, &rtas_log_partition,
746 sizeof(rtas_log_partition));
747 }
748 oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
749 if (!oops_buf) {
750 pr_err("nvram: No memory for %s partition\n",
751 oops_log_partition.name);
752 return;
753 }
754 oops_data = oops_buf + sizeof(struct oops_log_info);
755 oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
756
757 rc = nvram_pstore_init();
758
759 if (!rc)
760 return;
761
762 /*
763 * Figure compression (preceded by elimination of each line's <n>
764 * severity prefix) will reduce the oops/panic report to at most
765 * 45% of its original size.
766 */
767 big_oops_buf_sz = (oops_data_sz * 100) / 45;
768 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
769 if (big_oops_buf) {
770 stream.workspace = kmalloc(zlib_deflate_workspacesize(
771 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
772 if (!stream.workspace) {
773 pr_err("nvram: No memory for compression workspace; "
774 "skipping compression of %s partition data\n",
775 oops_log_partition.name);
776 kfree(big_oops_buf);
777 big_oops_buf = NULL;
778 }
779 } else {
780 pr_err("No memory for uncompressed %s data; "
781 "skipping compression\n", oops_log_partition.name);
782 stream.workspace = NULL;
783 }
784
785 rc = kmsg_dump_register(&nvram_kmsg_dumper);
786 if (rc != 0) {
787 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
788 kfree(oops_buf);
789 kfree(big_oops_buf);
790 kfree(stream.workspace);
791 }
792}
793
794static int __init pseries_nvram_init_log_partitions(void) 207static int __init pseries_nvram_init_log_partitions(void)
795{ 208{
796 int rc; 209 int rc;
@@ -798,7 +211,7 @@ static int __init pseries_nvram_init_log_partitions(void)
798 /* Scan nvram for partitions */ 211 /* Scan nvram for partitions */
799 nvram_scan_partitions(); 212 nvram_scan_partitions();
800 213
801 rc = pseries_nvram_init_os_partition(&rtas_log_partition); 214 rc = nvram_init_os_partition(&rtas_log_partition);
802 nvram_init_oops_partition(rc == 0); 215 nvram_init_oops_partition(rc == 0);
803 return 0; 216 return 0;
804} 217}
@@ -834,72 +247,3 @@ int __init pSeries_nvram_init(void)
834 return 0; 247 return 0;
835} 248}
836 249
837
838/*
839 * This is our kmsg_dump callback, called after an oops or panic report
840 * has been written to the printk buffer. We want to capture as much
841 * of the printk buffer as possible. First, capture as much as we can
842 * that we think will compress sufficiently to fit in the lnx,oops-log
843 * partition. If that's too much, go back and capture uncompressed text.
844 */
845static void oops_to_nvram(struct kmsg_dumper *dumper,
846 enum kmsg_dump_reason reason)
847{
848 struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
849 static unsigned int oops_count = 0;
850 static bool panicking = false;
851 static DEFINE_SPINLOCK(lock);
852 unsigned long flags;
853 size_t text_len;
854 unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
855 int rc = -1;
856
857 switch (reason) {
858 case KMSG_DUMP_RESTART:
859 case KMSG_DUMP_HALT:
860 case KMSG_DUMP_POWEROFF:
861 /* These are almost always orderly shutdowns. */
862 return;
863 case KMSG_DUMP_OOPS:
864 break;
865 case KMSG_DUMP_PANIC:
866 panicking = true;
867 break;
868 case KMSG_DUMP_EMERG:
869 if (panicking)
870 /* Panic report already captured. */
871 return;
872 break;
873 default:
874 pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
875 __func__, (int) reason);
876 return;
877 }
878
879 if (clobbering_unread_rtas_event())
880 return;
881
882 if (!spin_trylock_irqsave(&lock, flags))
883 return;
884
885 if (big_oops_buf) {
886 kmsg_dump_get_buffer(dumper, false,
887 big_oops_buf, big_oops_buf_sz, &text_len);
888 rc = zip_oops(text_len);
889 }
890 if (rc != 0) {
891 kmsg_dump_rewind(dumper);
892 kmsg_dump_get_buffer(dumper, false,
893 oops_data, oops_data_sz, &text_len);
894 err_type = ERR_TYPE_KERNEL_PANIC;
895 oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
896 oops_hdr->report_length = cpu_to_be16(text_len);
897 oops_hdr->timestamp = cpu_to_be64(get_seconds());
898 }
899
900 (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
901 (int) (sizeof(*oops_hdr) + text_len), err_type,
902 ++oops_count);
903
904 spin_unlock_irqrestore(&lock, flags);
905}
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 89e23811199c..5d4a3df59d0c 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -32,6 +32,8 @@
32#include <asm/firmware.h> 32#include <asm/firmware.h>
33#include <asm/eeh.h> 33#include <asm/eeh.h>
34 34
35#include "pseries.h"
36
35static struct pci_bus * 37static struct pci_bus *
36find_bus_among_children(struct pci_bus *bus, 38find_bus_among_children(struct pci_bus *bus,
37 struct device_node *dn) 39 struct device_node *dn)
@@ -75,6 +77,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
75 return NULL; 77 return NULL;
76 rtas_setup_phb(phb); 78 rtas_setup_phb(phb);
77 pci_process_bridge_OF_ranges(phb, dn, 0); 79 pci_process_bridge_OF_ranges(phb, dn, 0);
80 phb->controller_ops = pseries_pci_controller_ops;
78 81
79 pci_devs_phb_init_dynamic(phb); 82 pci_devs_phb_init_dynamic(phb);
80 83
@@ -82,7 +85,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
82 eeh_dev_phb_init_dynamic(phb); 85 eeh_dev_phb_init_dynamic(phb);
83 86
84 if (dn->child) 87 if (dn->child)
85 eeh_add_device_tree_early(dn); 88 eeh_add_device_tree_early(PCI_DN(dn));
86 89
87 pcibios_scan_phb(phb); 90 pcibios_scan_phb(phb);
88 pcibios_finish_adding_to_bus(phb->bus); 91 pcibios_finish_adding_to_bus(phb->bus);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 1796c5438cc6..8411c27293e4 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -11,6 +11,7 @@
11#define _PSERIES_PSERIES_H 11#define _PSERIES_PSERIES_H
12 12
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <asm/rtas.h>
14 15
15struct device_node; 16struct device_node;
16 17
@@ -60,11 +61,24 @@ extern struct device_node *dlpar_configure_connector(__be32,
60 struct device_node *); 61 struct device_node *);
61extern int dlpar_attach_node(struct device_node *); 62extern int dlpar_attach_node(struct device_node *);
62extern int dlpar_detach_node(struct device_node *); 63extern int dlpar_detach_node(struct device_node *);
64extern int dlpar_acquire_drc(u32 drc_index);
65extern int dlpar_release_drc(u32 drc_index);
66
67#ifdef CONFIG_MEMORY_HOTPLUG
68int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
69#else
70static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
71{
72 return -EOPNOTSUPP;
73}
74#endif
63 75
64/* PCI root bridge prepare function override for pseries */ 76/* PCI root bridge prepare function override for pseries */
65struct pci_host_bridge; 77struct pci_host_bridge;
66int pseries_root_bridge_prepare(struct pci_host_bridge *bridge); 78int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
67 79
80extern struct pci_controller_ops pseries_pci_controller_ops;
81
68unsigned long pseries_memory_block_size(void); 82unsigned long pseries_memory_block_size(void);
69 83
70#endif /* _PSERIES_PSERIES_H */ 84#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e445b6701f50..df6a7041922b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -265,7 +265,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
265 update_dn_pci_info(np, pci->phb); 265 update_dn_pci_info(np, pci->phb);
266 266
267 /* Create EEH device for the OF node */ 267 /* Create EEH device for the OF node */
268 eeh_dev_init(np, pci->phb); 268 eeh_dev_init(PCI_DN(np), pci->phb);
269 } 269 }
270 break; 270 break;
271 default: 271 default:
@@ -461,6 +461,47 @@ static long pseries_little_endian_exceptions(void)
461} 461}
462#endif 462#endif
463 463
464static void __init find_and_init_phbs(void)
465{
466 struct device_node *node;
467 struct pci_controller *phb;
468 struct device_node *root = of_find_node_by_path("/");
469
470 for_each_child_of_node(root, node) {
471 if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
472 strcmp(node->type, "pciex") != 0))
473 continue;
474
475 phb = pcibios_alloc_controller(node);
476 if (!phb)
477 continue;
478 rtas_setup_phb(phb);
479 pci_process_bridge_OF_ranges(phb, node, 0);
480 isa_bridge_find_early(phb);
481 phb->controller_ops = pseries_pci_controller_ops;
482 }
483
484 of_node_put(root);
485 pci_devs_phb_init();
486
487 /*
488 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
489 * in chosen.
490 */
491 if (of_chosen) {
492 const int *prop;
493
494 prop = of_get_property(of_chosen,
495 "linux,pci-probe-only", NULL);
496 if (prop) {
497 if (*prop)
498 pci_add_flags(PCI_PROBE_ONLY);
499 else
500 pci_clear_flags(PCI_PROBE_ONLY);
501 }
502 }
503}
504
464static void __init pSeries_setup_arch(void) 505static void __init pSeries_setup_arch(void)
465{ 506{
466 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 507 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -793,6 +834,10 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
793void pSeries_final_fixup(void) { } 834void pSeries_final_fixup(void) { }
794#endif 835#endif
795 836
837struct pci_controller_ops pseries_pci_controller_ops = {
838 .probe_mode = pSeries_pci_probe_mode,
839};
840
796define_machine(pseries) { 841define_machine(pseries) {
797 .name = "pSeries", 842 .name = "pSeries",
798 .probe = pSeries_probe, 843 .probe = pSeries_probe,
@@ -801,7 +846,6 @@ define_machine(pseries) {
801 .show_cpuinfo = pSeries_show_cpuinfo, 846 .show_cpuinfo = pSeries_show_cpuinfo,
802 .log_error = pSeries_log_error, 847 .log_error = pSeries_log_error,
803 .pcibios_fixup = pSeries_final_fixup, 848 .pcibios_fixup = pSeries_final_fixup,
804 .pci_probe_mode = pSeries_pci_probe_mode,
805 .restart = rtas_restart, 849 .restart = rtas_restart,
806 .halt = rtas_halt, 850 .halt = rtas_halt,
807 .panic = rtas_os_term, 851 .panic = rtas_os_term,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a3555b10c1a5..6932ea803e33 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -197,16 +197,14 @@ static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
197 xics_cause_ipi(cpu, data); 197 xics_cause_ipi(cpu, data);
198} 198}
199 199
200static __init int pSeries_smp_probe(void) 200static __init void pSeries_smp_probe(void)
201{ 201{
202 int ret = xics_smp_probe(); 202 xics_smp_probe();
203 203
204 if (cpu_has_feature(CPU_FTR_DBELL)) { 204 if (cpu_has_feature(CPU_FTR_DBELL)) {
205 xics_cause_ipi = smp_ops->cause_ipi; 205 xics_cause_ipi = smp_ops->cause_ipi;
206 smp_ops->cause_ipi = pSeries_cause_ipi_mux; 206 smp_ops->cause_ipi = pSeries_cause_ipi_mux;
207 } 207 }
208
209 return ret;
210} 208}
211 209
212static struct smp_ops_t pSeries_mpic_smp_ops = { 210static struct smp_ops_t pSeries_mpic_smp_ops = {
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
deleted file mode 100755
index 3f46e8b9c56d..000000000000
--- a/arch/powerpc/relocs_check.pl
+++ /dev/null
@@ -1,66 +0,0 @@
1#!/usr/bin/perl
2
3# Copyright © 2009 IBM Corporation
4
5# This program is free software; you can redistribute it and/or
6# modify it under the terms of the GNU General Public License
7# as published by the Free Software Foundation; either version
8# 2 of the License, or (at your option) any later version.
9
10# This script checks the relocations of a vmlinux for "suspicious"
11# relocations.
12
13use strict;
14use warnings;
15
16if ($#ARGV != 1) {
17 die "$0 [path to objdump] [path to vmlinux]\n";
18}
19
20# Have Kbuild supply the path to objdump so we handle cross compilation.
21my $objdump = shift;
22my $vmlinux = shift;
23my $bad_relocs_count = 0;
24my $bad_relocs = "";
25my $old_binutils = 0;
26
27open(FD, "$objdump -R $vmlinux|") or die;
28while (<FD>) {
29 study $_;
30
31 # Only look at relocation lines.
32 next if (!/\s+R_/);
33
34 # These relocations are okay
35 # On PPC64:
36 # R_PPC64_RELATIVE, R_PPC64_NONE, R_PPC64_ADDR64
37 # On PPC:
38 # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
39 # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
40 # R_PPC_NONE
41
42 next if (/\bR_PPC64_RELATIVE\b/ or /\bR_PPC64_NONE\b/ or
43 /\bR_PPC64_ADDR64\s+mach_/);
44 next if (/\bR_PPC_ADDR16_LO\b/ or /\bR_PPC_ADDR16_HI\b/ or
45 /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or
46 /\bR_PPC_NONE\b/);
47
48 # If we see this type of relocation it's an idication that
49 # we /may/ be using an old version of binutils.
50 if (/R_PPC64_UADDR64/) {
51 $old_binutils++;
52 }
53
54 $bad_relocs_count++;
55 $bad_relocs .= $_;
56}
57
58if ($bad_relocs_count) {
59 print "WARNING: $bad_relocs_count bad relocations\n";
60 print $bad_relocs;
61}
62
63if ($old_binutils) {
64 print "WARNING: You need at least binutils >= 2.19 to build a ".
65 "CONFIG_RELOCATABLE kernel\n";
66}
diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/relocs_check.sh
new file mode 100755
index 000000000000..2e4ebd0e25b3
--- /dev/null
+++ b/arch/powerpc/relocs_check.sh
@@ -0,0 +1,59 @@
1#!/bin/sh
2
3# Copyright © 2015 IBM Corporation
4
5# This program is free software; you can redistribute it and/or
6# modify it under the terms of the GNU General Public License
7# as published by the Free Software Foundation; either version
8# 2 of the License, or (at your option) any later version.
9
10# This script checks the relocations of a vmlinux for "suspicious"
11# relocations.
12
13# based on relocs_check.pl
14# Copyright © 2009 IBM Corporation
15
16if [ $# -lt 2 ]; then
17 echo "$0 [path to objdump] [path to vmlinux]" 1>&2
18 exit 1
19fi
20
21# Have Kbuild supply the path to objdump so we handle cross compilation.
22objdump="$1"
23vmlinux="$2"
24
25bad_relocs=$(
26"$objdump" -R "$vmlinux" |
27 # Only look at relocation lines.
28 grep -E '\<R_' |
29 # These relocations are okay
30 # On PPC64:
31 # R_PPC64_RELATIVE, R_PPC64_NONE
32 # R_PPC64_ADDR64 mach_<name>
33 # On PPC:
34 # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
35 # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
36 # R_PPC_NONE
37 grep -F -w -v 'R_PPC64_RELATIVE
38R_PPC64_NONE
39R_PPC_ADDR16_LO
40R_PPC_ADDR16_HI
41R_PPC_ADDR16_HA
42R_PPC_RELATIVE
43R_PPC_NONE' |
44 grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_'
45)
46
47if [ -z "$bad_relocs" ]; then
48 exit 0
49fi
50
51num_bad=$(echo "$bad_relocs" | wc -l)
52echo "WARNING: $num_bad bad relocations"
53echo "$bad_relocs"
54
 55# If we see this type of relocation it's an indication that
56# we /may/ be using an old version of binutils.
57if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
58 echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
59fi
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 9e5353ff6d1b..d00a5663e312 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -369,7 +369,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
369 return 0; 369 return 0;
370} 370}
371 371
372void __init iommu_init_early_dart(void) 372void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
373{ 373{
374 struct device_node *dn; 374 struct device_node *dn;
375 375
@@ -395,8 +395,8 @@ void __init iommu_init_early_dart(void)
395 if (dart_is_u4) 395 if (dart_is_u4)
396 ppc_md.dma_set_mask = dart_dma_set_mask; 396 ppc_md.dma_set_mask = dart_dma_set_mask;
397 397
398 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart; 398 controller_ops->dma_dev_setup = pci_dma_dev_setup_dart;
399 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart; 399 controller_ops->dma_bus_setup = pci_dma_bus_setup_dart;
400 400
401 /* Setup pci_dma ops */ 401 /* Setup pci_dma ops */
402 set_pci_dma_ops(&dma_iommu_ops); 402 set_pci_dma_ops(&dma_iommu_ops);
@@ -404,8 +404,8 @@ void __init iommu_init_early_dart(void)
404 404
405 bail: 405 bail:
406 /* If init failed, use direct iommu and null setup functions */ 406 /* If init failed, use direct iommu and null setup functions */
407 ppc_md.pci_dma_dev_setup = NULL; 407 controller_ops->dma_dev_setup = NULL;
408 ppc_md.pci_dma_bus_setup = NULL; 408 controller_ops->dma_bus_setup = NULL;
409 409
410 /* Setup pci_dma ops */ 410 /* Setup pci_dma ops */
411 set_pci_dma_ops(&dma_direct_ops); 411 set_pci_dma_ops(&dma_direct_ops);
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 2d8a101b6b9e..121e26fffd50 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -54,7 +54,7 @@ bool dcr_map_ok_generic(dcr_host_t host)
54 else if (host.type == DCR_HOST_MMIO) 54 else if (host.type == DCR_HOST_MMIO)
55 return dcr_map_ok_mmio(host.host.mmio); 55 return dcr_map_ok_mmio(host.host.mmio);
56 else 56 else
57 return 0; 57 return false;
58} 58}
59EXPORT_SYMBOL_GPL(dcr_map_ok_generic); 59EXPORT_SYMBOL_GPL(dcr_map_ok_generic);
60 60
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 4bbb4b8dfd09..f086c6f22dc9 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -162,7 +162,17 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
162 msg->address_lo = lower_32_bits(address); 162 msg->address_lo = lower_32_bits(address);
163 msg->address_hi = upper_32_bits(address); 163 msg->address_hi = upper_32_bits(address);
164 164
165 msg->data = hwirq; 165 /*
 166 * MPIC version 2.0 has erratum PIC1, which causes
 167 * both MSI and MSI-X to malfunction.
168 * This is a workaround to allow MSI-X to function
169 * properly. It only works for MSI-X, we prevent
170 * MSI on buggy chips in fsl_setup_msi_irqs().
171 */
172 if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
173 msg->data = __swab32(hwirq);
174 else
175 msg->data = hwirq;
166 176
167 pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__, 177 pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
168 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK, 178 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
@@ -180,8 +190,16 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
180 struct msi_msg msg; 190 struct msi_msg msg;
181 struct fsl_msi *msi_data; 191 struct fsl_msi *msi_data;
182 192
183 if (type == PCI_CAP_ID_MSIX) 193 if (type == PCI_CAP_ID_MSI) {
184 pr_debug("fslmsi: MSI-X untested, trying anyway.\n"); 194 /*
195 * MPIC version 2.0 has erratum PIC1. For now MSI
196 * could not work. So check to prevent MSI from
197 * being used on the board with this erratum.
198 */
199 list_for_each_entry(msi_data, &msi_head, list)
200 if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
201 return -EINVAL;
202 }
185 203
186 /* 204 /*
187 * If the PCI node has an fsl,msi property, then we need to use it 205 * If the PCI node has an fsl,msi property, then we need to use it
@@ -446,6 +464,11 @@ static int fsl_of_msi_probe(struct platform_device *dev)
446 464
447 msi->feature = features->fsl_pic_ip; 465 msi->feature = features->fsl_pic_ip;
448 466
 467 /* For erratum PIC1 on MPIC version 2.0 */
468 if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
469 && (fsl_mpic_primary_get_version() == 0x0200))
470 msi->feature |= MSI_HW_ERRATA_ENDIAN;
471
449 /* 472 /*
450 * Remember the phandle, so that we can match with any PCI nodes 473 * Remember the phandle, so that we can match with any PCI nodes
451 * that have an "fsl,msi" property. 474 * that have an "fsl,msi" property.
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 420cfcbdac01..a67359d993e5 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -27,6 +27,8 @@
27#define FSL_PIC_IP_IPIC 0x00000002 27#define FSL_PIC_IP_IPIC 0x00000002
28#define FSL_PIC_IP_VMPIC 0x00000003 28#define FSL_PIC_IP_VMPIC 0x00000003
29 29
30#define MSI_HW_ERRATA_ENDIAN 0x00000010
31
30struct fsl_msi_cascade_data; 32struct fsl_msi_cascade_data;
31 33
32struct fsl_msi { 34struct fsl_msi {
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4b74c276e427..9a8fcf0d79d7 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -111,6 +111,18 @@ static struct pci_ops fsl_indirect_pcie_ops =
111#define MAX_PHYS_ADDR_BITS 40 111#define MAX_PHYS_ADDR_BITS 40
112static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS; 112static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
113 113
114#ifdef CONFIG_SWIOTLB
115static void setup_swiotlb_ops(struct pci_controller *hose)
116{
117 if (ppc_swiotlb_enable) {
118 hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
119 set_pci_dma_ops(&swiotlb_dma_ops);
120 }
121}
122#else
123static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
124#endif
125
114static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) 126static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
115{ 127{
116 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 128 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -548,6 +560,9 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
548 /* Setup PEX window registers */ 560 /* Setup PEX window registers */
549 setup_pci_atmu(hose); 561 setup_pci_atmu(hose);
550 562
563 /* Set up controller operations */
564 setup_swiotlb_ops(hose);
565
551 return 0; 566 return 0;
552 567
553no_bridge: 568no_bridge:
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index bbfbbf2025fd..b2b8447a227a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -655,7 +655,6 @@ static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
655static inline void mpic_eoi(struct mpic *mpic) 655static inline void mpic_eoi(struct mpic *mpic)
656{ 656{
657 mpic_cpu_write(MPIC_INFO(CPU_EOI), 0); 657 mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
658 (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
659} 658}
660 659
661/* 660/*
@@ -1676,31 +1675,6 @@ void __init mpic_init(struct mpic *mpic)
1676 mpic_err_int_init(mpic, MPIC_FSL_ERR_INT); 1675 mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
1677} 1676}
1678 1677
1679void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
1680{
1681 u32 v;
1682
1683 v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
1684 v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
1685 v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
1686 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1687}
1688
1689void __init mpic_set_serial_int(struct mpic *mpic, int enable)
1690{
1691 unsigned long flags;
1692 u32 v;
1693
1694 raw_spin_lock_irqsave(&mpic_lock, flags);
1695 v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
1696 if (enable)
1697 v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
1698 else
1699 v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
1700 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
1701 raw_spin_unlock_irqrestore(&mpic_lock, flags);
1702}
1703
1704void mpic_irq_set_priority(unsigned int irq, unsigned int pri) 1678void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1705{ 1679{
1706 struct mpic *mpic = mpic_find(irq); 1680 struct mpic *mpic = mpic_find(irq);
@@ -1923,7 +1897,7 @@ void smp_mpic_message_pass(int cpu, int msg)
1923 msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask); 1897 msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
1924} 1898}
1925 1899
1926int __init smp_mpic_probe(void) 1900void __init smp_mpic_probe(void)
1927{ 1901{
1928 int nr_cpus; 1902 int nr_cpus;
1929 1903
@@ -1935,8 +1909,6 @@ int __init smp_mpic_probe(void)
1935 1909
1936 if (nr_cpus > 1) 1910 if (nr_cpus > 1)
1937 mpic_request_ipis(); 1911 mpic_request_ipis();
1938
1939 return nr_cpus;
1940} 1912}
1941 1913
1942void smp_mpic_setup_cpu(int cpu) 1914void smp_mpic_setup_cpu(int cpu)
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index d09994164daf..7ea0174f6d3d 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -190,28 +190,3 @@ int par_io_of_config(struct device_node *np)
190 return 0; 190 return 0;
191} 191}
192EXPORT_SYMBOL(par_io_of_config); 192EXPORT_SYMBOL(par_io_of_config);
193
194#ifdef DEBUG
195static void dump_par_io(void)
196{
197 unsigned int i;
198
199 printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io);
200 for (i = 0; i < num_par_io_ports; i++) {
201 printk(KERN_INFO " cpodr[%u]=%08x\n", i,
202 in_be32(&par_io[i].cpodr));
203 printk(KERN_INFO " cpdata[%u]=%08x\n", i,
204 in_be32(&par_io[i].cpdata));
205 printk(KERN_INFO " cpdir1[%u]=%08x\n", i,
206 in_be32(&par_io[i].cpdir1));
207 printk(KERN_INFO " cpdir2[%u]=%08x\n", i,
208 in_be32(&par_io[i].cpdir2));
209 printk(KERN_INFO " cppar1[%u]=%08x\n", i,
210 in_be32(&par_io[i].cppar1));
211 printk(KERN_INFO " cppar2[%u]=%08x\n", i,
212 in_be32(&par_io[i].cppar2));
213 }
214
215}
216EXPORT_SYMBOL(dump_par_io);
217#endif /* DEBUG */
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index befaf1123f7f..5f91628209eb 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -43,11 +43,6 @@ u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
43} 43}
44EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock); 44EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
45 45
46void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
47{
48 out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
49}
50
51void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs) 46void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
52{ 47{
53 struct ucc_slow_info *us_info = uccs->us_info; 48 struct ucc_slow_info *us_info = uccs->us_info;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 125743b58c70..878a54036a25 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -140,15 +140,13 @@ static void xics_request_ipi(void)
140 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); 140 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
141} 141}
142 142
143int __init xics_smp_probe(void) 143void __init xics_smp_probe(void)
144{ 144{
145 /* Setup cause_ipi callback based on which ICP is used */ 145 /* Setup cause_ipi callback based on which ICP is used */
146 smp_ops->cause_ipi = icp_ops->cause_ipi; 146 smp_ops->cause_ipi = icp_ops->cause_ipi;
147 147
148 /* Register all the IPIs */ 148 /* Register all the IPIs */
149 xics_request_ipi(); 149 xics_request_ipi();
150
151 return num_possible_cpus();
152} 150}
153 151
154#endif /* CONFIG_SMP */ 152#endif /* CONFIG_SMP */