author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-05 14:16:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-05 14:16:12 -0400
commit     5f3d2f2e1a63679cf1c4a4210f2f1cc2f335bef6 (patch)
tree       9189bd6c81fe5f982a7ae45d2f3d900176658509
parent     283dbd82055eb70ff3b469f812d9c695f18c9641 (diff)
parent     d900bd7366463fd96a907b2c212242e2b68b27d8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

  - 64TB address space support for 64-bit processes by Aneesh Kumar

  - Gavin Shan did a major cleanup & re-organization of our EEH support
    code (IBM fancy PCI error handling & recovery infrastructure) which
    paves the way for supporting different platform backends, along
    with some rework of the PCIe code for the PowerNV platform in order
    to remove home made resource allocations and instead use the
    generic code (which is possible after some small improvements to it
    done by Gavin).

  - Uprobes support by Ananth N Mavinakayanahalli

  - A pile of embedded updates from Freescale folks, including new SoC
    and board supports, more KVM stuff including preparing for 64-bit
    BookE KVM support, ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...
-rw-r--r--Documentation/devicetree/bindings/misc/ifm-csi.txt41
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/ifc.txt9
-rw-r--r--arch/powerpc/Kconfig17
-rw-r--r--arch/powerpc/boot/Makefile1
-rw-r--r--arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi58
-rw-r--r--arch/powerpc/boot/dts/fsl/e500v2_power_isa.dtsi52
-rw-r--r--arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi59
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-post.dtsi320
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi114
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi118
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dtsi4
-rw-r--r--arch/powerpc/boot/dts/mpc8540ads.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8541cds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dtsi39
-rw-r--r--arch/powerpc/boot/dts/mpc8555cds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8560ads.dts2
-rw-r--r--arch/powerpc/boot/dts/o2d.dts47
-rw-r--r--arch/powerpc/boot/dts/o2d.dtsi139
-rw-r--r--arch/powerpc/boot/dts/o2d300.dts52
-rw-r--r--arch/powerpc/boot/dts/o2dnt2.dts48
-rw-r--r--arch/powerpc/boot/dts/o2i.dts33
-rw-r--r--arch/powerpc/boot/dts/o2mnt.dts33
-rw-r--r--arch/powerpc/boot/dts/o3dnt.dts48
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_camp_core0.dts63
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_camp_core1.dts141
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dtsi4
-rw-r--r--arch/powerpc/boot/dts/p1022rdk.dts188
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core0.dts67
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core1.dts125
-rw-r--r--arch/powerpc/boot/dts/p2041rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts4
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts4
-rw-r--r--arch/powerpc/boot/dts/p5020ds.dts4
-rw-r--r--arch/powerpc/boot/dts/p5040ds.dts207
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig6
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig1
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/include/asm/abs_addr.h56
-rw-r--r--arch/powerpc/include/asm/cacheflush.h2
-rw-r--r--arch/powerpc/include/asm/debug.h2
-rw-r--r--arch/powerpc/include/asm/eeh.h141
-rw-r--r--arch/powerpc/include/asm/eeh_event.h6
-rw-r--r--arch/powerpc/include/asm/exception-64e.h6
-rw-r--r--arch/powerpc/include/asm/fsl_guts.h2
-rw-r--r--arch/powerpc/include/asm/fsl_ifc.h14
-rw-r--r--arch/powerpc/include/asm/hvcall.h5
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h9
-rw-r--r--arch/powerpc/include/asm/kprobes.h15
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h1
-rw-r--r--arch/powerpc/include/asm/machdep.h9
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h169
-rw-r--r--arch/powerpc/include/asm/mmu.h9
-rw-r--r--arch/powerpc/include/asm/mpc52xx.h2
-rw-r--r--arch/powerpc/include/asm/mpic.h19
-rw-r--r--arch/powerpc/include/asm/paca.h3
-rw-r--r--arch/powerpc/include/asm/page_64.h10
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h11
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h1
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64-4k.h4
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64-64k.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h19
-rw-r--r--arch/powerpc/include/asm/pgtable.h10
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h3
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h20
-rw-r--r--arch/powerpc/include/asm/probes.h42
-rw-r--r--arch/powerpc/include/asm/processor.h6
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h18
-rw-r--r--arch/powerpc/include/asm/reg.h54
-rw-r--r--arch/powerpc/include/asm/setup.h2
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/sparsemem.h4
-rw-r--r--arch/powerpc/include/asm/swiotlb.h6
-rw-r--r--arch/powerpc/include/asm/thread_info.h7
-rw-r--r--arch/powerpc/include/asm/tlbflush.h7
-rw-r--r--arch/powerpc/include/asm/uaccess.h11
-rw-r--r--arch/powerpc/include/asm/uprobes.h54
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S74
-rw-r--r--arch/powerpc/kernel/cputable.c4
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c22
-rw-r--r--arch/powerpc/kernel/dma.c3
-rw-r--r--arch/powerpc/kernel/entry_32.S47
-rw-r--r--arch/powerpc/kernel/entry_64.S35
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S212
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S127
-rw-r--r--arch/powerpc/kernel/fadump.c3
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S46
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c25
-rw-r--r--arch/powerpc/kernel/ibmebus.c1
-rw-r--r--arch/powerpc/kernel/iommu.c5
-rw-r--r--arch/powerpc/kernel/irq.c8
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/paca.c1
-rw-r--r--arch/powerpc/kernel/pci-common.c16
-rw-r--r--arch/powerpc/kernel/process.c16
-rw-r--r--arch/powerpc/kernel/prom.c4
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/ptrace.c3
-rw-r--r--arch/powerpc/kernel/rtas_flash.c7
-rw-r--r--arch/powerpc/kernel/rtas_pci.c5
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/signal.c8
-rw-r--r--arch/powerpc/kernel/smp.c14
-rw-r--r--arch/powerpc/kernel/time.c8
-rw-r--r--arch/powerpc/kernel/traps.c1
-rw-r--r--arch/powerpc/kernel/uprobes.c184
-rw-r--r--arch/powerpc/kernel/vdso.c4
-rw-r--r--arch/powerpc/kernel/vio.c1
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c8
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c17
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/trace.h14
-rw-r--r--arch/powerpc/lib/memcpy_power7.S4
-rw-r--r--arch/powerpc/lib/sstep.c36
-rw-r--r--arch/powerpc/mm/fault.c1
-rw-r--r--arch/powerpc/mm/hash_low_64.S97
-rw-r--r--arch/powerpc/mm/hash_native_64.c192
-rw-r--r--arch/powerpc/mm/hash_utils_64.c48
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c15
-rw-r--r--arch/powerpc/mm/init_64.c1
-rw-r--r--arch/powerpc/mm/mem.c5
-rw-r--r--arch/powerpc/mm/mmu_context_hash64.c10
-rw-r--r--arch/powerpc/mm/pgtable_64.c13
-rw-r--r--arch/powerpc/mm/slb_low.S62
-rw-r--r--arch/powerpc/mm/slice.c112
-rw-r--r--arch/powerpc/mm/stab.c3
-rw-r--r--arch/powerpc/mm/subpage-prot.c6
-rw-r--r--arch/powerpc/mm/tlb_hash64.c11
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S18
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c116
-rw-r--r--arch/powerpc/perf/core-book3s.c46
-rw-r--r--arch/powerpc/perf/power7-pmu.c3
-rw-r--r--arch/powerpc/platforms/44x/currituck.c10
-rw-r--r--arch/powerpc/platforms/512x/Kconfig1
-rw-r--r--arch/powerpc/platforms/512x/clock.c6
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c6
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c35
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig21
-rw-r--r--arch/powerpc/platforms/85xx/Makefile2
-rw-r--r--arch/powerpc/platforms/85xx/common.c10
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c38
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c62
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c36
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c11
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c44
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c15
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c40
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c30
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c14
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c36
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c167
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rds.c9
-rw-r--r--arch/powerpc/platforms/85xx/p2041_rdb.c2
-rw-r--r--arch/powerpc/platforms/85xx/p3041_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/p4080_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/p5020_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/p5040_ds.c89
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c5
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c21
-rw-r--r--arch/powerpc/platforms/85xx/smp.c220
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c11
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c13
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c21
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c56
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c12
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c13
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c12
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c21
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c42
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c14
-rw-r--r--arch/powerpc/platforms/cell/beat.c4
-rw-r--r--arch/powerpc/platforms/cell/beat.h2
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c45
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c6
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c695
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c7
-rw-r--r--arch/powerpc/platforms/powernv/pci.h21
-rw-r--r--arch/powerpc/platforms/ps3/htab.c22
-rw-r--r--arch/powerpc/platforms/ps3/setup.c10
-rw-r--r--arch/powerpc/platforms/pseries/Makefile5
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c543
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c59
-rw-r--r--arch/powerpc/platforms/pseries/eeh_dev.c14
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c310
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c54
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pe.c652
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c247
-rw-r--r--arch/powerpc/platforms/pseries/eeh_sysfs.c9
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c12
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c77
-rw-r--r--arch/powerpc/platforms/pseries/msi.c26
-rw-r--r--arch/powerpc/platforms/pseries/pci.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c32
-rw-r--r--arch/powerpc/platforms/pseries/setup.c22
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c19
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c10
-rw-r--r--arch/powerpc/sysdev/fsl_ifc.c20
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_err.c149
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c157
-rw-r--r--arch/powerpc/sysdev/fsl_pci.h20
-rw-r--r--arch/powerpc/sysdev/mpic.c102
-rw-r--r--arch/powerpc/sysdev/mpic.h22
-rw-r--r--arch/powerpc/xmon/xmon.c111
-rw-r--r--drivers/crypto/nx/nx.c17
-rw-r--r--drivers/edac/mpc85xx_edac.c43
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c45
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h1
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c12
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c2
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c56
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_phyp.c12
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c14
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/scsi/ipr.c18
-rw-r--r--drivers/tty/hvc/hvc_console.c33
-rw-r--r--drivers/tty/hvc/hvc_vio.c123
-rw-r--r--drivers/video/ps3fb.c3
249 files changed, 6423 insertions, 3240 deletions
diff --git a/Documentation/devicetree/bindings/misc/ifm-csi.txt b/Documentation/devicetree/bindings/misc/ifm-csi.txt
new file mode 100644
index 000000000000..5bdfffb0b9f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/ifm-csi.txt
@@ -0,0 +1,41 @@
1IFM camera sensor interface on mpc5200 LocalPlus bus
2
3Required properties:
4- compatible: "ifm,o2d-csi"
5- reg: specifies sensor chip select number and associated address range
6- interrupts: external interrupt line number and interrupt sense mode
7 of the interrupt line signaling frame valid events
8- gpios: three gpio-specifiers for "capture", "reset" and "master enable"
9 GPIOs (strictly in this order).
10- ifm,csi-clk-handle: the phandle to a node in the DT describing the sensor
11 clock generator. This node is usually a general purpose timer controller.
12- ifm,csi-addr-bus-width: address bus width (valid values are 16, 24, 25)
13- ifm,csi-data-bus-width: data bus width (valid values are 8 and 16)
14- ifm,csi-wait-cycles: sensor bus wait cycles
15
16Optional properties:
17- ifm,csi-byte-swap: if this property is present, the byte swapping on
18 the bus will be enabled.
19
20Example:
21
22 csi@3,0 {
23 compatible = "ifm,o2d-csi";
24 reg = <3 0 0x00100000>; /* CS 3, 1 MiB range */
25 interrupts = <1 1 2>; /* IRQ1, edge falling */
26
27 ifm,csi-clk-handle = <&timer7>;
28 gpios = <&gpio_simple 23 0 /* image_capture */
29 &gpio_simple 26 0 /* image_reset */
30 &gpio_simple 29 0>; /* image_master_en */
31
32 ifm,csi-addr-bus-width = <24>;
33 ifm,csi-data-bus-width = <8>;
34 ifm,csi-wait-cycles = <0>;
35 };
36
37The base address of the used chip select is specified in the
38ranges property of the parent localbus node, for example:
39
40 ranges = <0 0 0xff000000 0x01000000
41 3 0 0xe3000000 0x00100000>;
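
For orientation only (editor's sketch, not part of the patch): the example node and the
ranges snippet above combine into a parent/child layout roughly like the one below. The
localbus compatible string and cell sizes of the real o2d board files are omitted here.

	localbus {
		ranges = <0 0 0xff000000 0x01000000
			  3 0 0xe3000000 0x00100000>;

		csi@3,0 {
			compatible = "ifm,o2d-csi";
			reg = <3 0 0x00100000>;		/* CS 3, 1 MiB range */
			interrupts = <1 1 2>;		/* IRQ1, edge falling */
			ifm,csi-clk-handle = <&timer7>;
			gpios = <&gpio_simple 23 0	/* image_capture */
				 &gpio_simple 26 0	/* image_reset */
				 &gpio_simple 29 0>;	/* image_master_en */
			ifm,csi-addr-bus-width = <24>;
			ifm,csi-data-bus-width = <8>;
			ifm,csi-wait-cycles = <0>;
		};
	};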
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt b/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt
index 939a26d541f6..d5e370450ac0 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt
@@ -12,9 +12,12 @@ Properties:
 - #size-cells : Either one or two, depending on how large each chipselect
                 can be.
 - reg : Offset and length of the register set for the device
-- interrupts : IFC has two interrupts. The first one is the "common"
-               interrupt(CM_EVTER_STAT), and second is the NAND interrupt
-               (NAND_EVTER_STAT).
+- interrupts: IFC may have one or two interrupts. If two interrupt
+              specifiers are present, the first is the "common"
+              interrupt (CM_EVTER_STAT), and the second is the NAND
+              interrupt (NAND_EVTER_STAT). If there is only one,
+              that interrupt reports both types of event.
+
 
 - ranges : Each range corresponds to a single chipselect, and covers
            the entire access window as configured.
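
For orientation only (editor's sketch, not part of the patch): an IFC node using the newly
documented single-interrupt form could look like the following. All addresses, cell sizes
and interrupt numbers here are placeholders, not taken from a real SoC dtsi.

	ifc: ifc@ffe1e000 {
		compatible = "fsl,ifc", "simple-bus";
		#address-cells = <2>;
		#size-cells = <1>;
		reg = <0x0 0xffe1e000 0x0 0x2000>;
		/* single specifier: this one interrupt reports both
		 * CM_EVTER_STAT and NAND_EVTER_STAT events */
		interrupts = <16 2 0 0>;
	};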
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 352f416269ce..4ce0be32d153 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -215,7 +215,8 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-		   (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
+		   (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
+		   || 44x || 40x
 
 config PPC_DCR_NATIVE
 	bool
@@ -239,6 +240,9 @@ config PPC_OF_PLATFORM_PCI
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
+config ARCH_SUPPORTS_UPROBES
+	def_bool y
+
 config PPC_ADV_DEBUG_REGS
 	bool
 	depends on 40x || BOOKE
@@ -325,7 +329,8 @@ config SWIOTLB
 
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
-	depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC || PPC_POWERNV)
+	depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || \
+	PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
 	---help---
 	  Say Y here to be able to disable and re-enable individual
 	  CPUs at runtime on SMP machines.
@@ -557,6 +562,14 @@ config SCHED_SMT
 	  when dealing with POWER5 cpus at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+config PPC_DENORMALISATION
+	bool "PowerPC denormalisation exception handling"
+	depends on PPC_BOOK3S_64
+	default "n"
+	---help---
+	  Add support for handling denormalisation of single precision
+	  values. Useful for bare metal only. If unsure say Y here.
+
 config CMDLINE_BOOL
 	bool "Default bootloader kernel arguments"
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index b7d833382be4..6a15c968d214 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -107,6 +107,7 @@ src-boot := $(addprefix $(obj)/, $(src-boot))
 obj-boot := $(addsuffix .o, $(basename $(src-boot)))
 obj-wlib := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-wlib))))
 obj-plat := $(addsuffix .o, $(basename $(addprefix $(obj)/, $(src-plat))))
+obj-plat: $(libfdt)
 
 quiet_cmd_copy_zlib = COPY $@
       cmd_copy_zlib = sed "s@__used@@;s@<linux/\([^>]*\).*@\"\1\"@" $< > $@
diff --git a/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi
new file mode 100644
index 000000000000..870c6535a053
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/e500mc_power_isa.dtsi
@@ -0,0 +1,58 @@
1/*
2 * e500mc Power ISA Device Tree Source (include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/ {
36 cpus {
37 power-isa-version = "2.06";
38 power-isa-b; // Base
39 power-isa-e; // Embedded
40 power-isa-atb; // Alternate Time Base
41 power-isa-cs; // Cache Specification
42 power-isa-ds; // Decorated Storage
43 power-isa-e.ed; // Embedded.Enhanced Debug
44 power-isa-e.pd; // Embedded.External PID
45 power-isa-e.hv; // Embedded.Hypervisor
46 power-isa-e.le; // Embedded.Little-Endian
47 power-isa-e.pm; // Embedded.Performance Monitor
48 power-isa-e.pc; // Embedded.Processor Control
49 power-isa-ecl; // Embedded Cache Locking
50 power-isa-exp; // External Proxy
51 power-isa-fp; // Floating Point
52 power-isa-fp.r; // Floating Point.Record
53 power-isa-mmc; // Memory Coherence
54 power-isa-scpm; // Store Conditional Page Mobility
55 power-isa-wt; // Wait
56 mmu-type = "power-embedded";
57 };
58};
diff --git a/arch/powerpc/boot/dts/fsl/e500v2_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v2_power_isa.dtsi
new file mode 100644
index 000000000000..f4928144d2c8
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/e500v2_power_isa.dtsi
@@ -0,0 +1,52 @@
1/*
2 * e500v2 Power ISA Device Tree Source (include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/ {
36 cpus {
37 power-isa-version = "2.03";
38 power-isa-b; // Base
39 power-isa-e; // Embedded
40 power-isa-atb; // Alternate Time Base
41 power-isa-cs; // Cache Specification
42 power-isa-e.le; // Embedded.Little-Endian
43 power-isa-e.pm; // Embedded.Performance Monitor
44 power-isa-ecl; // Embedded Cache Locking
45 power-isa-mmc; // Memory Coherence
46 power-isa-sp; // Signal Processing Engine
47 power-isa-sp.fd; // SPE.Embedded Float Scalar Double
48 power-isa-sp.fs; // SPE.Embedded Float Scalar Single
49 power-isa-sp.fv; // SPE.Embedded Float Vector
50 mmu-type = "power-embedded";
51 };
52};
diff --git a/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi
new file mode 100644
index 000000000000..3230212f7ad5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/e5500_power_isa.dtsi
@@ -0,0 +1,59 @@
1/*
2 * e5500 Power ISA Device Tree Source (include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/ {
36 cpus {
37 power-isa-version = "2.06";
38 power-isa-b; // Base
39 power-isa-e; // Embedded
40 power-isa-atb; // Alternate Time Base
41 power-isa-cs; // Cache Specification
42 power-isa-ds; // Decorated Storage
43 power-isa-e.ed; // Embedded.Enhanced Debug
44 power-isa-e.pd; // Embedded.External PID
45 power-isa-e.hv; // Embedded.Hypervisor
46 power-isa-e.le; // Embedded.Little-Endian
47 power-isa-e.pm; // Embedded.Performance Monitor
48 power-isa-e.pc; // Embedded.Processor Control
49 power-isa-ecl; // Embedded Cache Locking
50 power-isa-exp; // External Proxy
51 power-isa-fp; // Floating Point
52 power-isa-fp.r; // Floating Point.Record
53 power-isa-mmc; // Memory Coherence
54 power-isa-scpm; // Store Conditional Page Mobility
55 power-isa-wt; // Wait
56 power-isa-64; // 64-bit
57 mmu-type = "power-embedded";
58 };
59};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
index 7de45a784df6..152906f98a0f 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,MPC8536";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
index 8777f9239d9e..5a69bafb652a 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8544si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,MPC8544";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
index 720422d83529..fc1ce977422b 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,MPC8548";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
index eacd62c5fe6c..122ca3bd0b03 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8568si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,MPC8568";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
index b07064d11930..2cd15a2a0422 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8569si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,MPC8569";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
index ca188326c2ca..28c2a862be96 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8572si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,MPC8572";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
index 7354a8f90ea5..6e76f9b282a1 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P1010";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
index 6f0376e554eb..fed9c4c8d962 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P1020";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
index 4abd54bc3308..36161b500176 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P1021";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
index e930f4f7ca89..1956dea040cc 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P1022";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
index ac45f6d93385..132a1521921a 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P1023";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
index 3213288641d1..42bf3c6d25ca 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500v2_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P2020";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
index 2d0a40d6b10f..7a2697d04549 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500mc_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P2041";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
index 136def3536b6..c9ca2c305cfe 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500mc_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P3041";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
index b9556ee3a639..493d9a056b5c 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e500mc_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P4080";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
index ae823a47584e..0a198b0a77e5 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
@@ -33,6 +33,9 @@
  */
 
 /dts-v1/;
+
+/include/ "e5500_power_isa.dtsi"
+
 / {
 	compatible = "fsl,P5020";
 	#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
new file mode 100644
index 000000000000..db2c9a7b3a0e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -0,0 +1,320 @@
1/*
2 * P5040 Silicon/SoC Device Tree Source (post include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * This software is provided by Freescale Semiconductor "as is" and any
24 * express or implied warranties, including, but not limited to, the implied
25 * warranties of merchantability and fitness for a particular purpose are
26 * disclaimed. In no event shall Freescale Semiconductor be liable for any
27 * direct, indirect, incidental, special, exemplary, or consequential damages
28 * (including, but not limited to, procurement of substitute goods or services;
29 * loss of use, data, or profits; or business interruption) however caused and
30 * on any theory of liability, whether in contract, strict liability, or tort
31 * (including negligence or otherwise) arising in any way out of the use of this
32 * software, even if advised of the possibility of such damage.
33 */
34
35&lbc {
36 compatible = "fsl,p5040-elbc", "fsl,elbc", "simple-bus";
37 interrupts = <25 2 0 0>;
38 #address-cells = <2>;
39 #size-cells = <1>;
40};
41
42/* controller at 0x200000 */
43&pci0 {
44 compatible = "fsl,p5040-pcie", "fsl,qoriq-pcie-v2.4";
45 device_type = "pci";
46 #size-cells = <2>;
47 #address-cells = <3>;
48 bus-range = <0x0 0xff>;
49 clock-frequency = <33333333>;
50 interrupts = <16 2 1 15>;
51 pcie@0 {
52 reg = <0 0 0 0 0>;
53 #interrupt-cells = <1>;
54 #size-cells = <2>;
55 #address-cells = <3>;
56 device_type = "pci";
57 interrupts = <16 2 1 15>;
58 interrupt-map-mask = <0xf800 0 0 7>;
59 interrupt-map = <
60 /* IDSEL 0x0 */
61 0000 0 0 1 &mpic 40 1 0 0
62 0000 0 0 2 &mpic 1 1 0 0
63 0000 0 0 3 &mpic 2 1 0 0
64 0000 0 0 4 &mpic 3 1 0 0
65 >;
66 };
67};
68
69/* controller at 0x201000 */
70&pci1 {
71 compatible = "fsl,p5040-pcie", "fsl,qoriq-pcie-v2.4";
72 device_type = "pci";
73 #size-cells = <2>;
74 #address-cells = <3>;
75 bus-range = <0 0xff>;
76 clock-frequency = <33333333>;
77 interrupts = <16 2 1 14>;
78 pcie@0 {
79 reg = <0 0 0 0 0>;
80 #interrupt-cells = <1>;
81 #size-cells = <2>;
82 #address-cells = <3>;
83 device_type = "pci";
84 interrupts = <16 2 1 14>;
85 interrupt-map-mask = <0xf800 0 0 7>;
86 interrupt-map = <
87 /* IDSEL 0x0 */
88 0000 0 0 1 &mpic 41 1 0 0
89 0000 0 0 2 &mpic 5 1 0 0
90 0000 0 0 3 &mpic 6 1 0 0
91 0000 0 0 4 &mpic 7 1 0 0
92 >;
93 };
94};
95
96/* controller at 0x202000 */
97&pci2 {
98 compatible = "fsl,p5040-pcie", "fsl,qoriq-pcie-v2.4";
99 device_type = "pci";
100 #size-cells = <2>;
101 #address-cells = <3>;
102 bus-range = <0x0 0xff>;
103 clock-frequency = <33333333>;
104 interrupts = <16 2 1 13>;
105 pcie@0 {
106 reg = <0 0 0 0 0>;
107 #interrupt-cells = <1>;
108 #size-cells = <2>;
109 #address-cells = <3>;
110 device_type = "pci";
111 interrupts = <16 2 1 13>;
112 interrupt-map-mask = <0xf800 0 0 7>;
113 interrupt-map = <
114 /* IDSEL 0x0 */
115 0000 0 0 1 &mpic 42 1 0 0
116 0000 0 0 2 &mpic 9 1 0 0
117 0000 0 0 3 &mpic 10 1 0 0
118 0000 0 0 4 &mpic 11 1 0 0
119 >;
120 };
121};
122
123&dcsr {
124 #address-cells = <1>;
125 #size-cells = <1>;
126 compatible = "fsl,dcsr", "simple-bus";
127
128 dcsr-epu@0 {
129 compatible = "fsl,dcsr-epu";
130 interrupts = <52 2 0 0
131 84 2 0 0
132 85 2 0 0>;
133 reg = <0x0 0x1000>;
134 };
135 dcsr-npc {
136 compatible = "fsl,dcsr-npc";
137 reg = <0x1000 0x1000 0x1000000 0x8000>;
138 };
139 dcsr-nxc@2000 {
140 compatible = "fsl,dcsr-nxc";
141 reg = <0x2000 0x1000>;
142 };
143 dcsr-corenet {
144 compatible = "fsl,dcsr-corenet";
145 reg = <0x8000 0x1000 0xB0000 0x1000>;
146 };
147 dcsr-dpaa@9000 {
148 compatible = "fsl,p5040-dcsr-dpaa", "fsl,dcsr-dpaa";
149 reg = <0x9000 0x1000>;
150 };
151 dcsr-ocn@11000 {
152 compatible = "fsl,p5040-dcsr-ocn", "fsl,dcsr-ocn";
153 reg = <0x11000 0x1000>;
154 };
155 dcsr-ddr@12000 {
156 compatible = "fsl,dcsr-ddr";
157 dev-handle = <&ddr1>;
158 reg = <0x12000 0x1000>;
159 };
160 dcsr-ddr@13000 {
161 compatible = "fsl,dcsr-ddr";
162 dev-handle = <&ddr2>;
163 reg = <0x13000 0x1000>;
164 };
165 dcsr-nal@18000 {
166 compatible = "fsl,p5040-dcsr-nal", "fsl,dcsr-nal";
167 reg = <0x18000 0x1000>;
168 };
169 dcsr-rcpm@22000 {
170 compatible = "fsl,p5040-dcsr-rcpm", "fsl,dcsr-rcpm";
171 reg = <0x22000 0x1000>;
172 };
173 dcsr-cpu-sb-proxy@40000 {
174 compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
175 cpu-handle = <&cpu0>;
176 reg = <0x40000 0x1000>;
177 };
178 dcsr-cpu-sb-proxy@41000 {
179 compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
180 cpu-handle = <&cpu1>;
181 reg = <0x41000 0x1000>;
182 };
183 dcsr-cpu-sb-proxy@42000 {
184 compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
185 cpu-handle = <&cpu2>;
186 reg = <0x42000 0x1000>;
187 };
188 dcsr-cpu-sb-proxy@43000 {
189 compatible = "fsl,dcsr-e5500-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
190 cpu-handle = <&cpu3>;
191 reg = <0x43000 0x1000>;
192 };
193};
194
195&soc {
196 #address-cells = <1>;
197 #size-cells = <1>;
198 device_type = "soc";
199 compatible = "simple-bus";
200
201 soc-sram-error {
202 compatible = "fsl,soc-sram-error";
203 interrupts = <16 2 1 29>;
204 };
205
206 corenet-law@0 {
207 compatible = "fsl,corenet-law";
208 reg = <0x0 0x1000>;
209 fsl,num-laws = <32>;
210 };
211
212 ddr1: memory-controller@8000 {
213 compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
214 reg = <0x8000 0x1000>;
215 interrupts = <16 2 1 23>;
216 };
217
218 ddr2: memory-controller@9000 {
219 compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
220 reg = <0x9000 0x1000>;
221 interrupts = <16 2 1 22>;
222 };
223
224 cpc: l3-cache-controller@10000 {
225 compatible = "fsl,p5040-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache";
226 reg = <0x10000 0x1000
227 0x11000 0x1000>;
228 interrupts = <16 2 1 27
229 16 2 1 26>;
230 };
231
232 corenet-cf@18000 {
233 compatible = "fsl,corenet-cf";
234 reg = <0x18000 0x1000>;
235 interrupts = <16 2 1 31>;
236 fsl,ccf-num-csdids = <32>;
237 fsl,ccf-num-snoopids = <32>;
238 };
239
240 iommu@20000 {
241 compatible = "fsl,pamu-v1.0", "fsl,pamu";
242 reg = <0x20000 0x5000>;
243 interrupts = <
244 24 2 0 0
245 16 2 1 30>;
246 };
247
248/include/ "qoriq-mpic.dtsi"
249
250 guts: global-utilities@e0000 {
251 compatible = "fsl,p5040-device-config", "fsl,qoriq-device-config-1.0";
252 reg = <0xe0000 0xe00>;
253 fsl,has-rstcr;
254 #sleep-cells = <1>;
255 fsl,liodn-bits = <12>;
256 };
257
258 pins: global-utilities@e0e00 {
259 compatible = "fsl,p5040-pin-control", "fsl,qoriq-pin-control-1.0";
260 reg = <0xe0e00 0x200>;
261 #sleep-cells = <2>;
262 };
263
264 clockgen: global-utilities@e1000 {
265 compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0";
266 reg = <0xe1000 0x1000>;
267 clock-frequency = <0>;
268 };
269
270 rcpm: global-utilities@e2000 {
271 compatible = "fsl,p5040-rcpm", "fsl,qoriq-rcpm-1.0";
272 reg = <0xe2000 0x1000>;
273 #sleep-cells = <1>;
274 };
275
276 sfp: sfp@e8000 {
277 compatible = "fsl,p5040-sfp", "fsl,qoriq-sfp-1.0";
278 reg = <0xe8000 0x1000>;
279 };
280
281 serdes: serdes@ea000 {
282 compatible = "fsl,p5040-serdes";
283 reg = <0xea000 0x1000>;
284 };
285
286/include/ "qoriq-dma-0.dtsi"
287/include/ "qoriq-dma-1.dtsi"
288/include/ "qoriq-espi-0.dtsi"
289 spi@110000 {
290 fsl,espi-num-chipselects = <4>;
291 };
292
293/include/ "qoriq-esdhc-0.dtsi"
294 sdhc@114000 {
295 sdhci,auto-cmd12;
296 };
297
298/include/ "qoriq-i2c-0.dtsi"
299/include/ "qoriq-i2c-1.dtsi"
300/include/ "qoriq-duart-0.dtsi"
301/include/ "qoriq-duart-1.dtsi"
302/include/ "qoriq-gpio-0.dtsi"
303/include/ "qoriq-usb2-mph-0.dtsi"
304 usb0: usb@210000 {
305 compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
306 phy_type = "utmi";
307 port0;
308 };
309
310/include/ "qoriq-usb2-dr-0.dtsi"
311 usb1: usb@211000 {
312 compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
313 dr_mode = "host";
314 phy_type = "utmi";
315 };
316
317/include/ "qoriq-sata2-0.dtsi"
318/include/ "qoriq-sata2-1.dtsi"
319/include/ "qoriq-sec5.2-0.dtsi"
320};
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi
new file mode 100644
index 000000000000..40ca943f5d1c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi
@@ -0,0 +1,114 @@
1/*
2 * P5040 Silicon/SoC Device Tree Source (pre include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * This software is provided by Freescale Semiconductor "as is" and any
24 * express or implied warranties, including, but not limited to, the implied
25 * warranties of merchantability and fitness for a particular purpose are
26 * disclaimed. In no event shall Freescale Semiconductor be liable for any
27 * direct, indirect, incidental, special, exemplary, or consequential damages
28 * (including, but not limited to, procurement of substitute goods or services;
29 * loss of use, data, or profits; or business interruption) however caused and
30 * on any theory of liability, whether in contract, strict liability, or tort
31 * (including negligence or otherwise) arising in any way out of the use of this
32 * software, even if advised of the possibility of such damage.
33 */
34
35/dts-v1/;
36
37/include/ "e5500_power_isa.dtsi"
38
39/ {
40 compatible = "fsl,P5040";
41 #address-cells = <2>;
42 #size-cells = <2>;
43 interrupt-parent = <&mpic>;
44
45 aliases {
46 ccsr = &soc;
47 dcsr = &dcsr;
48
49 serial0 = &serial0;
50 serial1 = &serial1;
51 serial2 = &serial2;
52 serial3 = &serial3;
53 pci0 = &pci0;
54 pci1 = &pci1;
55 pci2 = &pci2;
56 usb0 = &usb0;
57 usb1 = &usb1;
58 dma0 = &dma0;
59 dma1 = &dma1;
60 sdhc = &sdhc;
61 msi0 = &msi0;
62 msi1 = &msi1;
63 msi2 = &msi2;
64
65 crypto = &crypto;
66 sec_jr0 = &sec_jr0;
67 sec_jr1 = &sec_jr1;
68 sec_jr2 = &sec_jr2;
69 sec_jr3 = &sec_jr3;
70 rtic_a = &rtic_a;
71 rtic_b = &rtic_b;
72 rtic_c = &rtic_c;
73 rtic_d = &rtic_d;
74 sec_mon = &sec_mon;
75 };
76
77 cpus {
78 #address-cells = <1>;
79 #size-cells = <0>;
80
81 cpu0: PowerPC,e5500@0 {
82 device_type = "cpu";
83 reg = <0>;
84 next-level-cache = <&L2_0>;
85 L2_0: l2-cache {
86 next-level-cache = <&cpc>;
87 };
88 };
89 cpu1: PowerPC,e5500@1 {
90 device_type = "cpu";
91 reg = <1>;
92 next-level-cache = <&L2_1>;
93 L2_1: l2-cache {
94 next-level-cache = <&cpc>;
95 };
96 };
97 cpu2: PowerPC,e5500@2 {
98 device_type = "cpu";
99 reg = <2>;
100 next-level-cache = <&L2_2>;
101 L2_2: l2-cache {
102 next-level-cache = <&cpc>;
103 };
104 };
105 cpu3: PowerPC,e5500@3 {
106 device_type = "cpu";
107 reg = <3>;
108 next-level-cache = <&L2_3>;
109 L2_3: l2-cache {
110 next-level-cache = <&cpc>;
111 };
112 };
113 };
114};
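
For orientation only (editor's sketch, not part of the patch): a board-level .dts consumes
the pre/post pair roughly as below. This is an illustrative skeleton, not the p5040ds.dts
added elsewhere in this series; the real board file also defines the lbc, pci0-pci2 and
dcsr nodes that p5040si-post.dtsi extends by label, plus real memory and CCSR addresses.

	/include/ "fsl/p5040si-pre.dtsi"

	/ {
		model = "fsl,P5040DS";		/* placeholder */
		compatible = "fsl,P5040DS";	/* placeholder */

		memory {
			device_type = "memory";
		};

		soc: soc@ffe000000 {
			/* placeholder CCSR window */
			ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
		};
	};

	/include/ "fsl/p5040si-post.dtsi"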
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi
new file mode 100644
index 000000000000..7b2ab8a8c1f4
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec5.2-0.dtsi
@@ -0,0 +1,118 @@
1/*
2 * QorIQ Sec/Crypto 5.2 device tree stub [ controller @ offset 0x300000 ]
3 *
4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35crypto: crypto@300000 {
36 compatible = "fsl,sec-v5.2", "fsl,sec-v5.0", "fsl,sec-v4.0";
37 #address-cells = <1>;
38 #size-cells = <1>;
39 reg = <0x300000 0x10000>;
40 ranges = <0 0x300000 0x10000>;
41 interrupts = <92 2 0 0>;
42
43 sec_jr0: jr@1000 {
44 compatible = "fsl,sec-v5.2-job-ring",
45 "fsl,sec-v5.0-job-ring",
46 "fsl,sec-v4.0-job-ring";
47 reg = <0x1000 0x1000>;
48 interrupts = <88 2 0 0>;
49 };
50
51 sec_jr1: jr@2000 {
52 compatible = "fsl,sec-v5.2-job-ring",
53 "fsl,sec-v5.0-job-ring",
54 "fsl,sec-v4.0-job-ring";
55 reg = <0x2000 0x1000>;
56 interrupts = <89 2 0 0>;
57 };
58
59 sec_jr2: jr@3000 {
60 compatible = "fsl,sec-v5.2-job-ring",
61 "fsl,sec-v5.0-job-ring",
62 "fsl,sec-v4.0-job-ring";
63 reg = <0x3000 0x1000>;
64 interrupts = <90 2 0 0>;
65 };
66
67 sec_jr3: jr@4000 {
68 compatible = "fsl,sec-v5.2-job-ring",
69 "fsl,sec-v5.0-job-ring",
70 "fsl,sec-v4.0-job-ring";
71 reg = <0x4000 0x1000>;
72 interrupts = <91 2 0 0>;
73 };
74
75 rtic@6000 {
76 compatible = "fsl,sec-v5.2-rtic",
77 "fsl,sec-v5.0-rtic",
78 "fsl,sec-v4.0-rtic";
79 #address-cells = <1>;
80 #size-cells = <1>;
81 reg = <0x6000 0x100>;
82 ranges = <0x0 0x6100 0xe00>;
83
84 rtic_a: rtic-a@0 {
85 compatible = "fsl,sec-v5.2-rtic-memory",
86 "fsl,sec-v5.0-rtic-memory",
87 "fsl,sec-v4.0-rtic-memory";
88 reg = <0x00 0x20 0x100 0x80>;
89 };
90
91 rtic_b: rtic-b@20 {
92 compatible = "fsl,sec-v5.2-rtic-memory",
93 "fsl,sec-v5.0-rtic-memory",
94 "fsl,sec-v4.0-rtic-memory";
95 reg = <0x20 0x20 0x200 0x80>;
96 };
97
98 rtic_c: rtic-c@40 {
99 compatible = "fsl,sec-v5.2-rtic-memory",
100 "fsl,sec-v5.0-rtic-memory",
101 "fsl,sec-v4.0-rtic-memory";
102 reg = <0x40 0x20 0x300 0x80>;
103 };
104
105 rtic_d: rtic-d@60 {
106 compatible = "fsl,sec-v5.2-rtic-memory",
107 "fsl,sec-v5.0-rtic-memory",
108 "fsl,sec-v4.0-rtic-memory";
109 reg = <0x60 0x20 0x500 0x80>;
110 };
111 };
112};
113
114sec_mon: sec_mon@314000 {
115 compatible = "fsl,sec-v5.2-mon", "fsl,sec-v5.0-mon", "fsl,sec-v4.0-mon";
116 reg = <0x314000 0x1000>;
117 interrupts = <93 2 0 0>;
118};
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
index d304a2d68c62..7c3dde84d193 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -132,6 +132,10 @@
 			reg = <0x68>;
 			interrupts = <0 0x1 0 0>;
 		};
+		adt7461@4c {
+			compatible = "adi,adt7461";
+			reg = <0x4c>;
+		};
 	};
 
 	spi@7000 {
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index f99fb110c97f..2d31863accf5 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -11,6 +11,8 @@
 
 /dts-v1/;
 
+/include/ "fsl/e500v2_power_isa.dtsi"
+
 / {
 	model = "MPC8540ADS";
 	compatible = "MPC8540ADS", "MPC85xxADS";
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 0f5e93912799..1c03c2667373 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -11,6 +11,8 @@
 
 /dts-v1/;
 
+/include/ "fsl/e500v2_power_isa.dtsi"
+
 / {
 	model = "MPC8541CDS";
 	compatible = "MPC8541CDS", "MPC85xxCDS";
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index e934987e882b..ed38874c3a36 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -20,8 +20,10 @@
 		reg = <0 0 0 0>;	// Filled by U-Boot
 	};
 
-	lbc: localbus@e0005000 {
+	board_lbc: lbc: localbus@e0005000 {
 		reg = <0 0xe0005000 0 0x1000>;
+
+		ranges = <0x0 0x0 0x0 0xff800000 0x800000>;
 	};
 
 	board_soc: soc: soc8544@e0000000 {
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dtsi b/arch/powerpc/boot/dts/mpc8544ds.dtsi
index 77ebc9f1d37c..b219d035d794 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8544ds.dtsi
@@ -32,6 +32,45 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+&board_lbc {
+	nor@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x800000>;
+		bank-width = <2>;
+		device-width = <1>;
+
+		partition@0 {
+			reg = <0x0 0x10000>;
+			label = "dtb-nor";
+		};
+
+		partition@20000 {
+			reg = <0x20000 0x30000>;
+			label = "diagnostic-nor";
+			read-only;
+		};
+
+		partition@200000 {
+			reg = <0x200000 0x200000>;
+			label = "dink-nor";
+			read-only;
+		};
+
+		partition@400000 {
+			reg = <0x400000 0x380000>;
+			label = "kernel-nor";
+		};
+
+		partition@780000 {
+			reg = <0x780000 0x80000>;
+			label = "u-boot-nor";
+			read-only;
+		};
+	};
+};
+
 &board_soc {
 	enet0: ethernet@24000 {
 		phy-handle = <&phy0>;
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index fe10438613d6..36a7ea138c2f 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -11,6 +11,8 @@
11 11
12/dts-v1/; 12/dts-v1/;
13 13
14/include/ "fsl/e500v2_power_isa.dtsi"
15
14/ { 16/ {
15 model = "MPC8555CDS"; 17 model = "MPC8555CDS";
16 compatible = "MPC8555CDS", "MPC85xxCDS"; 18 compatible = "MPC8555CDS", "MPC85xxCDS";
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index 6e85e1ba0851..1a43f5a968f5 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -11,6 +11,8 @@
11 11
12/dts-v1/; 12/dts-v1/;
13 13
14/include/ "fsl/e500v2_power_isa.dtsi"
15
14/ { 16/ {
15 model = "MPC8560ADS"; 17 model = "MPC8560ADS";
16 compatible = "MPC8560ADS", "MPC85xxADS"; 18 compatible = "MPC8560ADS", "MPC85xxADS";
diff --git a/arch/powerpc/boot/dts/o2d.dts b/arch/powerpc/boot/dts/o2d.dts
new file mode 100644
index 000000000000..9f6dd4d889b3
--- /dev/null
+++ b/arch/powerpc/boot/dts/o2d.dts
@@ -0,0 +1,47 @@
1/*
2 * O2D Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "o2d.dtsi"
14
15/ {
16 model = "ifm,o2d";
17 compatible = "ifm,o2d";
18
19 memory {
20 reg = <0x00000000 0x08000000>; // 128MB
21 };
22
23 localbus {
24 ranges = <0 0 0xfc000000 0x02000000
25 3 0 0xe3000000 0x00100000>;
26
27 flash@0,0 {
28 compatible = "cfi-flash";
29 reg = <0 0 0x02000000>;
30 bank-width = <2>;
31 device-width = <2>;
32 #size-cells = <1>;
33 #address-cells = <1>;
34
35 partition@60000 {
36 label = "kernel";
37 reg = <0x00060000 0x00260000>;
38 read-only;
39 };
40 /* o2d specific partitions */
41 partition@2c0000 {
42 label = "o2d user defined";
43 reg = <0x002c0000 0x01d40000>;
44 };
45 };
46 };
47};
diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi
new file mode 100644
index 000000000000..3444eb8f0ade
--- /dev/null
+++ b/arch/powerpc/boot/dts/o2d.dtsi
@@ -0,0 +1,139 @@
1/*
2 * O2D base Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "mpc5200b.dtsi"
14
15/ {
16 model = "ifm,o2d";
17 compatible = "ifm,o2d";
18
19 memory {
20 reg = <0x00000000 0x04000000>; // 64MB
21 };
22
23 soc5200@f0000000 {
24
25 gpio_simple: gpio@b00 {
26 };
27
28 timer@600 { // General Purpose Timer
29 #gpio-cells = <2>;
30 gpio-controller;
31 fsl,has-wdt;
32 fsl,wdt-on-boot = <0>;
33 };
34
35 timer@610 {
36 #gpio-cells = <2>;
37 gpio-controller;
38 };
39
40 timer7: timer@670 {
41 };
42
43 rtc@800 {
44 status = "disabled";
45 };
46
47 psc@2000 { // PSC1
48 compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
49 #address-cells = <1>;
50 #size-cells = <0>;
51 cell-index = <0>;
52
53 spidev@0 {
54 compatible = "spidev";
55 spi-max-frequency = <250000>;
56 reg = <0>;
57 };
58 };
59
60 psc@2200 { // PSC2
61 status = "disabled";
62 };
63
64 psc@2400 { // PSC3
65 status = "disabled";
66 };
67
68 psc@2600 { // PSC4
69 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
70 };
71
72 psc@2800 { // PSC5
73 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
74 };
75
76 psc@2c00 { // PSC6
77 status = "disabled";
78 };
79
80 ethernet@3000 {
81 phy-handle = <&phy0>;
82 };
83
84 mdio@3000 {
85 phy0: ethernet-phy@0 {
86 reg = <0>;
87 };
88 };
89
90 sclpc@3c00 {
91 compatible = "fsl,mpc5200-lpbfifo";
92 reg = <0x3c00 0x60>;
93 interrupts = <3 23 0>;
94 };
95 };
96
97 localbus {
98 ranges = <0 0 0xff000000 0x01000000
99 3 0 0xe3000000 0x00100000>;
100
101 // flash device at LocalPlus Bus CS0
102 flash@0,0 {
103 compatible = "cfi-flash";
104 reg = <0 0 0x01000000>;
105 bank-width = <1>;
106 device-width = <2>;
107 #size-cells = <1>;
108 #address-cells = <1>;
109 no-unaligned-direct-access;
110
111 /* common layout for all machines */
112 partition@0 {
113 label = "u-boot";
114 reg = <0x00000000 0x00040000>;
115 read-only;
116 };
117 partition@40000 {
118 label = "env";
119 reg = <0x00040000 0x00020000>;
120 read-only;
121 };
122 };
123
124 csi@3,0 {
125 compatible = "ifm,o2d-csi";
126 reg = <3 0 0x00100000>;
127 ifm,csi-clk-handle = <&timer7>;
128 gpios = <&gpio_simple 23 0 /* imag_capture */
129 &gpio_simple 26 0 /* imag_reset */
130 &gpio_simple 29 0>; /* imag_master_en */
131
132 interrupts = <1 1 2>; /* IRQ1, edge falling */
133
134 ifm,csi-addr-bus-width = <24>;
135 ifm,csi-data-bus-width = <8>;
136 ifm,csi-wait-cycles = <0>;
137 };
138 };
139};
diff --git a/arch/powerpc/boot/dts/o2d300.dts b/arch/powerpc/boot/dts/o2d300.dts
new file mode 100644
index 000000000000..29affe0f0da3
--- /dev/null
+++ b/arch/powerpc/boot/dts/o2d300.dts
@@ -0,0 +1,52 @@
1/*
2 * O2D300 Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "o2d.dtsi"
14
15/ {
16 model = "ifm,o2d300";
17 compatible = "ifm,o2d";
18
19 localbus {
20 ranges = <0 0 0xfc000000 0x02000000
21 3 0 0xe3000000 0x00100000>;
22 flash@0,0 {
23 compatible = "cfi-flash";
24 reg = <0 0 0x02000000>;
25 bank-width = <2>;
26 device-width = <2>;
27 #size-cells = <1>;
28 #address-cells = <1>;
29
30 partition@40000 {
31 label = "env_1";
32 reg = <0x00040000 0x00020000>;
33 read-only;
34 };
35 partition@60000 {
36 label = "env_2";
37 reg = <0x00060000 0x00020000>;
38 read-only;
39 };
40 partition@80000 {
41 label = "kernel";
42 reg = <0x00080000 0x00260000>;
43 read-only;
44 };
45 /* o2d300 specific partitions */
46 partition@2e0000 {
47 label = "o2d300 user defined";
48 reg = <0x002e0000 0x01d20000>;
49 };
50 };
51 };
52};
diff --git a/arch/powerpc/boot/dts/o2dnt2.dts b/arch/powerpc/boot/dts/o2dnt2.dts
new file mode 100644
index 000000000000..a0f5b97a4f06
--- /dev/null
+++ b/arch/powerpc/boot/dts/o2dnt2.dts
@@ -0,0 +1,48 @@
1/*
2 * O2DNT2 Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "o2d.dtsi"
14
15/ {
16 model = "ifm,o2dnt2";
17 compatible = "ifm,o2d";
18
19 memory {
20 reg = <0x00000000 0x08000000>; // 128MB
21 };
22
23 localbus {
24 ranges = <0 0 0xfc000000 0x02000000
25 3 0 0xe3000000 0x00100000>;
26
27 flash@0,0 {
28 compatible = "cfi-flash";
29 reg = <0 0 0x02000000>;
30 bank-width = <2>;
31 device-width = <2>;
32 #size-cells = <1>;
33 #address-cells = <1>;
34
35 partition@60000 {
36 label = "kernel";
37 reg = <0x00060000 0x00260000>;
38 read-only;
39 };
40
41 /* o2dnt2 specific partitions */
42 partition@2c0000 {
43 label = "o2dnt2 user defined";
44 reg = <0x002c0000 0x01d40000>;
45 };
46 };
47 };
48};
diff --git a/arch/powerpc/boot/dts/o2i.dts b/arch/powerpc/boot/dts/o2i.dts
new file mode 100644
index 000000000000..e3cc99d1360b
--- /dev/null
+++ b/arch/powerpc/boot/dts/o2i.dts
@@ -0,0 +1,33 @@
1/*
2 * O2I Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "o2d.dtsi"
14
15/ {
16 model = "ifm,o2i";
17 compatible = "ifm,o2d";
18
19 localbus {
20 flash@0,0 {
21 partition@60000 {
22 label = "kernel";
23 reg = <0x00060000 0x00260000>;
24 read-only;
25 };
26 /* o2i specific partitions */
27 partition@2c0000 {
28 label = "o2i user defined";
29 reg = <0x002c0000 0x00d40000>;
30 };
31 };
32 };
33};
diff --git a/arch/powerpc/boot/dts/o2mnt.dts b/arch/powerpc/boot/dts/o2mnt.dts
new file mode 100644
index 000000000000..d91859a9e940
--- /dev/null
+++ b/arch/powerpc/boot/dts/o2mnt.dts
@@ -0,0 +1,33 @@
1/*
2 * O2MNT Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "o2d.dtsi"
14
15/ {
16 model = "ifm,o2mnt";
17 compatible = "ifm,o2d";
18
19 localbus {
20 flash@0,0 {
21 partition@60000 {
22 label = "kernel";
23 reg = <0x00060000 0x00260000>;
24 read-only;
25 };
26 /* add o2mnt specific partitions */
27 partition@2c0000 {
28 label = "o2mnt user defined";
29 reg = <0x002c0000 0x00d40000>;
30 };
31 };
32 };
33};
diff --git a/arch/powerpc/boot/dts/o3dnt.dts b/arch/powerpc/boot/dts/o3dnt.dts
new file mode 100644
index 000000000000..acce49326491
--- /dev/null
+++ b/arch/powerpc/boot/dts/o3dnt.dts
@@ -0,0 +1,48 @@
1/*
2 * O3DNT Device Tree Source
3 *
4 * Copyright (C) 2012 DENX Software Engineering
5 * Anatolij Gustschin <agust@denx.de>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13/include/ "o2d.dtsi"
14
15/ {
16 model = "ifm,o3dnt";
17 compatible = "ifm,o2d";
18
19 memory {
20 reg = <0x00000000 0x04000000>; // 64MB
21 };
22
23 localbus {
24 ranges = <0 0 0xfc000000 0x01000000
25 3 0 0xe3000000 0x00100000>;
26
27 flash@0,0 {
28 compatible = "cfi-flash";
29 reg = <0 0 0x01000000>;
30 bank-width = <2>;
31 device-width = <2>;
32 #size-cells = <1>;
33 #address-cells = <1>;
34
35 partition@60000 {
36 label = "kernel";
37 reg = <0x00060000 0x00260000>;
38 read-only;
39 };
40
41 /* o3dnt specific partitions */
42 partition@2c0000 {
43 label = "o3dnt user defined";
44 reg = <0x002c0000 0x00d40000>;
45 };
46 };
47 };
48};
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
deleted file mode 100644
index 41b4585c5da8..000000000000
--- a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * P1020 RDB Core0 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared, all the other devices must be assigned to one core only.
6 * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb,
7 * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi.
8 *
9 * Please note to add "-b 0" for core0's dts compiling.
10 *
11 * Copyright 2011 Freescale Semiconductor Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19/include/ "p1020rdb.dts"
20
21/ {
22 model = "fsl,P1020RDB";
23 compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP";
24
25 aliases {
26 ethernet1 = &enet1;
27 ethernet2 = &enet2;
28 serial0 = &serial0;
29 pci0 = &pci0;
30 pci1 = &pci1;
31 };
32
33 cpus {
34 PowerPC,P1020@1 {
35 status = "disabled";
36 };
37 };
38
39 memory {
40 device_type = "memory";
41 };
42
43 localbus@ffe05000 {
44 status = "disabled";
45 };
46
47 soc@ffe00000 {
48 serial1: serial@4600 {
49 status = "disabled";
50 };
51
52 enet0: ethernet@b0000 {
53 status = "disabled";
54 };
55
56 mpic: pic@40000 {
57 protected-sources = <
58 42 29 30 34 /* serial1, enet0-queue-group0 */
59 17 18 24 45 /* enet0-queue-group1, crypto */
60 >;
61 };
62 };
63};
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
deleted file mode 100644
index 517453821884..000000000000
--- a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
+++ /dev/null
@@ -1,141 +0,0 @@
1/*
2 * P1020 RDB Core1 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared, all the other devices must be assigned to one core only.
6 * This dts allows core1 to have l2, eth0, crypto.
7 *
8 * Please note to add "-b 1" for core1's dts compiling.
9 *
10 * Copyright 2011 Freescale Semiconductor Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18/include/ "p1020rdb.dts"
19
20/ {
21 model = "fsl,P1020RDB";
22 compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP";
23
24 aliases {
25 ethernet0 = &enet0;
26 serial0 = &serial1;
27 };
28
29 cpus {
30 PowerPC,P1020@0 {
31 status = "disabled";
32 };
33 };
34
35 memory {
36 device_type = "memory";
37 };
38
39 localbus@ffe05000 {
40 status = "disabled";
41 };
42
43 soc@ffe00000 {
44 ecm-law@0 {
45 status = "disabled";
46 };
47
48 ecm@1000 {
49 status = "disabled";
50 };
51
52 memory-controller@2000 {
53 status = "disabled";
54 };
55
56 i2c@3000 {
57 status = "disabled";
58 };
59
60 i2c@3100 {
61 status = "disabled";
62 };
63
64 serial0: serial@4500 {
65 status = "disabled";
66 };
67
68 spi@7000 {
69 status = "disabled";
70 };
71
72 gpio: gpio-controller@f000 {
73 status = "disabled";
74 };
75
76 dma@21300 {
77 status = "disabled";
78 };
79
80 mdio@24000 {
81 status = "disabled";
82 };
83
84 mdio@25000 {
85 status = "disabled";
86 };
87
88 enet1: ethernet@b1000 {
89 status = "disabled";
90 };
91
92 enet2: ethernet@b2000 {
93 status = "disabled";
94 };
95
96 usb@22000 {
97 status = "disabled";
98 };
99
100 sdhci@2e000 {
101 status = "disabled";
102 };
103
104 mpic: pic@40000 {
105 protected-sources = <
106 16 /* ecm, mem, L2, pci0, pci1 */
107 43 42 59 /* i2c, serial0, spi */
108 47 63 62 /* gpio, tdm */
109 20 21 22 23 /* dma */
110 03 02 /* mdio */
111 35 36 40 /* enet1-queue-group0 */
112 51 52 67 /* enet1-queue-group1 */
113 31 32 33 /* enet2-queue-group0 */
114 25 26 27 /* enet2-queue-group1 */
115 28 72 58 /* usb, sdhci, crypto */
116 0xb0 0xb1 0xb2 /* message */
117 0xb3 0xb4 0xb5
118 0xb6 0xb7
119 0xe0 0xe1 0xe2 /* msi */
120 0xe3 0xe4 0xe5
121 0xe6 0xe7 /* sdhci, crypto , pci */
122 >;
123 };
124
125 msi@41600 {
126 status = "disabled";
127 };
128
129 global-utilities@e0000 { //global utilities block
130 status = "disabled";
131 };
132 };
133
134 pci0: pcie@ffe09000 {
135 status = "disabled";
136 };
137
138 pci1: pcie@ffe0a000 {
139 status = "disabled";
140 };
141};
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
index c3344b04d8ff..873da350d01b 100644
--- a/arch/powerpc/boot/dts/p1022ds.dtsi
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -149,6 +149,10 @@
149 compatible = "dallas,ds1339"; 149 compatible = "dallas,ds1339";
150 reg = <0x68>; 150 reg = <0x68>;
151 }; 151 };
152 adt7461@4c {
153 compatible = "adi,adt7461";
154 reg = <0x4c>;
155 };
152 }; 156 };
153 157
154 spi@7000 { 158 spi@7000 {
diff --git a/arch/powerpc/boot/dts/p1022rdk.dts b/arch/powerpc/boot/dts/p1022rdk.dts
new file mode 100644
index 000000000000..51d82de223f3
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022rdk.dts
@@ -0,0 +1,188 @@
1/*
2 * P1022 RDK 32-bit Physical Address Map Device Tree Source
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1022si-pre.dtsi"
36/ {
37 model = "fsl,P1022RDK";
38 compatible = "fsl,P1022RDK";
39
40 memory {
41 device_type = "memory";
42 };
43
44 board_lbc: lbc: localbus@ffe05000 {
45 /* The P1022 RDK does not have any localbus devices */
46 status = "disabled";
47 };
48
49 board_soc: soc: soc@ffe00000 {
50 ranges = <0x0 0x0 0xffe00000 0x100000>;
51
52 i2c@3100 {
53 wm8960:codec@1a {
54 compatible = "wlf,wm8960";
55 reg = <0x1a>;
56 /* MCLK source is a stand-alone oscillator */
57 clock-frequency = <12288000>;
58 };
59 rtc@68 {
60 compatible = "stm,m41t62";
61 reg = <0x68>;
62 };
63 adt7461@4c{
64 compatible = "adi,adt7461";
65 reg = <0x4c>;
66 };
67 zl6100@21{
68 compatible = "isil,zl6100";
69 reg = <0x21>;
70 };
71 zl6100@24{
72 compatible = "isil,zl6100";
73 reg = <0x24>;
74 };
75 zl6100@26{
76 compatible = "isil,zl6100";
77 reg = <0x26>;
78 };
79 zl6100@29{
80 compatible = "isil,zl6100";
81 reg = <0x29>;
82 };
83 };
84
85 spi@7000 {
86 flash@0 {
87 #address-cells = <1>;
88 #size-cells = <1>;
89 compatible = "spansion,m25p80";
90 reg = <0>;
91 spi-max-frequency = <1000000>;
92 partition@0 {
93 label = "full-spi-flash";
94 reg = <0x00000000 0x00100000>;
95 };
96 };
97 };
98
99 ssi@15000 {
100 fsl,mode = "i2s-slave";
101 codec-handle = <&wm8960>;
102 };
103
104 usb@22000 {
105 phy_type = "ulpi";
106 };
107
108 usb@23000 {
109 phy_type = "ulpi";
110 };
111
112 mdio@24000 {
113 phy0: ethernet-phy@0 {
114 interrupts = <3 1 0 0>;
115 reg = <0x1>;
116 };
117 phy1: ethernet-phy@1 {
118 interrupts = <9 1 0 0>;
119 reg = <0x2>;
120 };
121 };
122
123 mdio@25000 {
124 tbi0: tbi-phy@11 {
125 reg = <0x11>;
126 device_type = "tbi-phy";
127 };
128 };
129
130 ethernet@b0000 {
131 phy-handle = <&phy0>;
132 phy-connection-type = "rgmii-id";
133 };
134
135 ethernet@b1000 {
136 phy-handle = <&phy1>;
137 tbi-handle = <&tbi0>;
138 phy-connection-type = "sgmii";
139 };
140 };
141
142 pci0: pcie@ffe09000 {
143 ranges = <0x2000000 0x0 0xe0000000 0 0xa0000000 0x0 0x20000000
144 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
145 reg = <0x0 0xffe09000 0 0x1000>;
146 pcie@0 {
147 ranges = <0x2000000 0x0 0xe0000000
148 0x2000000 0x0 0xe0000000
149 0x0 0x20000000
150
151 0x1000000 0x0 0x0
152 0x1000000 0x0 0x0
153 0x0 0x100000>;
154 };
155 };
156
157 pci1: pcie@ffe0a000 {
158 ranges = <0x2000000 0x0 0xe0000000 0 0xc0000000 0x0 0x20000000
159 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
160 reg = <0 0xffe0a000 0 0x1000>;
161 pcie@0 {
162 ranges = <0x2000000 0x0 0xe0000000
163 0x2000000 0x0 0xe0000000
164 0x0 0x20000000
165
166 0x1000000 0x0 0x0
167 0x1000000 0x0 0x0
168 0x0 0x100000>;
169 };
170 };
171
172 pci2: pcie@ffe0b000 {
173 ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
174 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
175 reg = <0 0xffe0b000 0 0x1000>;
176 pcie@0 {
177 ranges = <0x2000000 0x0 0xe0000000
178 0x2000000 0x0 0xe0000000
179 0x0 0x20000000
180
181 0x1000000 0x0 0x0
182 0x1000000 0x0 0x0
183 0x0 0x100000>;
184 };
185 };
186};
187
188/include/ "fsl/p1022si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
deleted file mode 100644
index 66aac864c4cc..000000000000
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * P2020 RDB Core0 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared, all the other devices must be assigned to one core only.
6 * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb,
7 * eth1, eth2, sdhc, crypto, global-util, pci0.
8 *
9 * Copyright 2009-2011 Freescale Semiconductor Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17/include/ "p2020rdb.dts"
18
19/ {
20 model = "fsl,P2020RDB";
21 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
22
23 cpus {
24 PowerPC,P2020@1 {
25 status = "disabled";
26 };
27 };
28
29 localbus@ffe05000 {
30 status = "disabled";
31 };
32
33 soc@ffe00000 {
34 serial1: serial@4600 {
35 status = "disabled";
36 };
37
38 dma@c300 {
39 status = "disabled";
40 };
41
42 enet0: ethernet@24000 {
43 status = "disabled";
44 };
45
46 mpic: pic@40000 {
47 protected-sources = <
48 42 76 77 78 79 /* serial1 , dma2 */
49 29 30 34 26 /* enet0, pci1 */
50 0xe0 0xe1 0xe2 0xe3 /* msi */
51 0xe4 0xe5 0xe6 0xe7
52 >;
53 };
54
55 msi@41600 {
56 status = "disabled";
57 };
58 };
59
60 pci0: pcie@ffe08000 {
61 status = "disabled";
62 };
63
64 pci2: pcie@ffe0a000 {
65 status = "disabled";
66 };
67};
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
deleted file mode 100644
index 9bd8ef493dd2..000000000000
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 * P2020 RDB Core1 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared, all the other devices must be assigned to one core only.
6 * This dts allows core1 to have l2, dma2, eth0, pci1, msi.
7 *
8 * Please note to add "-b 1" for core1's dts compiling.
9 *
10 * Copyright 2009-2011 Freescale Semiconductor Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18/include/ "p2020rdb.dts"
19
20/ {
21 model = "fsl,P2020RDB";
22 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
23
24 cpus {
25 PowerPC,P2020@0 {
26 status = "disabled";
27 };
28 };
29
30 localbus@ffe05000 {
31 status = "disabled";
32 };
33
34 soc@ffe00000 {
35 ecm-law@0 {
36 status = "disabled";
37 };
38
39 ecm@1000 {
40 status = "disabled";
41 };
42
43 memory-controller@2000 {
44 status = "disabled";
45 };
46
47 i2c@3000 {
48 status = "disabled";
49 };
50
51 i2c@3100 {
52 status = "disabled";
53 };
54
55 serial0: serial@4500 {
56 status = "disabled";
57 };
58
59 spi@7000 {
60 status = "disabled";
61 };
62
63 gpio: gpio-controller@f000 {
64 status = "disabled";
65 };
66
67 dma@21300 {
68 status = "disabled";
69 };
70
71 usb@22000 {
72 status = "disabled";
73 };
74
75 mdio@24520 {
76 status = "disabled";
77 };
78
79 mdio@25520 {
80 status = "disabled";
81 };
82
83 mdio@26520 {
84 status = "disabled";
85 };
86
87 enet1: ethernet@25000 {
88 status = "disabled";
89 };
90
91 enet2: ethernet@26000 {
92 status = "disabled";
93 };
94
95 sdhci@2e000 {
96 status = "disabled";
97 };
98
99 crypto@30000 {
100 status = "disabled";
101 };
102
103 mpic: pic@40000 {
104 protected-sources = <
105 17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */
106 16 20 21 22 23 28 /* L2, dma1, USB */
107 03 35 36 40 31 32 33 /* mdio, enet1, enet2 */
108 72 45 58 25 /* sdhci, crypto , pci */
109 >;
110 };
111
112 global-utilities@e0000 { //global utilities block
113 status = "disabled";
114 };
115
116 };
117
118 pci0: pcie@ffe08000 {
119 status = "disabled";
120 };
121
122 pci1: pcie@ffe09000 {
123 status = "disabled";
124 };
125};
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index baab0347dab0..d97ad74c7279 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -94,6 +94,10 @@
94 compatible = "pericom,pt7c4338"; 94 compatible = "pericom,pt7c4338";
95 reg = <0x68>; 95 reg = <0x68>;
96 }; 96 };
97 adt7461@4c {
98 compatible = "adi,adt7461";
99 reg = <0x4c>;
100 };
97 }; 101 };
98 102
99 i2c@118100 { 103 i2c@118100 {
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 6cdcadc80c30..2fed3bc0b990 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -98,6 +98,10 @@
98 reg = <0x68>; 98 reg = <0x68>;
99 interrupts = <0x1 0x1 0 0>; 99 interrupts = <0x1 0x1 0 0>;
100 }; 100 };
101 adt7461@4c {
102 compatible = "adi,adt7461";
103 reg = <0x4c>;
104 };
101 }; 105 };
102 }; 106 };
103 107
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 3e204609d02e..1cf6148b8b05 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -96,6 +96,10 @@
96 reg = <0x68>; 96 reg = <0x68>;
97 interrupts = <0x1 0x1 0 0>; 97 interrupts = <0x1 0x1 0 0>;
98 }; 98 };
99 adt7461@4c {
100 compatible = "adi,adt7461";
101 reg = <0x4c>;
102 };
99 }; 103 };
100 104
101 usb0: usb@210000 { 105 usb0: usb@210000 {
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index 27c07ed6adc1..2869fea717dd 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -98,6 +98,10 @@
98 reg = <0x68>; 98 reg = <0x68>;
99 interrupts = <0x1 0x1 0 0>; 99 interrupts = <0x1 0x1 0 0>;
100 }; 100 };
101 adt7461@4c {
102 compatible = "adi,adt7461";
103 reg = <0x4c>;
104 };
101 }; 105 };
102 }; 106 };
103 107
diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts
new file mode 100644
index 000000000000..860b5ccf76c0
--- /dev/null
+++ b/arch/powerpc/boot/dts/p5040ds.dts
@@ -0,0 +1,207 @@
1/*
2 * P5040DS Device Tree Source
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * This software is provided by Freescale Semiconductor "as is" and any
24 * express or implied warranties, including, but not limited to, the implied
25 * warranties of merchantability and fitness for a particular purpose are
26 * disclaimed. In no event shall Freescale Semiconductor be liable for any
27 * direct, indirect, incidental, special, exemplary, or consequential damages
28 * (including, but not limited to, procurement of substitute goods or services;
29 * loss of use, data, or profits; or business interruption) however caused and
30 * on any theory of liability, whether in contract, strict liability, or tort
31 * (including negligence or otherwise) arising in any way out of the use of this
32 * software, even if advised of the possibility of such damage.
33 */
34
35/include/ "fsl/p5040si-pre.dtsi"
36
37/ {
38 model = "fsl,P5040DS";
39 compatible = "fsl,P5040DS";
40 #address-cells = <2>;
41 #size-cells = <2>;
42 interrupt-parent = <&mpic>;
43
44 memory {
45 device_type = "memory";
46 };
47
48 dcsr: dcsr@f00000000 {
49 ranges = <0x00000000 0xf 0x00000000 0x01008000>;
50 };
51
52 soc: soc@ffe000000 {
53 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
54 reg = <0xf 0xfe000000 0 0x00001000>;
55 spi@110000 {
56 flash@0 {
57 #address-cells = <1>;
58 #size-cells = <1>;
59 compatible = "spansion,s25sl12801";
60 reg = <0>;
61 spi-max-frequency = <40000000>; /* input clock */
62 partition@u-boot {
63 label = "u-boot";
64 reg = <0x00000000 0x00100000>;
65 };
66 partition@kernel {
67 label = "kernel";
68 reg = <0x00100000 0x00500000>;
69 };
70 partition@dtb {
71 label = "dtb";
72 reg = <0x00600000 0x00100000>;
73 };
74 partition@fs {
75 label = "file system";
76 reg = <0x00700000 0x00900000>;
77 };
78 };
79 };
80
81 i2c@118100 {
82 eeprom@51 {
83 compatible = "at24,24c256";
84 reg = <0x51>;
85 };
86 eeprom@52 {
87 compatible = "at24,24c256";
88 reg = <0x52>;
89 };
90 };
91
92 i2c@119100 {
93 rtc@68 {
94 compatible = "dallas,ds3232";
95 reg = <0x68>;
96 interrupts = <0x1 0x1 0 0>;
97 };
98 adt7461@4c {
99 compatible = "adi,adt7461";
100 reg = <0x4c>;
101 };
102 };
103 };
104
105 lbc: localbus@ffe124000 {
106 reg = <0xf 0xfe124000 0 0x1000>;
107 ranges = <0 0 0xf 0xe8000000 0x08000000
108 2 0 0xf 0xffa00000 0x00040000
109 3 0 0xf 0xffdf0000 0x00008000>;
110
111 flash@0,0 {
112 compatible = "cfi-flash";
113 reg = <0 0 0x08000000>;
114 bank-width = <2>;
115 device-width = <2>;
116 };
117
118 nand@2,0 {
119 #address-cells = <1>;
120 #size-cells = <1>;
121 compatible = "fsl,elbc-fcm-nand";
122 reg = <0x2 0x0 0x40000>;
123
124 partition@0 {
125 label = "NAND U-Boot Image";
126 reg = <0x0 0x02000000>;
127 };
128
129 partition@2000000 {
130 label = "NAND Root File System";
131 reg = <0x02000000 0x10000000>;
132 };
133
134 partition@12000000 {
135 label = "NAND Compressed RFS Image";
136 reg = <0x12000000 0x08000000>;
137 };
138
139 partition@1a000000 {
140 label = "NAND Linux Kernel Image";
141 reg = <0x1a000000 0x04000000>;
142 };
143
144 partition@1e000000 {
145 label = "NAND DTB Image";
146 reg = <0x1e000000 0x01000000>;
147 };
148
149 partition@1f000000 {
150 label = "NAND Writable User area";
151 reg = <0x1f000000 0x01000000>;
152 };
153 };
154
155 board-control@3,0 {
156 compatible = "fsl,p5040ds-fpga", "fsl,fpga-ngpixis";
157 reg = <3 0 0x40>;
158 };
159 };
160
161 pci0: pcie@ffe200000 {
162 reg = <0xf 0xfe200000 0 0x1000>;
163 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
164 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
165 pcie@0 {
166 ranges = <0x02000000 0 0xe0000000
167 0x02000000 0 0xe0000000
168 0 0x20000000
169
170 0x01000000 0 0x00000000
171 0x01000000 0 0x00000000
172 0 0x00010000>;
173 };
174 };
175
176 pci1: pcie@ffe201000 {
177 reg = <0xf 0xfe201000 0 0x1000>;
178 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
179 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
180 pcie@0 {
181 ranges = <0x02000000 0 0xe0000000
182 0x02000000 0 0xe0000000
183 0 0x20000000
184
185 0x01000000 0 0x00000000
186 0x01000000 0 0x00000000
187 0 0x00010000>;
188 };
189 };
190
191 pci2: pcie@ffe202000 {
192 reg = <0xf 0xfe202000 0 0x1000>;
193 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
194 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
195 pcie@0 {
196 ranges = <0x02000000 0 0xe0000000
197 0x02000000 0 0xe0000000
198 0 0x20000000
199
200 0x01000000 0 0x00000000
201 0x01000000 0 0x00000000
202 0 0x00010000>;
203 };
204 };
205};
206
207/include/ "fsl/p5040si-post.dtsi"
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index 26e541c4662b..b80bcc69d1f7 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -112,6 +112,12 @@ CONFIG_SND=y
112CONFIG_SND_MIXER_OSS=y 112CONFIG_SND_MIXER_OSS=y
113CONFIG_SND_PCM_OSS=y 113CONFIG_SND_PCM_OSS=y
114# CONFIG_SND_SUPPORT_OLD_API is not set 114# CONFIG_SND_SUPPORT_OLD_API is not set
115CONFIG_USB=y
116CONFIG_USB_DEVICEFS=y
117CONFIG_USB_MON=y
118CONFIG_USB_EHCI_HCD=y
119CONFIG_USB_EHCI_FSL=y
120CONFIG_USB_STORAGE=y
115CONFIG_EDAC=y 121CONFIG_EDAC=y
116CONFIG_EDAC_MM_EDAC=y 122CONFIG_EDAC_MM_EDAC=y
117CONFIG_RTC_CLASS=y 123CONFIG_RTC_CLASS=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 8b3d57c1ebe8..1c0f2432ecdb 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -27,6 +27,7 @@ CONFIG_P2041_RDB=y
27CONFIG_P3041_DS=y 27CONFIG_P3041_DS=y
28CONFIG_P4080_DS=y 28CONFIG_P4080_DS=y
29CONFIG_P5020_DS=y 29CONFIG_P5020_DS=y
30CONFIG_P5040_DS=y
30CONFIG_HIGHMEM=y 31CONFIG_HIGHMEM=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_BINFMT_MISC=m 33CONFIG_BINFMT_MISC=m
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 0516e22ca3de..88fa5c46f66f 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -23,6 +23,7 @@ CONFIG_MODVERSIONS=y
23CONFIG_PARTITION_ADVANCED=y 23CONFIG_PARTITION_ADVANCED=y
24CONFIG_MAC_PARTITION=y 24CONFIG_MAC_PARTITION=y
25CONFIG_P5020_DS=y 25CONFIG_P5020_DS=y
26CONFIG_P5040_DS=y
26# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set 27# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
27CONFIG_BINFMT_MISC=m 28CONFIG_BINFMT_MISC=m
28CONFIG_IRQ_ALL_CPUS=y 29CONFIG_IRQ_ALL_CPUS=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 8b5bda27d248..cf815e847cdc 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -30,6 +30,7 @@ CONFIG_MPC85xx_DS=y
30CONFIG_MPC85xx_RDB=y 30CONFIG_MPC85xx_RDB=y
31CONFIG_P1010_RDB=y 31CONFIG_P1010_RDB=y
32CONFIG_P1022_DS=y 32CONFIG_P1022_DS=y
33CONFIG_P1022_RDK=y
33CONFIG_P1023_RDS=y 34CONFIG_P1023_RDS=y
34CONFIG_SOCRATES=y 35CONFIG_SOCRATES=y
35CONFIG_KSI8560=y 36CONFIG_KSI8560=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index b0974e7e98ae..502cd9e027e4 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -32,6 +32,7 @@ CONFIG_MPC85xx_DS=y
32CONFIG_MPC85xx_RDB=y 32CONFIG_MPC85xx_RDB=y
33CONFIG_P1010_RDB=y 33CONFIG_P1010_RDB=y
34CONFIG_P1022_DS=y 34CONFIG_P1022_DS=y
35CONFIG_P1022_RDK=y
35CONFIG_P1023_RDS=y 36CONFIG_P1023_RDS=y
36CONFIG_SOCRATES=y 37CONFIG_SOCRATES=y
37CONFIG_KSI8560=y 38CONFIG_KSI8560=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index de7c4c53f5cf..6d03530b7506 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -51,6 +51,7 @@ CONFIG_KEXEC=y
51CONFIG_IRQ_ALL_CPUS=y 51CONFIG_IRQ_ALL_CPUS=y
52CONFIG_MEMORY_HOTREMOVE=y 52CONFIG_MEMORY_HOTREMOVE=y
53CONFIG_SCHED_SMT=y 53CONFIG_SCHED_SMT=y
54CONFIG_PPC_DENORMALISATION=y
54CONFIG_PCCARD=y 55CONFIG_PCCARD=y
55CONFIG_ELECTRA_CF=y 56CONFIG_ELECTRA_CF=y
56CONFIG_HOTPLUG_PCI=m 57CONFIG_HOTPLUG_PCI=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 9f4a9368f51b..1f710a32ffae 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -48,6 +48,7 @@ CONFIG_MEMORY_HOTREMOVE=y
48CONFIG_PPC_64K_PAGES=y 48CONFIG_PPC_64K_PAGES=y
49CONFIG_PPC_SUBPAGE_PROT=y 49CONFIG_PPC_SUBPAGE_PROT=y
50CONFIG_SCHED_SMT=y 50CONFIG_SCHED_SMT=y
51CONFIG_PPC_DENORMALISATION=y
51CONFIG_HOTPLUG_PCI=m 52CONFIG_HOTPLUG_PCI=m
52CONFIG_HOTPLUG_PCI_RPA=m 53CONFIG_HOTPLUG_PCI_RPA=m
53CONFIG_HOTPLUG_PCI_RPA_DLPAR=m 54CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
deleted file mode 100644
index 9d92ba04b033..000000000000
--- a/arch/powerpc/include/asm/abs_addr.h
+++ /dev/null
@@ -1,56 +0,0 @@
1#ifndef _ASM_POWERPC_ABS_ADDR_H
2#define _ASM_POWERPC_ABS_ADDR_H
3#ifdef __KERNEL__
4
5
6/*
7 * c 2001 PPC 64 Team, IBM Corp
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/memblock.h>
16
17#include <asm/types.h>
18#include <asm/page.h>
19#include <asm/prom.h>
20
21struct mschunks_map {
22 unsigned long num_chunks;
23 unsigned long chunk_size;
24 unsigned long chunk_shift;
25 unsigned long chunk_mask;
26 u32 *mapping;
27};
28
29extern struct mschunks_map mschunks_map;
30
31/* Chunks are 256 KB */
32#define MSCHUNKS_CHUNK_SHIFT (18)
33#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT)
34#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1)
35
36static inline unsigned long chunk_to_addr(unsigned long chunk)
37{
38 return chunk << MSCHUNKS_CHUNK_SHIFT;
39}
40
41static inline unsigned long addr_to_chunk(unsigned long addr)
42{
43 return addr >> MSCHUNKS_CHUNK_SHIFT;
44}
45
46static inline unsigned long phys_to_abs(unsigned long pa)
47{
48 return pa;
49}
50
51/* Convenience macros */
52#define virt_to_abs(va) phys_to_abs(__pa(va))
53#define abs_to_virt(aa) __va(aa)
54
55#endif /* __KERNEL__ */
56#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index ab9e402518e8..b843e35122e8 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,6 +30,8 @@ extern void flush_dcache_page(struct page *page);
30#define flush_dcache_mmap_lock(mapping) do { } while (0) 30#define flush_dcache_mmap_lock(mapping) do { } while (0)
31#define flush_dcache_mmap_unlock(mapping) do { } while (0) 31#define flush_dcache_mmap_unlock(mapping) do { } while (0)
32 32
33extern void __flush_disable_L1(void);
34
33extern void __flush_icache_range(unsigned long, unsigned long); 35extern void __flush_icache_range(unsigned long, unsigned long);
34static inline void flush_icache_range(unsigned long start, unsigned long stop) 36static inline void flush_icache_range(unsigned long start, unsigned long stop)
35{ 37{
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 716d2f089eb6..32de2577bb6d 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -44,7 +44,7 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
44static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } 44static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
45#endif 45#endif
46 46
47extern int set_dabr(unsigned long dabr); 47extern int set_dabr(unsigned long dabr, unsigned long dabrx);
48#ifdef CONFIG_PPC_ADV_DEBUG_REGS 48#ifdef CONFIG_PPC_ADV_DEBUG_REGS
49extern void do_send_trap(struct pt_regs *regs, unsigned long address, 49extern void do_send_trap(struct pt_regs *regs, unsigned long address,
50 unsigned long error_code, int signal_code, int brkpt); 50 unsigned long error_code, int signal_code, int brkpt);
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d60f99814ffb..b0ef73882b38 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -32,27 +32,62 @@ struct device_node;
32#ifdef CONFIG_EEH 32#ifdef CONFIG_EEH
33 33
34/* 34/*
 35 * This struct is used to trace PE-related EEH functionality.
 36 * In theory, one instance of the struct is created for each
 37 * particular PE. In practice, PEs correlate with each other;
 38 * the struct has to reflect that hierarchy so that all affected
 39 * PEs can easily be picked up when one particular PE hits an
 40 * EEH error.
 41 *
 42 * Also, one particular PE might be composed of a PCI device, a PCI
 43 * bus and their subordinate components. The struct also needs to
 44 * carry that information. Furthermore, one particular PE is only
 45 * meaningful within its corresponding PHB. Therefore, the root PEs
 46 * should be created against the existing PHBs in one-to-one fashion.
47 */
48#define EEH_PE_INVALID (1 << 0) /* Invalid */
49#define EEH_PE_PHB (1 << 1) /* PHB PE */
50#define EEH_PE_DEVICE (1 << 2) /* Device PE */
51#define EEH_PE_BUS (1 << 3) /* Bus PE */
52
53#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
54#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
55
56struct eeh_pe {
57 int type; /* PE type: PHB/Bus/Device */
58 int state; /* PE EEH dependent mode */
59 int config_addr; /* Traditional PCI address */
60 int addr; /* PE configuration address */
61 struct pci_controller *phb; /* Associated PHB */
62 int check_count; /* Times of ignored error */
 63	int freeze_count;		/* Times the PE froze up	*/
64 int false_positives; /* Times of reported #ff's */
65 struct eeh_pe *parent; /* Parent PE */
66 struct list_head child_list; /* Link PE to the child list */
67 struct list_head edevs; /* Link list of EEH devices */
68 struct list_head child; /* Child PEs */
69};
70
71#define eeh_pe_for_each_dev(pe, edev) \
72 list_for_each_entry(edev, &pe->edevs, list)
73
74/*
35 * The struct is used to trace EEH state for the associated 75 * The struct is used to trace EEH state for the associated
36 * PCI device node or PCI device. In future, it might 76 * PCI device node or PCI device. In future, it might
 37 * represent a PE as well, so that the EEH devices form		 77 * represent a PE as well, so that the EEH devices form
 38 * another tree alongside the existing tree of PCI			 78 * another tree alongside the existing tree of PCI
 39 * buses and PCI devices.						 79 * buses and PCI devices.
40 */ 80 */
41#define EEH_MODE_SUPPORTED (1<<0) /* EEH supported on the device */ 81#define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */
42#define EEH_MODE_NOCHECK (1<<1) /* EEH check should be skipped */
43#define EEH_MODE_ISOLATED (1<<2) /* The device has been isolated */
44#define EEH_MODE_RECOVERING (1<<3) /* Recovering the device */
45#define EEH_MODE_IRQ_DISABLED (1<<4) /* Interrupt disabled */
46 82
47struct eeh_dev { 83struct eeh_dev {
48 int mode; /* EEH mode */ 84 int mode; /* EEH mode */
49 int class_code; /* Class code of the device */ 85 int class_code; /* Class code of the device */
50 int config_addr; /* Config address */ 86 int config_addr; /* Config address */
51 int pe_config_addr; /* PE config address */ 87 int pe_config_addr; /* PE config address */
52 int check_count; /* Times of ignored error */
53 int freeze_count; /* Times of froze up */
54 int false_positives; /* Times of reported #ff's */
55 u32 config_space[16]; /* Saved PCI config space */ 88 u32 config_space[16]; /* Saved PCI config space */
89 struct eeh_pe *pe; /* Associated PE */
90 struct list_head list; /* Form link list in the PE */
56 struct pci_controller *phb; /* Associated PHB */ 91 struct pci_controller *phb; /* Associated PHB */
57 struct device_node *dn; /* Associated device node */ 92 struct device_node *dn; /* Associated device node */
58 struct pci_dev *pdev; /* Associated PCI device */ 93 struct pci_dev *pdev; /* Associated PCI device */
@@ -95,19 +130,51 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
95struct eeh_ops { 130struct eeh_ops {
96 char *name; 131 char *name;
97 int (*init)(void); 132 int (*init)(void);
98 int (*set_option)(struct device_node *dn, int option); 133 void* (*of_probe)(struct device_node *dn, void *flag);
99 int (*get_pe_addr)(struct device_node *dn); 134 void* (*dev_probe)(struct pci_dev *dev, void *flag);
100 int (*get_state)(struct device_node *dn, int *state); 135 int (*set_option)(struct eeh_pe *pe, int option);
101 int (*reset)(struct device_node *dn, int option); 136 int (*get_pe_addr)(struct eeh_pe *pe);
102 int (*wait_state)(struct device_node *dn, int max_wait); 137 int (*get_state)(struct eeh_pe *pe, int *state);
103 int (*get_log)(struct device_node *dn, int severity, char *drv_log, unsigned long len); 138 int (*reset)(struct eeh_pe *pe, int option);
104 int (*configure_bridge)(struct device_node *dn); 139 int (*wait_state)(struct eeh_pe *pe, int max_wait);
140 int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
141 int (*configure_bridge)(struct eeh_pe *pe);
105 int (*read_config)(struct device_node *dn, int where, int size, u32 *val); 142 int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
106 int (*write_config)(struct device_node *dn, int where, int size, u32 val); 143 int (*write_config)(struct device_node *dn, int where, int size, u32 val);
107}; 144};
108 145
109extern struct eeh_ops *eeh_ops; 146extern struct eeh_ops *eeh_ops;
110extern int eeh_subsystem_enabled; 147extern int eeh_subsystem_enabled;
148extern struct mutex eeh_mutex;
149extern int eeh_probe_mode;
150
151#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
152#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
153
154static inline void eeh_probe_mode_set(int flag)
155{
156 eeh_probe_mode = flag;
157}
158
159static inline int eeh_probe_mode_devtree(void)
160{
161 return (eeh_probe_mode == EEH_PROBE_MODE_DEVTREE);
162}
163
164static inline int eeh_probe_mode_dev(void)
165{
166 return (eeh_probe_mode == EEH_PROBE_MODE_DEV);
167}
168
169static inline void eeh_lock(void)
170{
171 mutex_lock(&eeh_mutex);
172}
173
174static inline void eeh_unlock(void)
175{
176 mutex_unlock(&eeh_mutex);
177}
111 178
112/* 179/*
113 * Max number of EEH freezes allowed before we consider the device 180 * Max number of EEH freezes allowed before we consider the device
@@ -115,22 +182,26 @@ extern int eeh_subsystem_enabled;
115 */ 182 */
116#define EEH_MAX_ALLOWED_FREEZES 5 183#define EEH_MAX_ALLOWED_FREEZES 5
117 184
185typedef void *(*eeh_traverse_func)(void *data, void *flag);
186int __devinit eeh_phb_pe_create(struct pci_controller *phb);
187int eeh_add_to_parent_pe(struct eeh_dev *edev);
188int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe);
189void *eeh_pe_dev_traverse(struct eeh_pe *root,
190 eeh_traverse_func fn, void *flag);
191void eeh_pe_restore_bars(struct eeh_pe *pe);
192struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
193
118void * __devinit eeh_dev_init(struct device_node *dn, void *data); 194void * __devinit eeh_dev_init(struct device_node *dn, void *data);
119void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb); 195void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb);
120void __init eeh_dev_phb_init(void);
121void __init eeh_init(void);
122#ifdef CONFIG_PPC_PSERIES
123int __init eeh_pseries_init(void);
124#endif
125int __init eeh_ops_register(struct eeh_ops *ops); 196int __init eeh_ops_register(struct eeh_ops *ops);
126int __exit eeh_ops_unregister(const char *name); 197int __exit eeh_ops_unregister(const char *name);
127unsigned long eeh_check_failure(const volatile void __iomem *token, 198unsigned long eeh_check_failure(const volatile void __iomem *token,
128 unsigned long val); 199 unsigned long val);
129int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); 200int eeh_dev_check_failure(struct eeh_dev *edev);
130void __init pci_addr_cache_build(void); 201void __init eeh_addr_cache_build(void);
131void eeh_add_device_tree_early(struct device_node *); 202void eeh_add_device_tree_early(struct device_node *);
132void eeh_add_device_tree_late(struct pci_bus *); 203void eeh_add_device_tree_late(struct pci_bus *);
133void eeh_remove_bus_device(struct pci_dev *); 204void eeh_remove_bus_device(struct pci_dev *, int);
134 205
135/** 206/**
136 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. 207 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -156,34 +227,24 @@ static inline void *eeh_dev_init(struct device_node *dn, void *data)
156 227
157static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } 228static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
158 229
159static inline void eeh_dev_phb_init(void) { }
160
161static inline void eeh_init(void) { }
162
163#ifdef CONFIG_PPC_PSERIES
164static inline int eeh_pseries_init(void)
165{
166 return 0;
167}
168#endif /* CONFIG_PPC_PSERIES */
169
170static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) 230static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
171{ 231{
172 return val; 232 return val;
173} 233}
174 234
175static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) 235#define eeh_dev_check_failure(x) (0)
176{
177 return 0;
178}
179 236
180static inline void pci_addr_cache_build(void) { } 237static inline void eeh_addr_cache_build(void) { }
181 238
182static inline void eeh_add_device_tree_early(struct device_node *dn) { } 239static inline void eeh_add_device_tree_early(struct device_node *dn) { }
183 240
184static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } 241static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
185 242
186static inline void eeh_remove_bus_device(struct pci_dev *dev) { } 243static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
244
245static inline void eeh_lock(void) { }
246static inline void eeh_unlock(void) { }
247
187#define EEH_POSSIBLE_ERROR(val, type) (0) 248#define EEH_POSSIBLE_ERROR(val, type) (0)
188#define EEH_IO_ERROR_VALUE(size) (-1UL) 249#define EEH_IO_ERROR_VALUE(size) (-1UL)
189#endif /* CONFIG_EEH */ 250#endif /* CONFIG_EEH */
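
The eeh.h hunk above moves the error-tracking counters (check_count, freeze_count, false_positives) from struct eeh_dev into the new struct eeh_pe and gives each PE its own list of EEH devices. A minimal sketch of how a caller might walk that list with the new eeh_pe_for_each_dev() helper follows; it is not part of the patch, the function names count_pe_devices() and report_edev() are hypothetical, and the assumption that the traversal callback receives the struct eeh_dev as its "data" argument is not spelled out in this hunk.

/*
 * Minimal sketch, not part of the patch: count and report the EEH
 * devices attached to one PE using the helpers introduced above.
 * Function names are hypothetical; the callback's "data" argument
 * is assumed to be the struct eeh_dev being visited.
 */
#include <linux/pci.h>
#include <linux/printk.h>
#include <asm/eeh.h>

static int count_pe_devices(struct eeh_pe *pe)
{
	struct eeh_dev *edev;
	int count = 0;

	/* Walk the per-PE device list added by this patch */
	eeh_pe_for_each_dev(pe, edev)
		count++;

	return count;
}

/* Callback with the eeh_traverse_func signature: void *(void *, void *) */
static void *report_edev(void *data, void *flag)
{
	struct eeh_dev *edev = data;

	pr_info("EEH: device at config address %04x\n", edev->config_addr);
	return NULL;	/* assumed to mean "keep traversing" */
}

Such a callback could then be handed to eeh_pe_dev_traverse(pe, report_edev, NULL) to visit every device below a PE, under the assumption that a NULL return tells the traversal to continue.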
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
index c68b012b7797..de67d830151b 100644
--- a/arch/powerpc/include/asm/eeh_event.h
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -28,11 +28,11 @@
28 */ 28 */
29struct eeh_event { 29struct eeh_event {
30 struct list_head list; /* to form event queue */ 30 struct list_head list; /* to form event queue */
31 struct eeh_dev *edev; /* EEH device */ 31 struct eeh_pe *pe; /* EEH PE */
32}; 32};
33 33
34int eeh_send_failure_event(struct eeh_dev *edev); 34int eeh_send_failure_event(struct eeh_pe *pe);
35struct eeh_dev *handle_eeh_events(struct eeh_event *); 35void eeh_handle_event(struct eeh_pe *pe);
36 36
37#endif /* __KERNEL__ */ 37#endif /* __KERNEL__ */
38#endif /* ASM_POWERPC_EEH_EVENT_H */ 38#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index ac13addb8495..51fa43e536b9 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -37,6 +37,7 @@
37 * critical data 37 * critical data
38 */ 38 */
39 39
40#define PACA_EXGDBELL PACA_EXGEN
40 41
41/* We are out of SPRGs so we save some things in the PACA. The normal 42/* We are out of SPRGs so we save some things in the PACA. The normal
42 * exception frame is smaller than the CRIT or MC one though 43 * exception frame is smaller than the CRIT or MC one though
@@ -45,8 +46,9 @@
45#define EX_CR (1 * 8) 46#define EX_CR (1 * 8)
46#define EX_R10 (2 * 8) 47#define EX_R10 (2 * 8)
47#define EX_R11 (3 * 8) 48#define EX_R11 (3 * 8)
48#define EX_R14 (4 * 8) 49#define EX_R13 (4 * 8)
49#define EX_R15 (5 * 8) 50#define EX_R14 (5 * 8)
51#define EX_R15 (6 * 8)
50 52
51/* 53/*
52 * The TLB miss exception uses different slots. 54 * The TLB miss exception uses different slots.
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index aa4c488589ce..dd5ba2c22771 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -48,6 +48,8 @@ struct ccsr_guts {
48 __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ 48 __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
49 u8 res06c[0x70 - 0x6c]; 49 u8 res06c[0x70 - 0x6c];
50 __be32 devdisr; /* 0x.0070 - Device Disable Control */ 50 __be32 devdisr; /* 0x.0070 - Device Disable Control */
51#define CCSR_GUTS_DEVDISR_TB1 0x00001000
52#define CCSR_GUTS_DEVDISR_TB0 0x00004000
51 __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ 53 __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
52 u8 res078[0x7c - 0x78]; 54 u8 res078[0x7c - 0x78];
53 __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ 55 __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b955012939a2..b8a4b9bc50b3 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -768,22 +768,24 @@ struct fsl_ifc_gpcm {
768 */ 768 */
769struct fsl_ifc_regs { 769struct fsl_ifc_regs {
770 __be32 ifc_rev; 770 __be32 ifc_rev;
771 u32 res1[0x3]; 771 u32 res1[0x2];
772 struct { 772 struct {
773 __be32 cspr_ext;
773 __be32 cspr; 774 __be32 cspr;
774 u32 res2[0x2]; 775 u32 res2;
775 } cspr_cs[FSL_IFC_BANK_COUNT]; 776 } cspr_cs[FSL_IFC_BANK_COUNT];
776 u32 res3[0x18]; 777 u32 res3[0x19];
777 struct { 778 struct {
778 __be32 amask; 779 __be32 amask;
779 u32 res4[0x2]; 780 u32 res4[0x2];
780 } amask_cs[FSL_IFC_BANK_COUNT]; 781 } amask_cs[FSL_IFC_BANK_COUNT];
781 u32 res5[0x18]; 782 u32 res5[0x17];
782 struct { 783 struct {
784 __be32 csor_ext;
783 __be32 csor; 785 __be32 csor;
784 u32 res6[0x2]; 786 u32 res6;
785 } csor_cs[FSL_IFC_BANK_COUNT]; 787 } csor_cs[FSL_IFC_BANK_COUNT];
786 u32 res7[0x18]; 788 u32 res7[0x19];
787 struct { 789 struct {
788 __be32 ftim[4]; 790 __be32 ftim[4];
789 u32 res8[0x8]; 791 u32 res8[0x8];
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 423cf9eaf4a4..7a867065db79 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -152,11 +152,6 @@
152#define H_VASI_RESUMED 5 152#define H_VASI_RESUMED 5
153#define H_VASI_COMPLETED 6 153#define H_VASI_COMPLETED 6
154 154
155/* DABRX flags */
156#define H_DABRX_HYPERVISOR (1UL<<(63-61))
157#define H_DABRX_KERNEL (1UL<<(63-62))
158#define H_DABRX_USER (1UL<<(63-63))
159
160/* Each control block has to be on a 4K boundary */ 155/* Each control block has to be on a 4K boundary */
161#define H_CB_ALIGNMENT 4096 156#define H_CB_ALIGNMENT 4096
162 157
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index be04330af751..423424599dad 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -27,10 +27,11 @@
27#ifdef CONFIG_HAVE_HW_BREAKPOINT 27#ifdef CONFIG_HAVE_HW_BREAKPOINT
28 28
29struct arch_hw_breakpoint { 29struct arch_hw_breakpoint {
30 bool extraneous_interrupt;
31 u8 len; /* length of the target data symbol */
32 int type;
33 unsigned long address; 30 unsigned long address;
31 unsigned long dabrx;
32 int type;
33 u8 len; /* length of the target data symbol */
34 bool extraneous_interrupt;
34}; 35};
35 36
36#include <linux/kdebug.h> 37#include <linux/kdebug.h>
@@ -61,7 +62,7 @@ extern void ptrace_triggered(struct perf_event *bp,
61 struct perf_sample_data *data, struct pt_regs *regs); 62 struct perf_sample_data *data, struct pt_regs *regs);
62static inline void hw_breakpoint_disable(void) 63static inline void hw_breakpoint_disable(void)
63{ 64{
64 set_dabr(0); 65 set_dabr(0, 0);
65} 66}
66extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs); 67extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
67 68
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index be0171afdc0f..7b6feab6fd26 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -29,21 +29,16 @@
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/ptrace.h> 30#include <linux/ptrace.h>
31#include <linux/percpu.h> 31#include <linux/percpu.h>
32#include <asm/probes.h>
32 33
33#define __ARCH_WANT_KPROBES_INSN_SLOT 34#define __ARCH_WANT_KPROBES_INSN_SLOT
34 35
35struct pt_regs; 36struct pt_regs;
36struct kprobe; 37struct kprobe;
37 38
38typedef unsigned int kprobe_opcode_t; 39typedef ppc_opcode_t kprobe_opcode_t;
39#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
40#define MAX_INSN_SIZE 1 40#define MAX_INSN_SIZE 1
41 41
42#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
43#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
44#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
45#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
46
47#ifdef CONFIG_PPC64 42#ifdef CONFIG_PPC64
48/* 43/*
49 * 64bit powerpc uses function descriptors. 44 * 64bit powerpc uses function descriptors.
@@ -72,12 +67,6 @@ typedef unsigned int kprobe_opcode_t;
72 addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ 67 addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
73 } \ 68 } \
74} 69}
75
76#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
77 IS_TWI(instr) || IS_TDI(instr))
78#else
79/* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */
80#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
81#endif 70#endif
82 71
83#define flush_insn_slot(p) do { } while (0) 72#define flush_insn_slot(p) do { } while (0)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..7aefdb3e1ce4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -59,7 +59,7 @@ struct hpte_cache {
59 struct hlist_node list_vpte; 59 struct hlist_node list_vpte;
60 struct hlist_node list_vpte_long; 60 struct hlist_node list_vpte_long;
61 struct rcu_head rcu_head; 61 struct rcu_head rcu_head;
62 u64 host_va; 62 u64 host_vpn;
63 u64 pfn; 63 u64 pfn;
64 ulong slot; 64 ulong slot;
65 struct kvmppc_pte pte; 65 struct kvmppc_pte pte;
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index bfcd00c1485d..88609b23b775 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -74,7 +74,6 @@ struct kvmppc_host_state {
74 ulong vmhandler; 74 ulong vmhandler;
75 ulong scratch0; 75 ulong scratch0;
76 ulong scratch1; 76 ulong scratch1;
77 ulong sprg3;
78 u8 in_guest; 77 u8 in_guest;
79 u8 restore_hid5; 78 u8 restore_hid5;
80 u8 napping; 79 u8 napping;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index f7706d722b39..c4231973edd3 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,19 +34,19 @@ struct machdep_calls {
34 char *name; 34 char *name;
35#ifdef CONFIG_PPC64 35#ifdef CONFIG_PPC64
36 void (*hpte_invalidate)(unsigned long slot, 36 void (*hpte_invalidate)(unsigned long slot,
37 unsigned long va, 37 unsigned long vpn,
38 int psize, int ssize, 38 int psize, int ssize,
39 int local); 39 int local);
40 long (*hpte_updatepp)(unsigned long slot, 40 long (*hpte_updatepp)(unsigned long slot,
41 unsigned long newpp, 41 unsigned long newpp,
42 unsigned long va, 42 unsigned long vpn,
43 int psize, int ssize, 43 int psize, int ssize,
44 int local); 44 int local);
45 void (*hpte_updateboltedpp)(unsigned long newpp, 45 void (*hpte_updateboltedpp)(unsigned long newpp,
46 unsigned long ea, 46 unsigned long ea,
47 int psize, int ssize); 47 int psize, int ssize);
48 long (*hpte_insert)(unsigned long hpte_group, 48 long (*hpte_insert)(unsigned long hpte_group,
49 unsigned long va, 49 unsigned long vpn,
50 unsigned long prpn, 50 unsigned long prpn,
51 unsigned long rflags, 51 unsigned long rflags,
52 unsigned long vflags, 52 unsigned long vflags,
@@ -180,7 +180,8 @@ struct machdep_calls {
180 void (*enable_pmcs)(void); 180 void (*enable_pmcs)(void);
181 181
182 /* Set DABR for this platform, leave empty for default implementation */ 182 /* Set DABR for this platform, leave empty for default implementation */
183 int (*set_dabr)(unsigned long dabr); 183 int (*set_dabr)(unsigned long dabr,
184 unsigned long dabrx);
184 185
185#ifdef CONFIG_PPC32 /* XXX for now */ 186#ifdef CONFIG_PPC32 /* XXX for now */
186 /* A general init function, called by ppc_init in init/main.c. 187 /* A general init function, called by ppc_init in init/main.c.
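
Note on the machdep.h hunk above: the set_dabr() hook now carries the DABRX qualifier alongside the DABR value. As a minimal sketch (not code from this series; example_set_dabr() is invented, the SPR names are the standard ones from asm/reg.h), a native two-argument implementation could simply program both registers:

#include <asm/reg.h>

/* Illustrative native set_dabr() matching the new two-argument hook in
 * machdep_calls: program the breakpoint address and its DABRX match
 * bits (user/kernel/hypervisor) together.
 */
static int example_set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	mtspr(SPRN_DABRX, dabrx);
	return 0;
}

This is also why hw_breakpoint_disable() in the earlier hunk now clears both values with set_dabr(0, 0).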
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 1c65a59881ea..9673f73eb8db 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -16,6 +16,13 @@
16#include <asm/page.h> 16#include <asm/page.h>
17 17
18/* 18/*
19 * This is necessary to get the definition of PGTABLE_RANGE which we
20 * need for various slice-related matters. Note that this isn't the
21 * complete pgtable.h but only a portion of it.
22 */
23#include <asm/pgtable-ppc64.h>
24
25/*
19 * Segment table 26 * Segment table
20 */ 27 */
21 28
@@ -154,9 +161,25 @@ struct mmu_psize_def
154#define MMU_SEGSIZE_256M 0 161#define MMU_SEGSIZE_256M 0
155#define MMU_SEGSIZE_1T 1 162#define MMU_SEGSIZE_1T 1
156 163
164/*
165 * Encode the page number shift.
166 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
167 * 12 bits. This enables us to address up to a 76-bit VA.
168 * For the hpt hash from a VA we can ignore the page size bits of the VA, and for
169 * hpte encoding we ignore up to 23 bits of the VA. So ignoring the lower 12 bits ensures
170 * we work in all cases, including the 4k page size.
171 */
172#define VPN_SHIFT 12
157 173
158#ifndef __ASSEMBLY__ 174#ifndef __ASSEMBLY__
159 175
176static inline int segment_shift(int ssize)
177{
178 if (ssize == MMU_SEGSIZE_256M)
179 return SID_SHIFT;
180 return SID_SHIFT_1T;
181}
182
160/* 183/*
161 * The current system page and segment sizes 184 * The current system page and segment sizes
162 */ 185 */
@@ -180,18 +203,39 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
180extern int mmu_ci_restrictions; 203extern int mmu_ci_restrictions;
181 204
182/* 205/*
206 * This computes the AVPN and B fields of the first dword of a HPTE,
207 * for use when we want to match an existing PTE. The bottom 7 bits
208 * of the returned value are zero.
209 */
210static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
211 int ssize)
212{
213 unsigned long v;
214 /*
215 * The AVA field omits the low-order 23 bits of the 78-bit VA.
216 * These bits are not needed in the PTE, because the
217 * low-order b of these bits are part of the byte offset
218 * into the virtual page and, if b < 23, the high-order
219 * 23-b of these bits are always used in selecting the
220 * PTEGs to be searched
221 */
222 v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
223 v <<= HPTE_V_AVPN_SHIFT;
224 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
225 return v;
226}
227
228/*
183 * This function sets the AVPN and L fields of the HPTE appropriately 229 * This function sets the AVPN and L fields of the HPTE appropriately
184 * for the page size 230 * for the page size
185 */ 231 */
186static inline unsigned long hpte_encode_v(unsigned long va, int psize, 232static inline unsigned long hpte_encode_v(unsigned long vpn,
187 int ssize) 233 int psize, int ssize)
188{ 234{
189 unsigned long v; 235 unsigned long v;
190 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); 236 v = hpte_encode_avpn(vpn, psize, ssize);
191 v <<= HPTE_V_AVPN_SHIFT;
192 if (psize != MMU_PAGE_4K) 237 if (psize != MMU_PAGE_4K)
193 v |= HPTE_V_LARGE; 238 v |= HPTE_V_LARGE;
194 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
195 return v; 239 return v;
196} 240}
197 241
@@ -216,30 +260,37 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
216} 260}
217 261
218/* 262/*
219 * Build a VA given VSID, EA and segment size 263 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
220 */ 264 */
221static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid, 265static inline unsigned long hpt_vpn(unsigned long ea,
222 int ssize) 266 unsigned long vsid, int ssize)
223{ 267{
224 if (ssize == MMU_SEGSIZE_256M) 268 unsigned long mask;
225 return (vsid << 28) | (ea & 0xfffffffUL); 269 int s_shift = segment_shift(ssize);
226 return (vsid << 40) | (ea & 0xffffffffffUL); 270
271 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
272 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
227} 273}
228 274
229/* 275/*
230 * This hashes a virtual address 276 * This hashes a virtual address
231 */ 277 */
232 278static inline unsigned long hpt_hash(unsigned long vpn,
233static inline unsigned long hpt_hash(unsigned long va, unsigned int shift, 279 unsigned int shift, int ssize)
234 int ssize)
235{ 280{
281 int mask;
236 unsigned long hash, vsid; 282 unsigned long hash, vsid;
237 283
284 /* VPN_SHIFT can be at most 12 */
238 if (ssize == MMU_SEGSIZE_256M) { 285 if (ssize == MMU_SEGSIZE_256M) {
239 hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift); 286 mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
287 hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
288 ((vpn & mask) >> (shift - VPN_SHIFT));
240 } else { 289 } else {
241 vsid = va >> 40; 290 mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
242 hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift); 291 vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
292 hash = vsid ^ (vsid << 25) ^
293 ((vpn & mask) >> (shift - VPN_SHIFT)) ;
243 } 294 }
244 return hash & 0x7fffffffffUL; 295 return hash & 0x7fffffffffUL;
245} 296}
@@ -280,63 +331,61 @@ extern void slb_set_size(u16 size);
280#endif /* __ASSEMBLY__ */ 331#endif /* __ASSEMBLY__ */
281 332
282/* 333/*
283 * VSID allocation 334 * VSID allocation (256MB segment)
335 *
336 * We first generate a 38-bit "proto-VSID". For kernel addresses this
337 * is equal to the ESID | 1 << 37, for user addresses it is:
338 * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
284 * 339 *
285 * We first generate a 36-bit "proto-VSID". For kernel addresses this 340 * This splits the proto-VSID into the below range
286 * is equal to the ESID, for user addresses it is: 341 * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
287 * (context << 15) | (esid & 0x7fff) 342 * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
288 * 343 *
289 * The two forms are distinguishable because the top bit is 0 for user 344 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
290 * addresses, whereas the top two bits are 1 for kernel addresses. 345 * That is, we assign half of the space to user processes and half
291 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for 346 * to the kernel.
292 * now.
293 * 347 *
294 * The proto-VSIDs are then scrambled into real VSIDs with the 348 * The proto-VSIDs are then scrambled into real VSIDs with the
295 * multiplicative hash: 349 * multiplicative hash:
296 * 350 *
297 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS 351 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
298 * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
299 * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
300 * 352 *
301 * This scramble is only well defined for proto-VSIDs below 353 * VSID_MULTIPLIER is prime, so in particular it is
302 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
303 * reserved. VSID_MULTIPLIER is prime, so in particular it is
304 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. 354 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
305 * Because the modulus is 2^n-1 we can compute it efficiently without 355 * Because the modulus is 2^n-1 we can compute it efficiently without
306 * a divide or extra multiply (see below). 356 * a divide or extra multiply (see below).
307 * 357 *
308 * This scheme has several advantages over older methods: 358 * This scheme has several advantages over older methods:
309 * 359 *
310 * - We have VSIDs allocated for every kernel address 360 * - We have VSIDs allocated for every kernel address
311 * (i.e. everything above 0xC000000000000000), except the very top 361 * (i.e. everything above 0xC000000000000000), except the very top
312 * segment, which simplifies several things. 362 * segment, which simplifies several things.
313 * 363 *
314 * - We allow for 16 significant bits of ESID and 19 bits of 364 * - We allow for USER_ESID_BITS significant bits of ESID and
315 * context for user addresses. i.e. 16T (44 bits) of address space for 365 * CONTEXT_BITS bits of context for user addresses.
316 * up to half a million contexts. 366 * i.e. 64T (46 bits) of address space for up to half a million contexts.
317 * 367 *
318 * - The scramble function gives robust scattering in the hash 368 * - The scramble function gives robust scattering in the hash
319 * table (at least based on some initial results). The previous 369 * table (at least based on some initial results). The previous
320 * method was more susceptible to pathological cases giving excessive 370 * method was more susceptible to pathological cases giving excessive
321 * hash collisions. 371 * hash collisions.
322 */ 372 */
373
323/* 374/*
324 * WARNING - If you change these you must make sure the asm 375 * This should be computed such that protovsid * vsid_multiplier
325 * implementations in slb_allocate (slb_low.S), do_stab_bolted 376 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
326 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
327 */ 377 */
328 378#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
329#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */ 379#define VSID_BITS_256M 38
330#define VSID_BITS_256M 36
331#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) 380#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
332 381
333#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ 382#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
334#define VSID_BITS_1T 24 383#define VSID_BITS_1T 26
335#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) 384#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
336 385
337#define CONTEXT_BITS 19 386#define CONTEXT_BITS 19
338#define USER_ESID_BITS 16 387#define USER_ESID_BITS 18
339#define USER_ESID_BITS_1T 4 388#define USER_ESID_BITS_1T 6
340 389
341#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 390#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
342 391
@@ -372,6 +421,8 @@ extern void slb_set_size(u16 size);
372 srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \ 421 srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
373 add rt,rt,rx 422 add rt,rt,rx
374 423
424/* 4 bits per slice and we have one slice per 1TB */
425#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
375 426
376#ifndef __ASSEMBLY__ 427#ifndef __ASSEMBLY__
377 428
@@ -416,7 +467,7 @@ typedef struct {
416 467
417#ifdef CONFIG_PPC_MM_SLICES 468#ifdef CONFIG_PPC_MM_SLICES
418 u64 low_slices_psize; /* SLB page size encodings */ 469 u64 low_slices_psize; /* SLB page size encodings */
419 u64 high_slices_psize; /* 4 bits per slice for now */ 470 unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
420#else 471#else
421 u16 sllp; /* SLB page size encoding */ 472 u16 sllp; /* SLB page size encoding */
422#endif 473#endif
@@ -452,12 +503,32 @@ typedef struct {
452 }) 503 })
453#endif /* 1 */ 504#endif /* 1 */
454 505
455/* This is only valid for addresses >= PAGE_OFFSET */ 506/*
507 * This is only valid for addresses >= PAGE_OFFSET
508 * The proto-VSID space is divided into two classes
509 * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
510 * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
511 *
512 * With KERNEL_START at 0xc000000000000000, the proto vsid for
513 * the kernel ends up with 0xc00000000 (36 bits). With 64TB
514 * support we need to have kernel proto-VSID in the
515 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
516 */
456static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) 517static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
457{ 518{
458 if (ssize == MMU_SEGSIZE_256M) 519 unsigned long proto_vsid;
459 return vsid_scramble(ea >> SID_SHIFT, 256M); 520 /*
460 return vsid_scramble(ea >> SID_SHIFT_1T, 1T); 521 * We need to make sure proto_vsid for the kernel is
522 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
523 */
524 if (ssize == MMU_SEGSIZE_256M) {
525 proto_vsid = ea >> SID_SHIFT;
526 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
527 return vsid_scramble(proto_vsid, 256M);
528 }
529 proto_vsid = ea >> SID_SHIFT_1T;
530 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
531 return vsid_scramble(proto_vsid, 1T);
461} 532}
462 533
463/* Returns the segment size indicator for a user address */ 534/* Returns the segment size indicator for a user address */
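
The mmu-hash64.h hunk above switches the hash MMU helpers from raw virtual addresses to VPNs (the 78-bit VA shifted right by VPN_SHIFT). A minimal sketch of how the new helpers compose, assuming the definitions introduced here (hpt_vpn(), hpt_hash(), mmu_psize_defs[]) plus the existing htab_hash_mask global; the wrapper function itself is invented for illustration:

/* Sketch: compute the primary hash bucket for an EA using the
 * VPN-based helpers from this hunk. The vsid and psize are assumed
 * to come from the usual get_vsid()/mm-context paths.
 */
static unsigned long example_hash_slot(unsigned long ea, unsigned long vsid,
				       int psize, int ssize)
{
	unsigned long vpn, hash;

	/* 78-bit VA shifted down by VPN_SHIFT so it fits in 64 bits */
	vpn = hpt_vpn(ea, vsid, ssize);

	/* hash for this page size / segment size */
	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);

	/* htab_hash_mask is the existing hash-table size mask */
	return hash & htab_hash_mask;
}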
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e8a26db2e8f3..5e38eedea218 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
146extern u64 ppc64_rma_size; 146extern u64 ppc64_rma_size;
147#endif /* CONFIG_PPC64 */ 147#endif /* CONFIG_PPC64 */
148 148
149struct mm_struct;
150#ifdef CONFIG_DEBUG_VM
151extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
152#else /* CONFIG_DEBUG_VM */
153static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
154{
155}
156#endif /* !CONFIG_DEBUG_VM */
157
149#endif /* !__ASSEMBLY__ */ 158#endif /* !__ASSEMBLY__ */
150 159
151/* The kernel use the constants below to index in the page sizes array. 160/* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1f41382eda38..0acc7c7c28d1 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -307,6 +307,7 @@ struct mpc52xx_lpbfifo_request {
307 size_t size; 307 size_t size;
308 size_t pos; /* current position of transfer */ 308 size_t pos; /* current position of transfer */
309 int flags; 309 int flags;
310 int defer_xfer_start;
310 311
311 /* What to do when finished */ 312 /* What to do when finished */
312 void (*callback)(struct mpc52xx_lpbfifo_request *); 313 void (*callback)(struct mpc52xx_lpbfifo_request *);
@@ -323,6 +324,7 @@ struct mpc52xx_lpbfifo_request {
323extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req); 324extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
324extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req); 325extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
325extern void mpc52xx_lpbfifo_poll(void); 326extern void mpc52xx_lpbfifo_poll(void);
327extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
326 328
327/* mpc52xx_pic.c */ 329/* mpc52xx_pic.c */
328extern void mpc52xx_init_irq(void); 330extern void mpc52xx_init_irq(void);
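
The new defer_xfer_start flag and mpc52xx_lpbfifo_start_xfer() above let a client queue an LPB FIFO request without starting the transfer immediately. The exact trigger conditions live in the driver, so the following is only an assumed usage sketch built from the names in this header:

#include <asm/mpc52xx.h>

/* Assumed usage: submit a request with the transfer start deferred,
 * then kick it off later once the client is ready.
 */
static int example_deferred_lpbfifo_xfer(struct mpc52xx_lpbfifo_request *req)
{
	int ret;

	req->defer_xfer_start = 1;		/* don't start on submit */

	ret = mpc52xx_lpbfifo_submit(req);
	if (ret)
		return ret;

	/* ... external condition satisfied, start the transfer now ... */
	return mpc52xx_lpbfifo_start_xfer(req);
}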
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index c9f698a994be..c0f9ef90f0b8 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -63,6 +63,7 @@
63 */ 63 */
64#define MPIC_TIMER_BASE 0x01100 64#define MPIC_TIMER_BASE 0x01100
65#define MPIC_TIMER_STRIDE 0x40 65#define MPIC_TIMER_STRIDE 0x40
66#define MPIC_TIMER_GROUP_STRIDE 0x1000
66 67
67#define MPIC_TIMER_CURRENT_CNT 0x00000 68#define MPIC_TIMER_CURRENT_CNT 0x00000
68#define MPIC_TIMER_BASE_CNT 0x00010 69#define MPIC_TIMER_BASE_CNT 0x00010
@@ -110,10 +111,16 @@
110#define MPIC_VECPRI_SENSE_MASK 0x00400000 111#define MPIC_VECPRI_SENSE_MASK 0x00400000
111#define MPIC_IRQ_DESTINATION 0x00010 112#define MPIC_IRQ_DESTINATION 0x00010
112 113
114#define MPIC_FSL_BRR1 0x00000
115#define MPIC_FSL_BRR1_VER 0x0000ffff
116
113#define MPIC_MAX_IRQ_SOURCES 2048 117#define MPIC_MAX_IRQ_SOURCES 2048
114#define MPIC_MAX_CPUS 32 118#define MPIC_MAX_CPUS 32
115#define MPIC_MAX_ISU 32 119#define MPIC_MAX_ISU 32
116 120
121#define MPIC_MAX_ERR 32
122#define MPIC_FSL_ERR_INT 16
123
117/* 124/*
118 * Tsi108 implementation of MPIC has many differences from the original one 125 * Tsi108 implementation of MPIC has many differences from the original one
119 */ 126 */
@@ -266,6 +273,7 @@ struct mpic
266 struct irq_chip hc_ipi; 273 struct irq_chip hc_ipi;
267#endif 274#endif
268 struct irq_chip hc_tm; 275 struct irq_chip hc_tm;
276 struct irq_chip hc_err;
269 const char *name; 277 const char *name;
270 /* Flags */ 278 /* Flags */
271 unsigned int flags; 279 unsigned int flags;
@@ -279,6 +287,8 @@ struct mpic
279 /* vector numbers used for internal sources (ipi/timers) */ 287 /* vector numbers used for internal sources (ipi/timers) */
280 unsigned int ipi_vecs[4]; 288 unsigned int ipi_vecs[4];
281 unsigned int timer_vecs[8]; 289 unsigned int timer_vecs[8];
290 /* vector numbers used for FSL MPIC error interrupts */
291 unsigned int err_int_vecs[MPIC_MAX_ERR];
282 292
283 /* Spurious vector to program into unused sources */ 293 /* Spurious vector to program into unused sources */
284 unsigned int spurious_vec; 294 unsigned int spurious_vec;
@@ -296,11 +306,15 @@ struct mpic
296 phys_addr_t paddr; 306 phys_addr_t paddr;
297 307
298 /* The various ioremap'ed bases */ 308 /* The various ioremap'ed bases */
309 struct mpic_reg_bank thiscpuregs;
299 struct mpic_reg_bank gregs; 310 struct mpic_reg_bank gregs;
300 struct mpic_reg_bank tmregs; 311 struct mpic_reg_bank tmregs;
301 struct mpic_reg_bank cpuregs[MPIC_MAX_CPUS]; 312 struct mpic_reg_bank cpuregs[MPIC_MAX_CPUS];
302 struct mpic_reg_bank isus[MPIC_MAX_ISU]; 313 struct mpic_reg_bank isus[MPIC_MAX_ISU];
303 314
315 /* ioremap'ed base for error interrupt registers */
316 u32 __iomem *err_regs;
317
304 /* Protected sources */ 318 /* Protected sources */
305 unsigned long *protected; 319 unsigned long *protected;
306 320
@@ -365,6 +379,11 @@ struct mpic
365#define MPIC_NO_RESET 0x00004000 379#define MPIC_NO_RESET 0x00004000
366/* Freescale MPIC (compatible includes "fsl,mpic") */ 380/* Freescale MPIC (compatible includes "fsl,mpic") */
367#define MPIC_FSL 0x00008000 381#define MPIC_FSL 0x00008000
382/* Freescale MPIC supports EIMR (error interrupt mask register).
383 * This flag is set for MPIC version >= 4.1 (version determined
384 * from the BRR1 register).
385*/
386#define MPIC_FSL_HAS_EIMR 0x00010000
368 387
369/* MPIC HW modification ID */ 388/* MPIC HW modification ID */
370#define MPIC_REGSET_MASK 0xf0000000 389#define MPIC_REGSET_MASK 0xf0000000
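
Per the comment added above, MPIC_FSL_HAS_EIMR is keyed off the version field of BRR1. A hedged sketch of that detection, assuming an already ioremap'ed register base is passed in and that version 4.1 is encoded as 0x0401 in BRR1; the helper name is invented:

#include <asm/io.h>
#include <asm/mpic.h>

/* Illustrative only: read BRR1 and decide whether the FSL error
 * interrupt mask register (EIMR) is present (MPIC version >= 4.1).
 */
static void example_detect_eimr(struct mpic *mpic, u32 __iomem *base)
{
	/* BRR1 sits at byte offset MPIC_FSL_BRR1 from the base */
	u32 brr1 = in_be32(base + MPIC_FSL_BRR1 / sizeof(u32));
	u32 version = brr1 & MPIC_FSL_BRR1_VER;

	/* assumption: 4.1 is encoded as 0x0401 in the version field */
	if (version >= 0x0401)
		mpic->flags |= MPIC_FSL_HAS_EIMR;
}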
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index daf813fea91f..e9e7a6999bb8 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -100,7 +100,7 @@ struct paca_struct {
100 /* SLB related definitions */ 100 /* SLB related definitions */
101 u16 vmalloc_sllp; 101 u16 vmalloc_sllp;
102 u16 slb_cache_ptr; 102 u16 slb_cache_ptr;
103 u16 slb_cache[SLB_CACHE_ENTRIES]; 103 u32 slb_cache[SLB_CACHE_ENTRIES];
104#endif /* CONFIG_PPC_STD_MMU_64 */ 104#endif /* CONFIG_PPC_STD_MMU_64 */
105 105
106#ifdef CONFIG_PPC_BOOK3E 106#ifdef CONFIG_PPC_BOOK3E
@@ -136,6 +136,7 @@ struct paca_struct {
136 u8 io_sync; /* writel() needs spin_unlock sync */ 136 u8 io_sync; /* writel() needs spin_unlock sync */
137 u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ 137 u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
138 u8 nap_state_lost; /* NV GPR values lost in power7_idle */ 138 u8 nap_state_lost; /* NV GPR values lost in power7_idle */
139 u64 sprg3; /* Saved user-visible sprg */
139 140
140#ifdef CONFIG_PPC_POWERNV 141#ifdef CONFIG_PPC_POWERNV
141 /* Pointer to OPAL machine check event structure set by the 142 /* Pointer to OPAL machine check event structure set by the
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fed85e6290e1..cd915d6b093d 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,11 +78,19 @@ extern u64 ppc64_pft_size;
78#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) 78#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
79#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) 79#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
80 80
81/*
82 * 1 bit per slice and we have one slice per 1TB
83 * Right now we support only 64TB.
84 * If we change this we will have to change the type
85 * of high_slices
86 */
87#define SLICE_MASK_SIZE 8
88
81#ifndef __ASSEMBLY__ 89#ifndef __ASSEMBLY__
82 90
83struct slice_mask { 91struct slice_mask {
84 u16 low_slices; 92 u16 low_slices;
85 u16 high_slices; 93 u64 high_slices;
86}; 94};
87 95
88struct mm_struct; 96struct mm_struct;
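
high_slices grows to a u64 above because there is now one bit per 1TB slice across the 64TB range. A small illustrative helper (the function itself is made up; the macros and struct slice_mask are from this header) showing how a slice bit would be derived from an address:

/* Illustrative: mark the slice containing 'addr' in a slice_mask. */
static void example_mark_slice(struct slice_mask *mask, unsigned long addr)
{
	if (addr >= SLICE_LOW_TOP)	/* above 4GB: one bit per 1TB slice */
		mask->high_slices |= 1ul << GET_HIGH_SLICE_INDEX(addr);
	else				/* below 4GB: one bit per 256MB slice */
		mask->low_slices |= 1u << GET_LOW_SLICE_INDEX(addr);
}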
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 8cccbee61519..025a130729bc 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -182,14 +182,25 @@ static inline int pci_device_from_OF_node(struct device_node *np,
182#if defined(CONFIG_EEH) 182#if defined(CONFIG_EEH)
183static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) 183static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
184{ 184{
185 /*
186 * OF nodes whose parent isn't a PCI bridge don't actually
187 * have a PCI_DN, so we have to skip them for any EEH
188 * operations.
189 */
190 if (!dn || !PCI_DN(dn))
191 return NULL;
192
185 return PCI_DN(dn)->edev; 193 return PCI_DN(dn)->edev;
186} 194}
195#else
196#define of_node_to_eeh_dev(x) (NULL)
187#endif 197#endif
188 198
189/** Find the bus corresponding to the indicated device node */ 199/** Find the bus corresponding to the indicated device node */
190extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 200extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
191 201
192/** Remove all of the PCI devices under this bus */ 202/** Remove all of the PCI devices under this bus */
203extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
193extern void pcibios_remove_pci_devices(struct pci_bus *bus); 204extern void pcibios_remove_pci_devices(struct pci_bus *bus);
194 205
195/** Discover new pci devices under this bus, and add them */ 206/** Discover new pci devices under this bus, and add them */
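
Since of_node_to_eeh_dev() above can now return NULL (and expands to NULL entirely without CONFIG_EEH), callers are expected to check the result. A hedged example of the calling pattern; the surrounding function is hypothetical:

#include <linux/errno.h>
#include <linux/of.h>
#include <asm/pci-bridge.h>

/* Illustrative caller: look up the EEH device for an OF node and
 * bail out quietly when the node has no PCI_DN / EEH state.
 */
static int example_check_eeh(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);

	if (!edev)
		return -ENODEV;	/* not under a PCI bridge, skip EEH */

	/* ... proceed with EEH handling on edev ... */
	return 0;
}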
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 078019b5b353..9710be3a2d17 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -49,6 +49,7 @@ struct power_pmu {
49#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ 49#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
50#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */ 50#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */
51#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */ 51#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */
52#define PPMU_SIAR_VALID 16 /* Processor has SIAR Valid bit */
52 53
53/* 54/*
54 * Values for flags to get_alternatives() 55 * Values for flags to get_alternatives()
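
PPMU_SIAR_VALID above pairs with the POWER7+ MMCRA bits added in the reg.h hunk later in this section: when a PMU advertises the flag, samples should only trust SIAR if the hardware marks its contents valid. A rough sketch of that check, not the series' exact code; the function name is invented and 'ppmu' stands for the registered struct power_pmu:

#include <asm/reg.h>

/* Sketch: decide whether SIAR can be trusted for this sample.
 * POWER7P_MMCRA_SIAR_VALID comes from the reg.h change below.
 */
static int example_siar_valid(struct power_pmu *ppmu, unsigned long mmcra)
{
	if (ppmu->flags & PPMU_SIAR_VALID)
		return (mmcra & POWER7P_MMCRA_SIAR_VALID) != 0;

	return 1;	/* older CPUs: SIAR is always usable */
}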
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 6eefdcffa359..12798c9d4b4b 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -7,7 +7,7 @@
7 */ 7 */
8#define PTE_INDEX_SIZE 9 8#define PTE_INDEX_SIZE 9
9#define PMD_INDEX_SIZE 7 9#define PMD_INDEX_SIZE 7
10#define PUD_INDEX_SIZE 7 10#define PUD_INDEX_SIZE 9
11#define PGD_INDEX_SIZE 9 11#define PGD_INDEX_SIZE 9
12 12
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
@@ -19,7 +19,7 @@
19 19
20#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 20#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
21#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) 21#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
22#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE) 22#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
23#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 23#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
24 24
25/* PMD_SHIFT determines what a second-level page table entry can map */ 25/* PMD_SHIFT determines what a second-level page table entry can map */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 90533ddcd703..be4e2878fbc0 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -7,7 +7,7 @@
7#define PTE_INDEX_SIZE 12 7#define PTE_INDEX_SIZE 12
8#define PMD_INDEX_SIZE 12 8#define PMD_INDEX_SIZE 12
9#define PUD_INDEX_SIZE 0 9#define PUD_INDEX_SIZE 0
10#define PGD_INDEX_SIZE 4 10#define PGD_INDEX_SIZE 6
11 11
12#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
13#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) 13#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c4205616dfb5..0182c203e411 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
21#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) 21#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
22 22
23 23
24/* Some sanity checking */
25#if TASK_SIZE_USER64 > PGTABLE_RANGE
26#error TASK_SIZE_USER64 exceeds pagetable range
27#endif
28
29#ifdef CONFIG_PPC_STD_MMU_64
30#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
31#error TASK_SIZE_USER64 exceeds user VSID range
32#endif
33#endif
34
35/* 24/*
36 * Define the address range of the kernel non-linear virtual area 25 * Define the address range of the kernel non-linear virtual area
37 */ 26 */
@@ -41,7 +30,7 @@
41#else 30#else
42#define KERN_VIRT_START ASM_CONST(0xD000000000000000) 31#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
43#endif 32#endif
44#define KERN_VIRT_SIZE PGTABLE_RANGE 33#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
45 34
46/* 35/*
47 * The vmalloc space starts at the beginning of that region, and 36 * The vmalloc space starts at the beginning of that region, and
@@ -117,9 +106,6 @@
117 106
118#ifndef __ASSEMBLY__ 107#ifndef __ASSEMBLY__
119 108
120#include <linux/stddef.h>
121#include <asm/tlbflush.h>
122
123/* 109/*
124 * This is the default implementation of various PTE accessors, it's 110 * This is the default implementation of various PTE accessors, it's
125 * used in all cases except Book3S with 64K pages where we have a 111 * used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
198/* to find an entry in a kernel page-table-directory */ 184/* to find an entry in a kernel page-table-directory */
199/* This now only contains the vmalloc pages */ 185/* This now only contains the vmalloc pages */
200#define pgd_offset_k(address) pgd_offset(&init_mm, address) 186#define pgd_offset_k(address) pgd_offset(&init_mm, address)
201 187extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
188 pte_t *ptep, unsigned long pte, int huge);
202 189
203/* Atomic PTE updates */ 190/* Atomic PTE updates */
204static inline unsigned long pte_update(struct mm_struct *mm, 191static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2e0e4110f7ae..a9cbd3ba5c33 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@
9 9
10struct mm_struct; 10struct mm_struct;
11 11
12#ifdef CONFIG_DEBUG_VM
13extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
14#else /* CONFIG_DEBUG_VM */
15static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
16{
17}
18#endif /* !CONFIG_DEBUG_VM */
19
20#endif /* !__ASSEMBLY__ */ 12#endif /* !__ASSEMBLY__ */
21 13
22#if defined(CONFIG_PPC64) 14#if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
27 19
28#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
29 21
22#include <asm/tlbflush.h>
23
30/* Generic accessors to PTE bits */ 24/* Generic accessors to PTE bits */
31static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } 25static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
32static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 26static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4c25319f2fbc..5f73ce63fcae 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -126,6 +126,7 @@
126#define PPC_INST_TLBIVAX 0x7c000624 126#define PPC_INST_TLBIVAX 0x7c000624
127#define PPC_INST_TLBSRX_DOT 0x7c0006a5 127#define PPC_INST_TLBSRX_DOT 0x7c0006a5
128#define PPC_INST_XXLOR 0xf0000510 128#define PPC_INST_XXLOR 0xf0000510
129#define PPC_INST_XVCPSGNDP 0xf0000780
129 130
130#define PPC_INST_NAP 0x4c000364 131#define PPC_INST_NAP 0x4c000364
131#define PPC_INST_SLEEP 0x4c0003a4 132#define PPC_INST_SLEEP 0x4c0003a4
@@ -277,6 +278,8 @@
277 VSX_XX1((s), a, b)) 278 VSX_XX1((s), a, b))
278#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ 279#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
279 VSX_XX3((t), a, b)) 280 VSX_XX3((t), a, b))
281#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
282 VSX_XX3((t), (a), (b))))
280 283
281#define PPC_NAP stringify_in_c(.long PPC_INST_NAP) 284#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
282#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) 285#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 80fa704d410f..ed57fa7920c8 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -47,19 +47,17 @@ extern int rtas_setup_phb(struct pci_controller *phb);
47 47
48#ifdef CONFIG_EEH 48#ifdef CONFIG_EEH
49 49
50void pci_addr_cache_build(void); 50void eeh_addr_cache_insert_dev(struct pci_dev *dev);
51void pci_addr_cache_insert_device(struct pci_dev *dev); 51void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
52void pci_addr_cache_remove_device(struct pci_dev *dev); 52struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
53struct pci_dev *pci_addr_cache_get_device(unsigned long addr); 53void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
54void eeh_slot_error_detail(struct eeh_dev *edev, int severity); 54int eeh_pci_enable(struct eeh_pe *pe, int function);
55int eeh_pci_enable(struct eeh_dev *edev, int function); 55int eeh_reset_pe(struct eeh_pe *);
56int eeh_reset_pe(struct eeh_dev *); 56void eeh_save_bars(struct eeh_dev *edev);
57void eeh_restore_bars(struct eeh_dev *);
58int rtas_write_config(struct pci_dn *, int where, int size, u32 val); 57int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
59int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); 58int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
60void eeh_mark_slot(struct device_node *dn, int mode_flag); 59void eeh_pe_state_mark(struct eeh_pe *pe, int state);
61void eeh_clear_slot(struct device_node *dn, int mode_flag); 60void eeh_pe_state_clear(struct eeh_pe *pe, int state);
62struct device_node *eeh_find_device_pe(struct device_node *dn);
63 61
64void eeh_sysfs_add_device(struct pci_dev *pdev); 62void eeh_sysfs_add_device(struct pci_dev *pdev);
65void eeh_sysfs_remove_device(struct pci_dev *pdev); 63void eeh_sysfs_remove_device(struct pci_dev *pdev);
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
new file mode 100644
index 000000000000..5f1e15b68704
--- /dev/null
+++ b/arch/powerpc/include/asm/probes.h
@@ -0,0 +1,42 @@
1#ifndef _ASM_POWERPC_PROBES_H
2#define _ASM_POWERPC_PROBES_H
3#ifdef __KERNEL__
4/*
5 * Definitions common to probes files
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * Copyright IBM Corporation, 2012
22 */
23#include <linux/types.h>
24
25typedef u32 ppc_opcode_t;
26#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
27
28/* Trap definitions per ISA */
29#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
30#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
31#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
32#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
33
34#ifdef CONFIG_PPC64
35#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
36 IS_TWI(instr) || IS_TDI(instr))
37#else
38#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
39#endif /* CONFIG_PPC64 */
40
41#endif /* __KERNEL__ */
42#endif /* _ASM_POWERPC_PROBES_H */
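
The trap-detection macros kprobes used to carry privately now live in this shared header so uprobes can reuse them. For illustration, a caller would typically read an instruction word and ask is_trap() whether it is any trap encoding; the wrapper below is invented, the macros and types are from the new header:

#include <asm/probes.h>

/* Illustrative use of the shared helpers: report whether the
 * instruction at 'addr' is already a trap (e.g. an existing
 * breakpoint) before planting a new probe there.
 */
static int example_is_already_trapped(ppc_opcode_t *addr)
{
	ppc_opcode_t insn = *addr;

	if (insn == BREAKPOINT_INSTRUCTION)
		return 1;		/* our own software breakpoint */

	return is_trap(insn);		/* any tw/td/twi/tdi form */
}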
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 54b73a28c205..9dc5cd1fde1a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -97,8 +97,8 @@ extern struct task_struct *last_task_used_spe;
97#endif 97#endif
98 98
99#ifdef CONFIG_PPC64 99#ifdef CONFIG_PPC64
100/* 64-bit user address space is 44-bits (16TB user VM) */ 100/* 64-bit user address space is 46-bits (64TB user VM) */
101#define TASK_SIZE_USER64 (0x0000100000000000UL) 101#define TASK_SIZE_USER64 (0x0000400000000000UL)
102 102
103/* 103/*
104 * 32-bit user address space is 4GB - 1 page 104 * 32-bit user address space is 4GB - 1 page
@@ -219,6 +219,8 @@ struct thread_struct {
219#endif /* CONFIG_HAVE_HW_BREAKPOINT */ 219#endif /* CONFIG_HAVE_HW_BREAKPOINT */
220#endif 220#endif
221 unsigned long dabr; /* Data address breakpoint register */ 221 unsigned long dabr; /* Data address breakpoint register */
222 unsigned long dabrx; /* ... extension */
223 unsigned long trap_nr; /* last trap # on this thread */
222#ifdef CONFIG_ALTIVEC 224#ifdef CONFIG_ALTIVEC
223 /* Complete AltiVec register set */ 225 /* Complete AltiVec register set */
224 vector128 vr[32] __attribute__((aligned(16))); 226 vector128 vr[32] __attribute__((aligned(16)));
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 59247e816ac5..eedf427c9124 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -58,14 +58,16 @@
58/* Trick: we set __end to va + 64k, which happens to work for 58/* Trick: we set __end to va + 64k, which happens to work for
59 * a 16M page as well, as we want only one iteration 59 * a 16M page as well, as we want only one iteration
60 */ 60 */
61#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ 61#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
62 do { \ 62 do { \
63 unsigned long __end = va + PAGE_SIZE; \ 63 unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
64 unsigned __split = (psize == MMU_PAGE_4K || \ 64 unsigned __split = (psize == MMU_PAGE_4K || \
65 psize == MMU_PAGE_64K_AP); \ 65 psize == MMU_PAGE_64K_AP); \
66 shift = mmu_psize_defs[psize].shift; \ 66 shift = mmu_psize_defs[psize].shift; \
67 for (index = 0; va < __end; index++, va += (1L << shift)) { \ 67 for (index = 0; vpn < __end; index++, \
68 if (!__split || __rpte_sub_valid(rpte, index)) do { \ 68 vpn += (1L << (shift - VPN_SHIFT))) { \
69 if (!__split || __rpte_sub_valid(rpte, index)) \
70 do {
69 71
70#define pte_iterate_hashed_end() } while(0); } } while(0) 72#define pte_iterate_hashed_end() } while(0); } } while(0)
71 73
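
The subpage iterator above now walks VPNs rather than raw VAs; the loop body sees one (index, shift) pair per valid hashed subpage. A sketch of the usage pattern with the body reduced to a comment; the wrapper function and its argument sourcing are assumptions:

/* Sketch of the iteration pattern after the VPN conversion.
 * 'rpte' is a real_pte_t for the 64K PTE and 'vpn' the shifted
 * virtual page number; both are assumed to come from the caller.
 */
static void example_walk_subpages(real_pte_t rpte, int psize, unsigned long vpn)
{
	unsigned long index;
	unsigned int shift;

	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
		/* e.g. flush the hash entry for this subpage:
		 * flush_hash_page(vpn, rpte, psize, ssize, local);
		 */
	} pte_iterate_hashed_end();
}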
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 638608677e2a..d24c14163966 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -208,6 +208,9 @@
208#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ 208#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
209#define DABRX_USER (1UL << 0) 209#define DABRX_USER (1UL << 0)
210#define DABRX_KERNEL (1UL << 1) 210#define DABRX_KERNEL (1UL << 1)
211#define DABRX_HYP (1UL << 2)
212#define DABRX_BTI (1UL << 3)
213#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
211#define SPRN_DAR 0x013 /* Data Address Register */ 214#define SPRN_DAR 0x013 /* Data Address Register */
212#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ 215#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
213#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ 216#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
@@ -521,6 +524,7 @@
521 524
522#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ 525#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
523#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ 526#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
527#define HSRR1_DENORM 0x00100000 /* Denorm exception */
524 528
525#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */ 529#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
526#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */ 530#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
@@ -602,6 +606,10 @@
602#define POWER6_MMCRA_SIPR 0x0000020000000000ULL 606#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
603#define POWER6_MMCRA_THRM 0x00000020UL 607#define POWER6_MMCRA_THRM 0x00000020UL
604#define POWER6_MMCRA_OTHER 0x0000000EUL 608#define POWER6_MMCRA_OTHER 0x0000000EUL
609
610#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
611#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
612
605#define SPRN_PMC1 787 613#define SPRN_PMC1 787
606#define SPRN_PMC2 788 614#define SPRN_PMC2 788
607#define SPRN_PMC3 789 615#define SPRN_PMC3 789
@@ -761,7 +769,8 @@
761 * 64-bit embedded 769 * 64-bit embedded
762 * - SPRG0 generic exception scratch 770 * - SPRG0 generic exception scratch
763 * - SPRG2 TLB exception stack 771 * - SPRG2 TLB exception stack
764 * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible) 772 * - SPRG3 critical exception scratch and
773 * CPU and NUMA node for VDSO getcpu (user visible)
765 * - SPRG4 unused (user visible) 774 * - SPRG4 unused (user visible)
766 * - SPRG6 TLB miss scratch (user visible, sorry !) 775 * - SPRG6 TLB miss scratch (user visible, sorry !)
767 * - SPRG7 critical exception scratch 776 * - SPRG7 critical exception scratch
@@ -858,11 +867,12 @@
858 867
859#ifdef CONFIG_PPC_BOOK3E_64 868#ifdef CONFIG_PPC_BOOK3E_64
860#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8 869#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8
861#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7 870#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3
862#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9 871#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9
863#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 872#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
864#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 873#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
865#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 874#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
875#define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH
866 876
867#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX 877#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX
868#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA 878#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA
@@ -937,7 +947,7 @@
937#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ 947#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
938#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ 948#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
939 949
940#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv)) 950#define pvr_version_is(pvr) (PVR_VER(mfspr(SPRN_PVR)) == (pvr))
941 951
942/* 952/*
943 * IBM has further subdivided the standard PowerPC 16-bit version and 953 * IBM has further subdivided the standard PowerPC 16-bit version and
@@ -1002,25 +1012,25 @@
1002#define PVR_476_ISS 0x00052000 1012#define PVR_476_ISS 0x00052000
1003 1013
1004/* 64-bit processors */ 1014/* 64-bit processors */
1005/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */ 1015#define PVR_NORTHSTAR 0x0033
1006#define PV_NORTHSTAR 0x0033 1016#define PVR_PULSAR 0x0034
1007#define PV_PULSAR 0x0034 1017#define PVR_POWER4 0x0035
1008#define PV_POWER4 0x0035 1018#define PVR_ICESTAR 0x0036
1009#define PV_ICESTAR 0x0036 1019#define PVR_SSTAR 0x0037
1010#define PV_SSTAR 0x0037 1020#define PVR_POWER4p 0x0038
1011#define PV_POWER4p 0x0038 1021#define PVR_970 0x0039
1012#define PV_970 0x0039 1022#define PVR_POWER5 0x003A
1013#define PV_POWER5 0x003A 1023#define PVR_POWER5p 0x003B
1014#define PV_POWER5p 0x003B 1024#define PVR_970FX 0x003C
1015#define PV_970FX 0x003C 1025#define PVR_POWER6 0x003E
1016#define PV_POWER6 0x003E 1026#define PVR_POWER7 0x003F
1017#define PV_POWER7 0x003F 1027#define PVR_630 0x0040
1018#define PV_630 0x0040 1028#define PVR_630p 0x0041
1019#define PV_630p 0x0041 1029#define PVR_970MP 0x0044
1020#define PV_970MP 0x0044 1030#define PVR_970GX 0x0045
1021#define PV_970GX 0x0045 1031#define PVR_POWER7p 0x004A
1022#define PV_BE 0x0070 1032#define PVR_BE 0x0070
1023#define PV_PA6T 0x0090 1033#define PVR_PA6T 0x0090
1024 1034
1025/* Macros for setting and retrieving special purpose registers */ 1035/* Macros for setting and retrieving special purpose registers */
1026#ifndef __ASSEMBLY__ 1036#ifndef __ASSEMBLY__
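
Two usage notes on the reg.h hunk above: the version helpers are renamed from PV_*/__is_processor to PVR_*/pvr_version_is(), and DABRX gains hypervisor and breakpoint-translation bits plus a DABRX_ALL mask. A short illustration of the new spellings (the function is invented; set_dabr() with two arguments is the helper already visible in the hw_breakpoint.h hunk earlier in this section):

#include <linux/printk.h>
#include <asm/reg.h>

/* Sketch: CPU-version check and a "match everywhere" DABR setup
 * using the renamed PVR constants and the new DABRX_ALL mask.
 */
static void example_pvr_and_dabrx(unsigned long dabr)
{
	if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p))
		pr_info("running on POWER7/POWER7+\n");

	/* match user, kernel and hypervisor accesses */
	set_dabr(dabr, DABRX_ALL);
}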
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index d084ce195fc3..8b9a306260b2 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,7 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
9extern unsigned int rtas_data; 9extern unsigned int rtas_data;
10extern int mem_init_done; /* set on boot once kmalloc can be called */ 10extern int mem_init_done; /* set on boot once kmalloc can be called */
11extern int init_bootmem_done; /* set once bootmem is available */ 11extern int init_bootmem_done; /* set once bootmem is available */
12extern phys_addr_t memory_limit; 12extern unsigned long long memory_limit;
13extern unsigned long klimit; 13extern unsigned long klimit;
14extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 14extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
15 15
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ebc24dc5b1a1..e807e9d8e3f7 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -65,6 +65,7 @@ int generic_cpu_disable(void);
65void generic_cpu_die(unsigned int cpu); 65void generic_cpu_die(unsigned int cpu);
66void generic_mach_cpu_die(void); 66void generic_mach_cpu_die(void);
67void generic_set_cpu_dead(unsigned int cpu); 67void generic_set_cpu_dead(unsigned int cpu);
68void generic_set_cpu_up(unsigned int cpu);
68int generic_check_cpu_restart(unsigned int cpu); 69int generic_check_cpu_restart(unsigned int cpu);
69#endif 70#endif
70 71
@@ -190,6 +191,7 @@ extern unsigned long __secondary_hold_spinloop;
190extern unsigned long __secondary_hold_acknowledge; 191extern unsigned long __secondary_hold_acknowledge;
191extern char __secondary_hold; 192extern char __secondary_hold;
192 193
194extern void __early_start(void);
193#endif /* __ASSEMBLY__ */ 195#endif /* __ASSEMBLY__ */
194 196
195#endif /* __KERNEL__ */ 197#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 0c5fa3145615..f6fc0ee813d7 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -10,8 +10,8 @@
10 */ 10 */
11#define SECTION_SIZE_BITS 24 11#define SECTION_SIZE_BITS 24
12 12
13#define MAX_PHYSADDR_BITS 44 13#define MAX_PHYSADDR_BITS 46
14#define MAX_PHYSMEM_BITS 44 14#define MAX_PHYSMEM_BITS 46
15 15
16#endif /* CONFIG_SPARSEMEM */ 16#endif /* CONFIG_SPARSEMEM */
17 17
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 8979d4cd3d70..de99d6e29430 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -22,4 +22,10 @@ int __init swiotlb_setup_bus_notifier(void);
22 22
23extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev); 23extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev);
24 24
25#ifdef CONFIG_SWIOTLB
26void swiotlb_detect_4g(void);
27#else
28static inline void swiotlb_detect_4g(void) {}
29#endif
30
25#endif /* __ASM_SWIOTLB_H */ 31#endif /* __ASM_SWIOTLB_H */
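
The swiotlb_detect_4g() stub above follows the usual "empty inline when the option is off" pattern, so setup code can call it unconditionally. A hypothetical caller, purely for illustration:

#include <linux/init.h>
#include <asm/swiotlb.h>

/* Hypothetical board setup hook: safe to call whether or not
 * CONFIG_SWIOTLB is enabled, thanks to the inline stub above.
 */
static void __init example_board_setup_arch(void)
{
	/* enable bounce buffering if any memory sits above 4GB */
	swiotlb_detect_4g();
}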
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index faf93529cbf0..8ceea14d6fe4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -102,7 +102,10 @@ static inline struct thread_info *current_thread_info(void)
102#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 102#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
103#define TIF_NOERROR 12 /* Force successful syscall return */ 103#define TIF_NOERROR 12 /* Force successful syscall return */
104#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ 104#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
105#define TIF_UPROBE 14 /* breakpointed or single-stepping */
105#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ 106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
107#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
108 for stack store? */
106 109
107/* as above, but as bit values */ 110/* as above, but as bit values */
108#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -118,12 +121,14 @@ static inline struct thread_info *current_thread_info(void)
118#define _TIF_RESTOREALL (1<<TIF_RESTOREALL) 121#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
119#define _TIF_NOERROR (1<<TIF_NOERROR) 122#define _TIF_NOERROR (1<<TIF_NOERROR)
120#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 123#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
124#define _TIF_UPROBE (1<<TIF_UPROBE)
121#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 125#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
126#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
122#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 127#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
123 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) 128 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
124 129
125#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 130#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
126 _TIF_NOTIFY_RESUME) 131 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
127#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 132#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
128 133
129/* Bits in local_flags */ 134/* Bits in local_flags */
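
TIF_UPROBE above joins _TIF_USER_WORK_MASK so the exit-to-user path drops into the notify-resume handling when a uprobe hit is pending. A hedged sketch of the consumer side (the real handling lands in the arch signal code; uprobe_notify_resume() is the generic uprobes entry point, the surrounding function is illustrative):

#include <linux/thread_info.h>
#include <linux/uprobes.h>
#include <asm/ptrace.h>

/* Sketch: how the return-to-user path would consume TIF_UPROBE. */
static void example_do_notify_resume(struct pt_regs *regs, unsigned long flags)
{
	if (flags & _TIF_UPROBE) {
		clear_thread_flag(TIF_UPROBE);
		uprobe_notify_resume(regs);
	}

	/* ... existing signal / NOTIFY_RESUME handling follows ... */
}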
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 81143fcbd113..61a59271665b 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -95,7 +95,7 @@ struct ppc64_tlb_batch {
95 unsigned long index; 95 unsigned long index;
96 struct mm_struct *mm; 96 struct mm_struct *mm;
97 real_pte_t pte[PPC64_TLB_BATCH_NR]; 97 real_pte_t pte[PPC64_TLB_BATCH_NR];
98 unsigned long vaddr[PPC64_TLB_BATCH_NR]; 98 unsigned long vpn[PPC64_TLB_BATCH_NR];
99 unsigned int psize; 99 unsigned int psize;
100 int ssize; 100 int ssize;
101}; 101};
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
103 103
104extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); 104extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
105 105
106extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
107 pte_t *ptep, unsigned long pte, int huge);
108
109#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 106#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
110 107
111static inline void arch_enter_lazy_mmu_mode(void) 108static inline void arch_enter_lazy_mmu_mode(void)
@@ -127,7 +124,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
127#define arch_flush_lazy_mmu_mode() do {} while (0) 124#define arch_flush_lazy_mmu_mode() do {} while (0)
128 125
129 126
130extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, 127extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
131 int ssize, int local); 128 int ssize, int local);
132extern void flush_hash_range(unsigned long number, int local); 129extern void flush_hash_range(unsigned long number, int local);
133 130
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 17bb40cad5bf..4db49590acf5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -98,11 +98,6 @@ struct exception_table_entry {
98 * PowerPC, we can just do these as direct assignments. (Of course, the 98 * PowerPC, we can just do these as direct assignments. (Of course, the
99 * exception handling means that it's no longer "just"...) 99 * exception handling means that it's no longer "just"...)
100 * 100 *
101 * The "user64" versions of the user access functions are versions that
102 * allow access of 64-bit data. The "get_user" functions do not
103 * properly handle 64-bit data because the value gets down cast to a long.
104 * The "put_user" functions already handle 64-bit data properly but we add
105 * "user64" versions for completeness
106 */ 101 */
107#define get_user(x, ptr) \ 102#define get_user(x, ptr) \
108 __get_user_check((x), (ptr), sizeof(*(ptr))) 103 __get_user_check((x), (ptr), sizeof(*(ptr)))
@@ -114,12 +109,6 @@ struct exception_table_entry {
114#define __put_user(x, ptr) \ 109#define __put_user(x, ptr) \
115 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 110 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
116 111
117#ifndef __powerpc64__
118#define __get_user64(x, ptr) \
119 __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
120#define __put_user64(x, ptr) __put_user(x, ptr)
121#endif
122
123#define __get_user_inatomic(x, ptr) \ 112#define __get_user_inatomic(x, ptr) \
124 __get_user_nosleep((x), (ptr), sizeof(*(ptr))) 113 __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
125#define __put_user_inatomic(x, ptr) \ 114#define __put_user_inatomic(x, ptr) \
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
new file mode 100644
index 000000000000..b532060d0916
--- /dev/null
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -0,0 +1,54 @@
1#ifndef _ASM_UPROBES_H
2#define _ASM_UPROBES_H
3/*
4 * User-space Probes (UProbes) for powerpc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright IBM Corporation, 2007-2012
21 *
22 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
23 */
24
25#include <linux/notifier.h>
26#include <asm/probes.h>
27
28typedef ppc_opcode_t uprobe_opcode_t;
29
30#define MAX_UINSN_BYTES 4
31#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
32
33/* The following alias is needed for reference from arch-agnostic code */
34#define UPROBE_SWBP_INSN BREAKPOINT_INSTRUCTION
35#define UPROBE_SWBP_INSN_SIZE 4 /* swbp insn size in bytes */
36
37struct arch_uprobe {
38 union {
39 u8 insn[MAX_UINSN_BYTES];
40 u32 ainsn;
41 };
42};
43
44struct arch_uprobe_task {
45 unsigned long saved_trap_nr;
46};
47
48extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
49extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
50extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
51extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
52extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
53extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
54#endif /* _ASM_UPROBES_H */
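
The new header keeps the probed instruction in a union of four bytes and one u32 because every powerpc instruction is exactly MAX_UINSN_BYTES (4 bytes) long: generic uprobes code copies raw bytes, the arch code reads back a word. A stand-alone sketch of that layout follows; the structure name and sample bytes are invented for illustration and are not kernel code.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical user-space mock-up of the arch_uprobe union above. */
    struct sketch_arch_uprobe {
            union {
                    uint8_t  insn[4];   /* byte view used by generic code */
                    uint32_t ainsn;     /* word view used by arch code */
            };
    };

    int main(void)
    {
            struct sketch_arch_uprobe au;
            const uint8_t copied[4] = { 0x7d, 0x82, 0x10, 0x08 };  /* arbitrary bytes */

            memcpy(au.insn, copied, sizeof(au.insn));   /* what generic code does */
            printf("word view: 0x%08x\n", au.ainsn);    /* what arch code reads */
            return 0;
    }
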
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bb282dd81612..cde12f8a4ebc 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_MODULES) += ppc_ksyms.o
96obj-$(CONFIG_BOOTX_TEXT) += btext.o 96obj-$(CONFIG_BOOTX_TEXT) += btext.o
97obj-$(CONFIG_SMP) += smp.o 97obj-$(CONFIG_SMP) += smp.o
98obj-$(CONFIG_KPROBES) += kprobes.o 98obj-$(CONFIG_KPROBES) += kprobes.o
99obj-$(CONFIG_UPROBES) += uprobes.o
99obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o 100obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
100obj-$(CONFIG_STACKTRACE) += stacktrace.o 101obj-$(CONFIG_STACKTRACE) += stacktrace.o
101obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o 102obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e8995727b1c1..7523539cfe9f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -206,6 +206,7 @@ int main(void)
206 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 206 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
207 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 207 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
208 DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); 208 DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
209 DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3));
209#endif /* CONFIG_PPC64 */ 210#endif /* CONFIG_PPC64 */
210 211
211 /* RTAS */ 212 /* RTAS */
@@ -534,7 +535,6 @@ int main(void)
534 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); 535 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
535 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); 536 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
536 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); 537 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
537 HSTATE_FIELD(HSTATE_SPRG3, sprg3);
538 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); 538 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
539 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); 539 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
540 HSTATE_FIELD(HSTATE_NAPPING, napping); 540 HSTATE_FIELD(HSTATE_NAPPING, napping);
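
The PACA_SPRG3 constant added here (and the HSTATE_SPRG3 one dropped) comes from the usual asm-offsets mechanism: the build prints offsetof() values as assembler constants so hand-written assembly can address C structure members symbolically. A minimal stand-alone mock-up of the idea, using an invented structure rather than the real paca_struct:

    #include <stddef.h>
    #include <stdio.h>

    /* Sketch only: show how a member offset becomes a build-time constant. */
    struct sketch_paca {
            unsigned long lock_token;
            unsigned long sprg3;       /* stands in for the new sprg3 member */
    };

    int main(void)
    {
            printf("#define SKETCH_PACA_SPRG3 %zu\n",
                   offsetof(struct sketch_paca, sprg3));
            return 0;
    }
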
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 69fdd2322a66..dcd881937f7a 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -16,6 +16,8 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/cputable.h> 17#include <asm/cputable.h>
18#include <asm/ppc_asm.h> 18#include <asm/ppc_asm.h>
19#include <asm/mmu-book3e.h>
20#include <asm/asm-offsets.h>
19 21
20_GLOBAL(__e500_icache_setup) 22_GLOBAL(__e500_icache_setup)
21 mfspr r0, SPRN_L1CSR1 23 mfspr r0, SPRN_L1CSR1
@@ -73,27 +75,81 @@ _GLOBAL(__setup_cpu_e500v2)
73 mtlr r4 75 mtlr r4
74 blr 76 blr
75_GLOBAL(__setup_cpu_e500mc) 77_GLOBAL(__setup_cpu_e500mc)
76 mr r5, r4 78_GLOBAL(__setup_cpu_e5500)
77 mflr r4 79 mflr r5
78 bl __e500_icache_setup 80 bl __e500_icache_setup
79 bl __e500_dcache_setup 81 bl __e500_dcache_setup
80 bl __setup_e500mc_ivors 82 bl __setup_e500mc_ivors
81 mtlr r4 83 /*
84 * We only want to touch IVOR38-41 if we're running on hardware
85 * that supports category E.HV. The architectural way to determine
86 * this is MMUCFG[LPIDSIZE].
87 */
88 mfspr r3, SPRN_MMUCFG
89 rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
90 beq 1f
91 bl __setup_ehv_ivors
92 b 2f
931:
94 lwz r3, CPU_SPEC_FEATURES(r4)
95 /* We need this check as cpu_setup is also called for
96 * the secondary cores. So, if we have already cleared
97 * the feature on the primary core, avoid doing it on the
98 * secondary core.
99 */
100 andis. r6, r3, CPU_FTR_EMB_HV@h
101 beq 2f
102 rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
103 stw r3, CPU_SPEC_FEATURES(r4)
1042:
105 mtlr r5
82 blr 106 blr
83#endif 107#endif
84/* Right now, restore and setup are the same thing */ 108
109#ifdef CONFIG_PPC_BOOK3E_64
85_GLOBAL(__restore_cpu_e5500) 110_GLOBAL(__restore_cpu_e5500)
86_GLOBAL(__setup_cpu_e5500)
87 mflr r4 111 mflr r4
88 bl __e500_icache_setup 112 bl __e500_icache_setup
89 bl __e500_dcache_setup 113 bl __e500_dcache_setup
90#ifdef CONFIG_PPC_BOOK3E_64
91 bl .__setup_base_ivors 114 bl .__setup_base_ivors
92 bl .setup_perfmon_ivor 115 bl .setup_perfmon_ivor
93 bl .setup_doorbell_ivors 116 bl .setup_doorbell_ivors
117 /*
118 * We only want to touch IVOR38-41 if we're running on hardware
119 * that supports category E.HV. The architectural way to determine
120 * this is MMUCFG[LPIDSIZE].
121 */
122 mfspr r10,SPRN_MMUCFG
123 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
124 beq 1f
94 bl .setup_ehv_ivors 125 bl .setup_ehv_ivors
95#else 1261:
96 bl __setup_e500mc_ivors
97#endif
98 mtlr r4 127 mtlr r4
99 blr 128 blr
129
130_GLOBAL(__setup_cpu_e5500)
131 mflr r5
132 bl __e500_icache_setup
133 bl __e500_dcache_setup
134 bl .__setup_base_ivors
135 bl .setup_perfmon_ivor
136 bl .setup_doorbell_ivors
137 /*
138 * We only want to touch IVOR38-41 if we're running on hardware
139 * that supports category E.HV. The architectural way to determine
140 * this is MMUCFG[LPIDSIZE].
141 */
142 mfspr r10,SPRN_MMUCFG
143 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
144 beq 1f
145 bl .setup_ehv_ivors
146 b 2f
1471:
148 ld r10,CPU_SPEC_FEATURES(r4)
149 LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
150 andc r10,r10,r9
151 std r10,CPU_SPEC_FEATURES(r4)
1522:
153 mtlr r5
154 blr
155#endif
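
In C terms, the decision the e500mc/e5500 setup paths now repeat is small: if MMUCFG[LPIDSIZE] is non-zero the part (or the hypervisor beneath it) implements category E.HV and the extra IVORs are programmed; otherwise the CPU_FTR_EMB_HV feature bit is cleared once. A hedged stand-alone sketch, with the field mask and feature bit written as illustrative values rather than the kernel's definitions:

    #include <stdint.h>

    #define SK_MMUCFG_LPIDSIZE  0x00000f00u   /* assumed mask, for illustration */
    #define SK_FTR_EMB_HV       (1u << 0)     /* stand-in for CPU_FTR_EMB_HV */

    /* Sketch of the check described in the comments above. */
    void sk_setup_ehv(uint32_t mmucfg, uint32_t *cpu_features,
                      void (*setup_ehv_ivors)(void))
    {
            if (mmucfg & SK_MMUCFG_LPIDSIZE)
                    setup_ehv_ivors();                 /* E.HV present: set IVOR38-41 */
            else
                    *cpu_features &= ~SK_FTR_EMB_HV;   /* advertise no E.HV support */
    }
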
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 455faa389876..0514c21f138b 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2016,7 +2016,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
2016 .oprofile_cpu_type = "ppc/e500mc", 2016 .oprofile_cpu_type = "ppc/e500mc",
2017 .oprofile_type = PPC_OPROFILE_FSL_EMB, 2017 .oprofile_type = PPC_OPROFILE_FSL_EMB,
2018 .cpu_setup = __setup_cpu_e5500, 2018 .cpu_setup = __setup_cpu_e5500,
2019#ifndef CONFIG_PPC32
2019 .cpu_restore = __restore_cpu_e5500, 2020 .cpu_restore = __restore_cpu_e5500,
2021#endif
2020 .machine_check = machine_check_e500mc, 2022 .machine_check = machine_check_e500mc,
2021 .platform = "ppce5500", 2023 .platform = "ppce5500",
2022 }, 2024 },
@@ -2034,7 +2036,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
2034 .oprofile_cpu_type = "ppc/e6500", 2036 .oprofile_cpu_type = "ppc/e6500",
2035 .oprofile_type = PPC_OPROFILE_FSL_EMB, 2037 .oprofile_type = PPC_OPROFILE_FSL_EMB,
2036 .cpu_setup = __setup_cpu_e5500, 2038 .cpu_setup = __setup_cpu_e5500,
2039#ifndef CONFIG_PPC32
2037 .cpu_restore = __restore_cpu_e5500, 2040 .cpu_restore = __restore_cpu_e5500,
2041#endif
2038 .machine_check = machine_check_e500mc, 2042 .machine_check = machine_check_e500mc,
2039 .platform = "ppce6500", 2043 .platform = "ppce6500",
2040 }, 2044 },
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 46943651da23..bd1a2aba599f 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/memblock.h>
15#include <linux/pfn.h> 16#include <linux/pfn.h>
16#include <linux/of_platform.h> 17#include <linux/of_platform.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
@@ -20,7 +21,6 @@
20#include <asm/machdep.h> 21#include <asm/machdep.h>
21#include <asm/swiotlb.h> 22#include <asm/swiotlb.h>
22#include <asm/dma.h> 23#include <asm/dma.h>
23#include <asm/abs_addr.h>
24 24
25unsigned int ppc_swiotlb_enable; 25unsigned int ppc_swiotlb_enable;
26 26
@@ -105,3 +105,23 @@ int __init swiotlb_setup_bus_notifier(void)
105 &ppc_swiotlb_plat_bus_notifier); 105 &ppc_swiotlb_plat_bus_notifier);
106 return 0; 106 return 0;
107} 107}
108
109void swiotlb_detect_4g(void)
110{
111 if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
112 ppc_swiotlb_enable = 1;
113}
114
115static int __init swiotlb_late_init(void)
116{
117 if (ppc_swiotlb_enable) {
118 swiotlb_print_info();
119 set_pci_dma_ops(&swiotlb_dma_ops);
120 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
121 } else {
122 swiotlb_free();
123 }
124
125 return 0;
126}
127subsys_initcall(swiotlb_late_init);
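
The new swiotlb_detect_4g()/swiotlb_late_init() pair encodes a simple policy: keep the bounce buffer only when some RAM sits beyond what a 32-bit DMA mask can address, otherwise free it late in boot. A stand-alone sketch of the detection test, using the same 4 GiB boundary as the hunk above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: dram_end is one past the last byte of RAM, as with
     * memblock_end_of_DRAM(); any byte above 0xffffffff needs bouncing. */
    static bool sk_need_swiotlb(uint64_t dram_end)
    {
            return (dram_end - 1) > 0xffffffffULL;
    }

    int main(void)
    {
            printf("%d\n", sk_need_swiotlb(0x100000000ULL));   /* exactly 4 GiB -> 0 */
            printf("%d\n", sk_need_swiotlb(0x140000000ULL));   /* 5 GiB of RAM  -> 1 */
            return 0;
    }
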
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 355b9d84b0f8..8032b97ccdcb 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -14,7 +14,6 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <asm/vio.h> 15#include <asm/vio.h>
16#include <asm/bug.h> 16#include <asm/bug.h>
17#include <asm/abs_addr.h>
18#include <asm/machdep.h> 17#include <asm/machdep.h>
19 18
20/* 19/*
@@ -50,7 +49,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
50 return NULL; 49 return NULL;
51 ret = page_address(page); 50 ret = page_address(page);
52 memset(ret, 0, size); 51 memset(ret, 0, size);
53 *dma_handle = virt_to_abs(ret) + get_dma_offset(dev); 52 *dma_handle = __pa(ret) + get_dma_offset(dev);
54 53
55 return ret; 54 return ret;
56#endif 55#endif
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ead5016b02d0..af37528da49f 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -831,19 +831,56 @@ restore_user:
831 bnel- load_dbcr0 831 bnel- load_dbcr0
832#endif 832#endif
833 833
834#ifdef CONFIG_PREEMPT
835 b restore 834 b restore
836 835
837/* N.B. the only way to get here is from the beq following ret_from_except. */ 836/* N.B. the only way to get here is from the beq following ret_from_except. */
838resume_kernel: 837resume_kernel:
839 /* check current_thread_info->preempt_count */ 838 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
840 CURRENT_THREAD_INFO(r9, r1) 839 CURRENT_THREAD_INFO(r9, r1)
840 lwz r8,TI_FLAGS(r9)
841 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
842 beq+ 1f
843
844 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
845
846 lwz r3,GPR1(r1)
847 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
848 mr r4,r1 /* src: current exception frame */
849 mr r1,r3 /* Reroute the trampoline frame to r1 */
850
851 /* Copy from the original to the trampoline. */
852 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
853 li r6,0 /* start offset: 0 */
854 mtctr r5
8552: lwzx r0,r6,r4
856 stwx r0,r6,r3
857 addi r6,r6,4
858 bdnz 2b
859
860 /* Do real store operation to complete stwu */
861 lwz r5,GPR1(r1)
862 stw r8,0(r5)
863
864 /* Clear _TIF_EMULATE_STACK_STORE flag */
865 lis r11,_TIF_EMULATE_STACK_STORE@h
866 addi r5,r9,TI_FLAGS
8670: lwarx r8,0,r5
868 andc r8,r8,r11
869#ifdef CONFIG_IBM405_ERR77
870 dcbt 0,r5
871#endif
872 stwcx. r8,0,r5
873 bne- 0b
8741:
875
876#ifdef CONFIG_PREEMPT
877 /* check current_thread_info->preempt_count */
841 lwz r0,TI_PREEMPT(r9) 878 lwz r0,TI_PREEMPT(r9)
842 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ 879 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
843 bne restore 880 bne restore
844 lwz r0,TI_FLAGS(r9) 881 andi. r8,r8,_TIF_NEED_RESCHED
845 andi. r0,r0,_TIF_NEED_RESCHED
846 beq+ restore 882 beq+ restore
883 lwz r3,_MSR(r1)
847 andi. r0,r3,MSR_EE /* interrupts off? */ 884 andi. r0,r3,MSR_EE /* interrupts off? */
848 beq restore /* don't schedule if so */ 885 beq restore /* don't schedule if so */
849#ifdef CONFIG_TRACE_IRQFLAGS 886#ifdef CONFIG_TRACE_IRQFLAGS
@@ -864,8 +901,6 @@ resume_kernel:
864 */ 901 */
865 bl trace_hardirqs_on 902 bl trace_hardirqs_on
866#endif 903#endif
867#else
868resume_kernel:
869#endif /* CONFIG_PREEMPT */ 904#endif /* CONFIG_PREEMPT */
870 905
871 /* interrupts are hard-disabled at this point */ 906 /* interrupts are hard-disabled at this point */
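
The _TIF_EMULATE_STACK_STORE block added above finishes the stwu r1,-N(r1) that the kprobe code declined to emulate: it rebuilds the exception frame just below the updated stack pointer and then performs the suppressed store of the old stack pointer. Roughly the same steps in C, with an invented frame size and plain pointers standing in for the live registers:

    #include <stdint.h>
    #include <string.h>

    #define SK_INT_FRAME_SIZE 192   /* illustrative only */

    /* Sketch: cur_frame is the current exception frame (r1), new_sp is the
     * stack pointer the probed stwu would have produced (the saved GPR1). */
    static uint8_t *sk_complete_stack_store(uint8_t *cur_frame, uint8_t *new_sp)
    {
            uint8_t *old_sp     = cur_frame + SK_INT_FRAME_SIZE;  /* pre-stwu r1 */
            uint8_t *trampoline = new_sp - SK_INT_FRAME_SIZE;     /* frame's new home */

            memcpy(trampoline, cur_frame, SK_INT_FRAME_SIZE);     /* migrate the frame */
            memcpy(new_sp, &old_sp, sizeof(old_sp));              /* complete the store */
            return trampoline;                                    /* becomes the new r1 */
    }
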
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b40e0b4815b3..0e931aaffca2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -593,6 +593,41 @@ _GLOBAL(ret_from_except_lite)
593 b .ret_from_except 593 b .ret_from_except
594 594
595resume_kernel: 595resume_kernel:
596 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
597 CURRENT_THREAD_INFO(r9, r1)
598 ld r8,TI_FLAGS(r9)
599 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
600 beq+ 1f
601
602 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
603
604 lwz r3,GPR1(r1)
605 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
606 mr r4,r1 /* src: current exception frame */
607 mr r1,r3 /* Reroute the trampoline frame to r1 */
608
609 /* Copy from the original to the trampoline. */
610 li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
611 li r6,0 /* start offset: 0 */
612 mtctr r5
6132: ldx r0,r6,r4
614 stdx r0,r6,r3
615 addi r6,r6,8
616 bdnz 2b
617
618 /* Do real store operation to complete stwu */
619 lwz r5,GPR1(r1)
620 std r8,0(r5)
621
622 /* Clear _TIF_EMULATE_STACK_STORE flag */
623 lis r11,_TIF_EMULATE_STACK_STORE@h
624 addi r5,r9,TI_FLAGS
625 ldarx r4,0,r5
626 andc r4,r4,r11
627 stdcx. r4,0,r5
628 bne- 0b
6291:
630
596#ifdef CONFIG_PREEMPT 631#ifdef CONFIG_PREEMPT
597 /* Check if we need to preempt */ 632 /* Check if we need to preempt */
598 andi. r0,r4,_TIF_NEED_RESCHED 633 andi. r0,r4,_TIF_NEED_RESCHED
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 98be7f0cd227..4684e33a26c3 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -25,6 +25,8 @@
25#include <asm/ppc-opcode.h> 25#include <asm/ppc-opcode.h>
26#include <asm/mmu.h> 26#include <asm/mmu.h>
27#include <asm/hw_irq.h> 27#include <asm/hw_irq.h>
28#include <asm/kvm_asm.h>
29#include <asm/kvm_booke_hv_asm.h>
28 30
29/* XXX This will ultimately add space for a special exception save 31/* XXX This will ultimately add space for a special exception save
30 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... 32 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -35,16 +37,18 @@
35#define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE 37#define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE
36 38
37/* Exception prolog code for all exceptions */ 39/* Exception prolog code for all exceptions */
38#define EXCEPTION_PROLOG(n, type, addition) \ 40#define EXCEPTION_PROLOG(n, intnum, type, addition) \
39 mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ 41 mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \
40 mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ 42 mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \
41 std r10,PACA_EX##type+EX_R10(r13); \ 43 std r10,PACA_EX##type+EX_R10(r13); \
42 std r11,PACA_EX##type+EX_R11(r13); \ 44 std r11,PACA_EX##type+EX_R11(r13); \
45 PROLOG_STORE_RESTORE_SCRATCH_##type; \
43 mfcr r10; /* save CR */ \ 46 mfcr r10; /* save CR */ \
47 mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
48 DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \
49 stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
44 addition; /* additional code for that exc. */ \ 50 addition; /* additional code for that exc. */ \
45 std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ 51 std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \
46 stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
47 mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
48 type##_SET_KSTACK; /* get special stack if necessary */\ 52 type##_SET_KSTACK; /* get special stack if necessary */\
49 andi. r10,r11,MSR_PR; /* save stack pointer */ \ 53 andi. r10,r11,MSR_PR; /* save stack pointer */ \
50 beq 1f; /* branch around if supervisor */ \ 54 beq 1f; /* branch around if supervisor */ \
@@ -59,6 +63,10 @@
59#define SPRN_GEN_SRR0 SPRN_SRR0 63#define SPRN_GEN_SRR0 SPRN_SRR0
60#define SPRN_GEN_SRR1 SPRN_SRR1 64#define SPRN_GEN_SRR1 SPRN_SRR1
61 65
66#define GDBELL_SET_KSTACK GEN_SET_KSTACK
67#define SPRN_GDBELL_SRR0 SPRN_GSRR0
68#define SPRN_GDBELL_SRR1 SPRN_GSRR1
69
62#define CRIT_SET_KSTACK \ 70#define CRIT_SET_KSTACK \
63 ld r1,PACA_CRIT_STACK(r13); \ 71 ld r1,PACA_CRIT_STACK(r13); \
64 subi r1,r1,SPECIAL_EXC_FRAME_SIZE; 72 subi r1,r1,SPECIAL_EXC_FRAME_SIZE;
@@ -77,29 +85,46 @@
77#define SPRN_MC_SRR0 SPRN_MCSRR0 85#define SPRN_MC_SRR0 SPRN_MCSRR0
78#define SPRN_MC_SRR1 SPRN_MCSRR1 86#define SPRN_MC_SRR1 SPRN_MCSRR1
79 87
80#define NORMAL_EXCEPTION_PROLOG(n, addition) \ 88#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
81 EXCEPTION_PROLOG(n, GEN, addition##_GEN(n)) 89 EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
90
91#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \
92 EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
82 93
83#define CRIT_EXCEPTION_PROLOG(n, addition) \ 94#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \
84 EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n)) 95 EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
85 96
86#define DBG_EXCEPTION_PROLOG(n, addition) \ 97#define MC_EXCEPTION_PROLOG(n, intnum, addition) \
87 EXCEPTION_PROLOG(n, DBG, addition##_DBG(n)) 98 EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
88 99
89#define MC_EXCEPTION_PROLOG(n, addition) \ 100#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \
90 EXCEPTION_PROLOG(n, MC, addition##_MC(n)) 101 EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
91 102
103/*
104 * Store user-visible scratch in PACA exception slots and restore proper value
105 */
106#define PROLOG_STORE_RESTORE_SCRATCH_GEN
107#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL
108#define PROLOG_STORE_RESTORE_SCRATCH_DBG
109#define PROLOG_STORE_RESTORE_SCRATCH_MC
110
111#define PROLOG_STORE_RESTORE_SCRATCH_CRIT \
112 mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \
113 std r10,PACA_EXCRIT+EX_R13(r13); \
114 ld r11,PACA_SPRG3(r13); \
115 mtspr SPRN_SPRG_CRIT_SCRATCH,r11;
92 116
93/* Variants of the "addition" argument for the prolog 117/* Variants of the "addition" argument for the prolog
94 */ 118 */
95#define PROLOG_ADDITION_NONE_GEN(n) 119#define PROLOG_ADDITION_NONE_GEN(n)
120#define PROLOG_ADDITION_NONE_GDBELL(n)
96#define PROLOG_ADDITION_NONE_CRIT(n) 121#define PROLOG_ADDITION_NONE_CRIT(n)
97#define PROLOG_ADDITION_NONE_DBG(n) 122#define PROLOG_ADDITION_NONE_DBG(n)
98#define PROLOG_ADDITION_NONE_MC(n) 123#define PROLOG_ADDITION_NONE_MC(n)
99 124
100#define PROLOG_ADDITION_MASKABLE_GEN(n) \ 125#define PROLOG_ADDITION_MASKABLE_GEN(n) \
101 lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ 126 lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
102 cmpwi cr0,r11,0; /* yes -> go out of line */ \ 127 cmpwi cr0,r10,0; /* yes -> go out of line */ \
103 beq masked_interrupt_book3e_##n 128 beq masked_interrupt_book3e_##n
104 129
105#define PROLOG_ADDITION_2REGS_GEN(n) \ 130#define PROLOG_ADDITION_2REGS_GEN(n) \
@@ -233,9 +258,9 @@ exc_##n##_bad_stack: \
2331: 2581:
234 259
235 260
236#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ 261#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \
237 START_EXCEPTION(label); \ 262 START_EXCEPTION(label); \
238 NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ 263 NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
239 EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ 264 EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \
240 ack(r8); \ 265 ack(r8); \
241 CHECK_NAPPING(); \ 266 CHECK_NAPPING(); \
@@ -286,7 +311,8 @@ interrupt_end_book3e:
286 311
287/* Critical Input Interrupt */ 312/* Critical Input Interrupt */
288 START_EXCEPTION(critical_input); 313 START_EXCEPTION(critical_input);
289 CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) 314 CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
315 PROLOG_ADDITION_NONE)
290// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) 316// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
291// bl special_reg_save_crit 317// bl special_reg_save_crit
292// CHECK_NAPPING(); 318// CHECK_NAPPING();
@@ -297,7 +323,8 @@ interrupt_end_book3e:
297 323
298/* Machine Check Interrupt */ 324/* Machine Check Interrupt */
299 START_EXCEPTION(machine_check); 325 START_EXCEPTION(machine_check);
300 CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) 326 MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK,
327 PROLOG_ADDITION_NONE)
301// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) 328// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
302// bl special_reg_save_mc 329// bl special_reg_save_mc
303// addi r3,r1,STACK_FRAME_OVERHEAD 330// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -308,7 +335,8 @@ interrupt_end_book3e:
308 335
309/* Data Storage Interrupt */ 336/* Data Storage Interrupt */
310 START_EXCEPTION(data_storage) 337 START_EXCEPTION(data_storage)
311 NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) 338 NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
339 PROLOG_ADDITION_2REGS)
312 mfspr r14,SPRN_DEAR 340 mfspr r14,SPRN_DEAR
313 mfspr r15,SPRN_ESR 341 mfspr r15,SPRN_ESR
314 EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) 342 EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
@@ -316,18 +344,21 @@ interrupt_end_book3e:
316 344
317/* Instruction Storage Interrupt */ 345/* Instruction Storage Interrupt */
318 START_EXCEPTION(instruction_storage); 346 START_EXCEPTION(instruction_storage);
319 NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) 347 NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
348 PROLOG_ADDITION_2REGS)
320 li r15,0 349 li r15,0
321 mr r14,r10 350 mr r14,r10
322 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) 351 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
323 b storage_fault_common 352 b storage_fault_common
324 353
325/* External Input Interrupt */ 354/* External Input Interrupt */
326 MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) 355 MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
356 external_input, .do_IRQ, ACK_NONE)
327 357
328/* Alignment */ 358/* Alignment */
329 START_EXCEPTION(alignment); 359 START_EXCEPTION(alignment);
330 NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) 360 NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
361 PROLOG_ADDITION_2REGS)
331 mfspr r14,SPRN_DEAR 362 mfspr r14,SPRN_DEAR
332 mfspr r15,SPRN_ESR 363 mfspr r15,SPRN_ESR
333 EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) 364 EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
@@ -335,7 +366,8 @@ interrupt_end_book3e:
335 366
336/* Program Interrupt */ 367/* Program Interrupt */
337 START_EXCEPTION(program); 368 START_EXCEPTION(program);
338 NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) 369 NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
370 PROLOG_ADDITION_1REG)
339 mfspr r14,SPRN_ESR 371 mfspr r14,SPRN_ESR
340 EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) 372 EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
341 std r14,_DSISR(r1) 373 std r14,_DSISR(r1)
@@ -347,7 +379,8 @@ interrupt_end_book3e:
347 379
348/* Floating Point Unavailable Interrupt */ 380/* Floating Point Unavailable Interrupt */
349 START_EXCEPTION(fp_unavailable); 381 START_EXCEPTION(fp_unavailable);
350 NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) 382 NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
383 PROLOG_ADDITION_NONE)
351 /* we can probably do a shorter exception entry for that one... */ 384 /* we can probably do a shorter exception entry for that one... */
352 EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) 385 EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
353 ld r12,_MSR(r1) 386 ld r12,_MSR(r1)
@@ -362,14 +395,17 @@ interrupt_end_book3e:
362 b .ret_from_except 395 b .ret_from_except
363 396
364/* Decrementer Interrupt */ 397/* Decrementer Interrupt */
365 MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) 398 MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
399 decrementer, .timer_interrupt, ACK_DEC)
366 400
367/* Fixed Interval Timer Interrupt */ 401/* Fixed Interval Timer Interrupt */
368 MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) 402 MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
403 fixed_interval, .unknown_exception, ACK_FIT)
369 404
370/* Watchdog Timer Interrupt */ 405/* Watchdog Timer Interrupt */
371 START_EXCEPTION(watchdog); 406 START_EXCEPTION(watchdog);
372 CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) 407 CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
408 PROLOG_ADDITION_NONE)
373// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) 409// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
374// bl special_reg_save_crit 410// bl special_reg_save_crit
375// CHECK_NAPPING(); 411// CHECK_NAPPING();
@@ -388,7 +424,8 @@ interrupt_end_book3e:
388 424
389/* Auxiliary Processor Unavailable Interrupt */ 425/* Auxiliary Processor Unavailable Interrupt */
390 START_EXCEPTION(ap_unavailable); 426 START_EXCEPTION(ap_unavailable);
391 NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) 427 NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
428 PROLOG_ADDITION_NONE)
392 EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) 429 EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
393 bl .save_nvgprs 430 bl .save_nvgprs
394 addi r3,r1,STACK_FRAME_OVERHEAD 431 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -397,7 +434,8 @@ interrupt_end_book3e:
397 434
398/* Debug exception as a critical interrupt*/ 435/* Debug exception as a critical interrupt*/
399 START_EXCEPTION(debug_crit); 436 START_EXCEPTION(debug_crit);
400 CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) 437 CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
438 PROLOG_ADDITION_2REGS)
401 439
402 /* 440 /*
403 * If there is a single step or branch-taken exception in an 441 * If there is a single step or branch-taken exception in an
@@ -431,7 +469,7 @@ interrupt_end_book3e:
431 mtcr r10 469 mtcr r10
432 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ 470 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */
433 ld r11,PACA_EXCRIT+EX_R11(r13) 471 ld r11,PACA_EXCRIT+EX_R11(r13)
434 mfspr r13,SPRN_SPRG_CRIT_SCRATCH 472 ld r13,PACA_EXCRIT+EX_R13(r13)
435 rfci 473 rfci
436 474
437 /* Normal debug exception */ 475 /* Normal debug exception */
@@ -444,7 +482,7 @@ interrupt_end_book3e:
444 /* Now we mash up things to make it look like we are coming on a 482 /* Now we mash up things to make it look like we are coming on a
445 * normal exception 483 * normal exception
446 */ 484 */
447 mfspr r15,SPRN_SPRG_CRIT_SCRATCH 485 ld r15,PACA_EXCRIT+EX_R13(r13)
448 mtspr SPRN_SPRG_GEN_SCRATCH,r15 486 mtspr SPRN_SPRG_GEN_SCRATCH,r15
449 mfspr r14,SPRN_DBSR 487 mfspr r14,SPRN_DBSR
450 EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) 488 EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
@@ -462,7 +500,8 @@ kernel_dbg_exc:
462 500
463/* Debug exception as a debug interrupt*/ 501/* Debug exception as a debug interrupt*/
464 START_EXCEPTION(debug_debug); 502 START_EXCEPTION(debug_debug);
465 DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS) 503 DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
504 PROLOG_ADDITION_2REGS)
466 505
467 /* 506 /*
468 * If there is a single step or branch-taken exception in an 507 * If there is a single step or branch-taken exception in an
@@ -523,18 +562,21 @@ kernel_dbg_exc:
523 b .ret_from_except 562 b .ret_from_except
524 563
525 START_EXCEPTION(perfmon); 564 START_EXCEPTION(perfmon);
526 NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE) 565 NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
566 PROLOG_ADDITION_NONE)
527 EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) 567 EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
528 addi r3,r1,STACK_FRAME_OVERHEAD 568 addi r3,r1,STACK_FRAME_OVERHEAD
529 bl .performance_monitor_exception 569 bl .performance_monitor_exception
530 b .ret_from_except_lite 570 b .ret_from_except_lite
531 571
532/* Doorbell interrupt */ 572/* Doorbell interrupt */
533 MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE) 573 MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
574 doorbell, .doorbell_exception, ACK_NONE)
534 575
535/* Doorbell critical Interrupt */ 576/* Doorbell critical Interrupt */
536 START_EXCEPTION(doorbell_crit); 577 START_EXCEPTION(doorbell_crit);
537 CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE) 578 CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
579 PROLOG_ADDITION_NONE)
538// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) 580// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
539// bl special_reg_save_crit 581// bl special_reg_save_crit
540// CHECK_NAPPING(); 582// CHECK_NAPPING();
@@ -543,12 +585,24 @@ kernel_dbg_exc:
543// b ret_from_crit_except 585// b ret_from_crit_except
544 b . 586 b .
545 587
546/* Guest Doorbell */ 588/*
547 MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) 589 * Guest doorbell interrupt
590 * This general exception uses GSRRx save/restore registers
591 */
592 START_EXCEPTION(guest_doorbell);
593 GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
594 PROLOG_ADDITION_NONE)
595 EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP)
596 addi r3,r1,STACK_FRAME_OVERHEAD
597 bl .save_nvgprs
598 INTS_RESTORE_HARD
599 bl .unknown_exception
600 b .ret_from_except
548 601
549/* Guest Doorbell critical Interrupt */ 602/* Guest Doorbell critical Interrupt */
550 START_EXCEPTION(guest_doorbell_crit); 603 START_EXCEPTION(guest_doorbell_crit);
551 CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE) 604 CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
605 PROLOG_ADDITION_NONE)
552// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) 606// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
553// bl special_reg_save_crit 607// bl special_reg_save_crit
554// CHECK_NAPPING(); 608// CHECK_NAPPING();
@@ -559,7 +613,8 @@ kernel_dbg_exc:
559 613
560/* Hypervisor call */ 614/* Hypervisor call */
561 START_EXCEPTION(hypercall); 615 START_EXCEPTION(hypercall);
562 NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE) 616 NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
617 PROLOG_ADDITION_NONE)
563 EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) 618 EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
564 addi r3,r1,STACK_FRAME_OVERHEAD 619 addi r3,r1,STACK_FRAME_OVERHEAD
565 bl .save_nvgprs 620 bl .save_nvgprs
@@ -569,7 +624,8 @@ kernel_dbg_exc:
569 624
570/* Embedded Hypervisor privileged */ 625/* Embedded Hypervisor privileged */
571 START_EXCEPTION(ehpriv); 626 START_EXCEPTION(ehpriv);
572 NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE) 627 NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
628 PROLOG_ADDITION_NONE)
573 EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) 629 EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
574 addi r3,r1,STACK_FRAME_OVERHEAD 630 addi r3,r1,STACK_FRAME_OVERHEAD
575 bl .save_nvgprs 631 bl .save_nvgprs
@@ -582,44 +638,42 @@ kernel_dbg_exc:
582 * accordingly and if the interrupt is level sensitive, we hard disable 638 * accordingly and if the interrupt is level sensitive, we hard disable
583 */ 639 */
584 640
641.macro masked_interrupt_book3e paca_irq full_mask
642 lbz r10,PACAIRQHAPPENED(r13)
643 ori r10,r10,\paca_irq
644 stb r10,PACAIRQHAPPENED(r13)
645
646 .if \full_mask == 1
647 rldicl r10,r11,48,1 /* clear MSR_EE */
648 rotldi r11,r10,16
649 mtspr SPRN_SRR1,r11
650 .endif
651
652 lwz r11,PACA_EXGEN+EX_CR(r13)
653 mtcr r11
654 ld r10,PACA_EXGEN+EX_R10(r13)
655 ld r11,PACA_EXGEN+EX_R11(r13)
656 mfspr r13,SPRN_SPRG_GEN_SCRATCH
657 rfi
658 b .
659.endm
660
585masked_interrupt_book3e_0x500: 661masked_interrupt_book3e_0x500:
586 /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */ 662 // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
587 li r11,PACA_IRQ_EE 663 masked_interrupt_book3e PACA_IRQ_EE 1
588 b masked_interrupt_book3e_full_mask
589 664
590masked_interrupt_book3e_0x900: 665masked_interrupt_book3e_0x900:
591 ACK_DEC(r11); 666 ACK_DEC(r10);
592 li r11,PACA_IRQ_DEC 667 masked_interrupt_book3e PACA_IRQ_DEC 0
593 b masked_interrupt_book3e_no_mask 668
594masked_interrupt_book3e_0x980: 669masked_interrupt_book3e_0x980:
595 ACK_FIT(r11); 670 ACK_FIT(r10);
596 li r11,PACA_IRQ_DEC 671 masked_interrupt_book3e PACA_IRQ_DEC 0
597 b masked_interrupt_book3e_no_mask 672
598masked_interrupt_book3e_0x280: 673masked_interrupt_book3e_0x280:
599masked_interrupt_book3e_0x2c0: 674masked_interrupt_book3e_0x2c0:
600 li r11,PACA_IRQ_DBELL 675 masked_interrupt_book3e PACA_IRQ_DBELL 0
601 b masked_interrupt_book3e_no_mask
602 676
603masked_interrupt_book3e_no_mask:
604 mtcr r10
605 lbz r10,PACAIRQHAPPENED(r13)
606 or r10,r10,r11
607 stb r10,PACAIRQHAPPENED(r13)
608 b 1f
609masked_interrupt_book3e_full_mask:
610 mtcr r10
611 lbz r10,PACAIRQHAPPENED(r13)
612 or r10,r10,r11
613 stb r10,PACAIRQHAPPENED(r13)
614 mfspr r10,SPRN_SRR1
615 rldicl r11,r10,48,1 /* clear MSR_EE */
616 rotldi r10,r11,16
617 mtspr SPRN_SRR1,r10
6181: ld r10,PACA_EXGEN+EX_R10(r13);
619 ld r11,PACA_EXGEN+EX_R11(r13);
620 mfspr r13,SPRN_SPRG_GEN_SCRATCH;
621 rfi
622 b .
623/* 677/*
624 * Called from arch_local_irq_enable when an interrupt needs 678 * Called from arch_local_irq_enable when an interrupt needs
625 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280 679 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
@@ -1302,25 +1356,11 @@ _GLOBAL(setup_perfmon_ivor)
1302_GLOBAL(setup_doorbell_ivors) 1356_GLOBAL(setup_doorbell_ivors)
1303 SET_IVOR(36, 0x280) /* Processor Doorbell */ 1357 SET_IVOR(36, 0x280) /* Processor Doorbell */
1304 SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ 1358 SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
1305
1306 /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
1307 mfspr r10,SPRN_MMUCFG
1308 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
1309 beqlr
1310
1311 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1312 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1313 blr 1359 blr
1314 1360
1315_GLOBAL(setup_ehv_ivors) 1361_GLOBAL(setup_ehv_ivors)
1316 /*
1317 * We may be running as a guest and lack E.HV even on a chip
1318 * that normally has it.
1319 */
1320 mfspr r10,SPRN_MMUCFG
1321 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
1322 beqlr
1323
1324 SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ 1362 SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
1325 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ 1363 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
1364 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1365 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1326 blr 1366 blr
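
The masked_interrupt_book3e macro collapses the old no_mask/full_mask tails into one parameterised template. The bookkeeping it performs is short enough to restate in C; the names and the EE bit position below are spelled out only for illustration:

    #include <stdint.h>

    #define SK_MSR_EE (1ull << 15)   /* external interrupt enable, illustrative */

    /* Sketch: note the masked source so arch_local_irq_enable() can replay it;
     * for level-sensitive sources also clear EE in the saved SRR1 so the
     * interrupt stays hard-disabled after the rfi. */
    void sk_mask_interrupt(uint8_t *paca_irq_happened, uint8_t reason,
                           uint64_t *saved_srr1, int full_mask)
    {
            *paca_irq_happened |= reason;
            if (full_mask)
                    *saved_srr1 &= ~SK_MSR_EE;
    }
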
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 39aa97d3ff88..10b658ad65e1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -275,6 +275,31 @@ vsx_unavailable_pSeries_1:
275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) 275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) 276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
277 277
278 . = 0x1500
279 .global denorm_Hypervisor
280denorm_exception_hv:
281 HMT_MEDIUM
282 mtspr SPRN_SPRG_HSCRATCH0,r13
283 mfspr r13,SPRN_SPRG_HPACA
284 std r9,PACA_EXGEN+EX_R9(r13)
285 std r10,PACA_EXGEN+EX_R10(r13)
286 std r11,PACA_EXGEN+EX_R11(r13)
287 std r12,PACA_EXGEN+EX_R12(r13)
288 mfspr r9,SPRN_SPRG_HSCRATCH0
289 std r9,PACA_EXGEN+EX_R13(r13)
290 mfcr r9
291
292#ifdef CONFIG_PPC_DENORMALISATION
293 mfspr r10,SPRN_HSRR1
294 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
295 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
296 addi r11,r11,-4 /* HSRR0 is next instruction */
297 bne+ denorm_assist
298#endif
299
300 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
301 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
302
278#ifdef CONFIG_CBE_RAS 303#ifdef CONFIG_CBE_RAS
279 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) 304 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
280 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602) 305 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
@@ -336,6 +361,103 @@ do_stab_bolted_pSeries:
336 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) 361 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
337 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982) 362 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
338 363
364#ifdef CONFIG_PPC_DENORMALISATION
365denorm_assist:
366BEGIN_FTR_SECTION
367/*
368 * To denormalise we need to move a copy of the register to itself.
369 * For POWER6 do that here for all FP regs.
370 */
371 mfmsr r10
372 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
373 xori r10,r10,(MSR_FE0|MSR_FE1)
374 mtmsrd r10
375 sync
376 fmr 0,0
377 fmr 1,1
378 fmr 2,2
379 fmr 3,3
380 fmr 4,4
381 fmr 5,5
382 fmr 6,6
383 fmr 7,7
384 fmr 8,8
385 fmr 9,9
386 fmr 10,10
387 fmr 11,11
388 fmr 12,12
389 fmr 13,13
390 fmr 14,14
391 fmr 15,15
392 fmr 16,16
393 fmr 17,17
394 fmr 18,18
395 fmr 19,19
396 fmr 20,20
397 fmr 21,21
398 fmr 22,22
399 fmr 23,23
400 fmr 24,24
401 fmr 25,25
402 fmr 26,26
403 fmr 27,27
404 fmr 28,28
405 fmr 29,29
406 fmr 30,30
407 fmr 31,31
408FTR_SECTION_ELSE
409/*
410 * To denormalise we need to move a copy of the register to itself.
411 * For POWER7 do that here for the first 32 VSX registers only.
412 */
413 mfmsr r10
414 oris r10,r10,MSR_VSX@h
415 mtmsrd r10
416 sync
417 XVCPSGNDP(0,0,0)
418 XVCPSGNDP(1,1,1)
419 XVCPSGNDP(2,2,2)
420 XVCPSGNDP(3,3,3)
421 XVCPSGNDP(4,4,4)
422 XVCPSGNDP(5,5,5)
423 XVCPSGNDP(6,6,6)
424 XVCPSGNDP(7,7,7)
425 XVCPSGNDP(8,8,8)
426 XVCPSGNDP(9,9,9)
427 XVCPSGNDP(10,10,10)
428 XVCPSGNDP(11,11,11)
429 XVCPSGNDP(12,12,12)
430 XVCPSGNDP(13,13,13)
431 XVCPSGNDP(14,14,14)
432 XVCPSGNDP(15,15,15)
433 XVCPSGNDP(16,16,16)
434 XVCPSGNDP(17,17,17)
435 XVCPSGNDP(18,18,18)
436 XVCPSGNDP(19,19,19)
437 XVCPSGNDP(20,20,20)
438 XVCPSGNDP(21,21,21)
439 XVCPSGNDP(22,22,22)
440 XVCPSGNDP(23,23,23)
441 XVCPSGNDP(24,24,24)
442 XVCPSGNDP(25,25,25)
443 XVCPSGNDP(26,26,26)
444 XVCPSGNDP(27,27,27)
445 XVCPSGNDP(28,28,28)
446 XVCPSGNDP(29,29,29)
447 XVCPSGNDP(30,30,30)
448 XVCPSGNDP(31,31,31)
449ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
450 mtspr SPRN_HSRR0,r11
451 mtcrf 0x80,r9
452 ld r9,PACA_EXGEN+EX_R9(r13)
453 ld r10,PACA_EXGEN+EX_R10(r13)
454 ld r11,PACA_EXGEN+EX_R11(r13)
455 ld r12,PACA_EXGEN+EX_R12(r13)
456 ld r13,PACA_EXGEN+EX_R13(r13)
457 HRFID
458 b .
459#endif
460
339 .align 7 461 .align 7
340 /* moved from 0xe00 */ 462 /* moved from 0xe00 */
341 STD_EXCEPTION_HV(., 0xe02, h_data_storage) 463 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
@@ -495,6 +617,7 @@ machine_check_common:
495 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) 617 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
496 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) 618 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
497 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 619 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
620 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
498#ifdef CONFIG_ALTIVEC 621#ifdef CONFIG_ALTIVEC
499 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 622 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
500#else 623#else
@@ -960,7 +1083,9 @@ _GLOBAL(do_stab_bolted)
960 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1083 rldimi r10,r11,7,52 /* r10 = first ste of the group */
961 1084
962 /* Calculate VSID */ 1085 /* Calculate VSID */
963 /* This is a kernel address, so protovsid = ESID */ 1086 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1087 li r9,0x1
1088 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
964 ASM_VSID_SCRAMBLE(r11, r9, 256M) 1089 ASM_VSID_SCRAMBLE(r11, r9, 256M)
965 rldic r9,r11,12,16 /* r9 = vsid << 12 */ 1090 rldic r9,r11,12,16 /* r9 = vsid << 12 */
966 1091
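
Besides the 0x1500 denormalisation assist, this file picks up part of the 64TB rework: do_stab_bolted no longer treats the bare ESID as the proto-VSID for kernel addresses but sets a bit above the user context+ESID space, per the new comment. A stand-alone sketch of that arithmetic, with the 19+18 bit split assumed from the "1 << 37" in the comment:

    #include <stdio.h>

    #define SK_CONTEXT_BITS    19   /* assumed widths, for illustration */
    #define SK_USER_ESID_BITS  18

    /* Sketch: kernel proto-VSIDs land above every possible user (context, esid). */
    static unsigned long sk_kernel_proto_vsid(unsigned long esid)
    {
            return esid | (1UL << (SK_CONTEXT_BITS + SK_USER_ESID_BITS));
    }

    int main(void)
    {
            printf("0x%lx\n", sk_kernel_proto_vsid(0xc00));   /* bit 37 set */
            return 0;
    }
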
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 18bdf74fa164..06c8202a69cf 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -289,8 +289,7 @@ int __init fadump_reserve_mem(void)
289 else 289 else
290 memory_limit = memblock_end_of_DRAM(); 290 memory_limit = memblock_end_of_DRAM();
291 printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" 291 printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
292 " dump, now %#016llx\n", 292 " dump, now %#016llx\n", memory_limit);
293 (unsigned long long)memory_limit);
294 } 293 }
295 if (memory_limit) 294 if (memory_limit)
296 memory_boundary = memory_limit; 295 memory_boundary = memory_limit;
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 0f59863c3ade..6f62a737f607 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -895,15 +895,11 @@ _GLOBAL(__setup_e500mc_ivors)
895 mtspr SPRN_IVOR36,r3 895 mtspr SPRN_IVOR36,r3
896 li r3,CriticalDoorbell@l 896 li r3,CriticalDoorbell@l
897 mtspr SPRN_IVOR37,r3 897 mtspr SPRN_IVOR37,r3
898 sync
899 blr
898 900
899 /* 901/* setup ehv ivors for */
900 * We only want to touch IVOR38-41 if we're running on hardware 902_GLOBAL(__setup_ehv_ivors)
901 * that supports category E.HV. The architectural way to determine
902 * this is MMUCFG[LPIDSIZE].
903 */
904 mfspr r3, SPRN_MMUCFG
905 andis. r3, r3, MMUCFG_LPIDSIZE@h
906 beq no_hv
907 li r3,GuestDoorbell@l 903 li r3,GuestDoorbell@l
908 mtspr SPRN_IVOR38,r3 904 mtspr SPRN_IVOR38,r3
909 li r3,CriticalGuestDoorbell@l 905 li r3,CriticalGuestDoorbell@l
@@ -912,14 +908,8 @@ _GLOBAL(__setup_e500mc_ivors)
912 mtspr SPRN_IVOR40,r3 908 mtspr SPRN_IVOR40,r3
913 li r3,Ehvpriv@l 909 li r3,Ehvpriv@l
914 mtspr SPRN_IVOR41,r3 910 mtspr SPRN_IVOR41,r3
915skip_hv_ivors:
916 sync 911 sync
917 blr 912 blr
918no_hv:
919 lwz r3, CPU_SPEC_FEATURES(r5)
920 rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
921 stw r3, CPU_SPEC_FEATURES(r5)
922 b skip_hv_ivors
923 913
924#ifdef CONFIG_SPE 914#ifdef CONFIG_SPE
925/* 915/*
@@ -1043,6 +1033,34 @@ _GLOBAL(flush_dcache_L1)
1043 1033
1044 blr 1034 blr
1045 1035
1036/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
1037_GLOBAL(__flush_disable_L1)
1038 mflr r10
1039 bl flush_dcache_L1 /* Flush L1 d-cache */
1040 mtlr r10
1041
1042 mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
1043 li r5, 2
1044 rlwimi r4, r5, 0, 3
1045
1046 msync
1047 isync
1048 mtspr SPRN_L1CSR0, r4
1049 isync
1050
10511: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
1052 andi. r4, r4, 2
1053 bne 1b
1054
1055 mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
1056 li r5, 2
1057 rlwimi r4, r5, 0, 3
1058
1059 mtspr SPRN_L1CSR1, r4
1060 isync
1061
1062 blr
1063
1046#ifdef CONFIG_SMP 1064#ifdef CONFIG_SMP
1047/* When we get here, r24 needs to hold the CPU # */ 1065/* When we get here, r24 needs to hold the CPU # */
1048 .globl __secondary_start 1066 .globl __secondary_start
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 956a4c496de9..a89cae481b04 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
73 * If so, DABR will be populated in single_step_dabr_instruction(). 73 * If so, DABR will be populated in single_step_dabr_instruction().
74 */ 74 */
75 if (current->thread.last_hit_ubp != bp) 75 if (current->thread.last_hit_ubp != bp)
76 set_dabr(info->address | info->type | DABR_TRANSLATION); 76 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
77 77
78 return 0; 78 return 0;
79} 79}
@@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
97 } 97 }
98 98
99 *slot = NULL; 99 *slot = NULL;
100 set_dabr(0); 100 set_dabr(0, 0);
101} 101}
102 102
103/* 103/*
@@ -170,6 +170,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
170 170
171 info->address = bp->attr.bp_addr; 171 info->address = bp->attr.bp_addr;
172 info->len = bp->attr.bp_len; 172 info->len = bp->attr.bp_len;
173 info->dabrx = DABRX_ALL;
174 if (bp->attr.exclude_user)
175 info->dabrx &= ~DABRX_USER;
176 if (bp->attr.exclude_kernel)
177 info->dabrx &= ~DABRX_KERNEL;
178 if (bp->attr.exclude_hv)
179 info->dabrx &= ~DABRX_HYP;
173 180
174 /* 181 /*
175 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) 182 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -197,7 +204,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
197 204
198 info = counter_arch_bp(tsk->thread.last_hit_ubp); 205 info = counter_arch_bp(tsk->thread.last_hit_ubp);
199 regs->msr &= ~MSR_SE; 206 regs->msr &= ~MSR_SE;
200 set_dabr(info->address | info->type | DABR_TRANSLATION); 207 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
201 tsk->thread.last_hit_ubp = NULL; 208 tsk->thread.last_hit_ubp = NULL;
202} 209}
203 210
@@ -215,7 +222,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
215 unsigned long dar = regs->dar; 222 unsigned long dar = regs->dar;
216 223
217 /* Disable breakpoints during exception handling */ 224 /* Disable breakpoints during exception handling */
218 set_dabr(0); 225 set_dabr(0, 0);
219 226
220 /* 227 /*
221 * The counter may be concurrently released but that can only 228 * The counter may be concurrently released but that can only
@@ -281,7 +288,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
281 if (!info->extraneous_interrupt) 288 if (!info->extraneous_interrupt)
282 perf_bp_event(bp, regs); 289 perf_bp_event(bp, regs);
283 290
284 set_dabr(info->address | info->type | DABR_TRANSLATION); 291 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
285out: 292out:
286 rcu_read_unlock(); 293 rcu_read_unlock();
287 return rc; 294 return rc;
@@ -294,7 +301,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
294{ 301{
295 struct pt_regs *regs = args->regs; 302 struct pt_regs *regs = args->regs;
296 struct perf_event *bp = NULL; 303 struct perf_event *bp = NULL;
297 struct arch_hw_breakpoint *bp_info; 304 struct arch_hw_breakpoint *info;
298 305
299 bp = current->thread.last_hit_ubp; 306 bp = current->thread.last_hit_ubp;
300 /* 307 /*
@@ -304,16 +311,16 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
304 if (!bp) 311 if (!bp)
305 return NOTIFY_DONE; 312 return NOTIFY_DONE;
306 313
307 bp_info = counter_arch_bp(bp); 314 info = counter_arch_bp(bp);
308 315
309 /* 316 /*
310 * We shall invoke the user-defined callback function in the single 317 * We shall invoke the user-defined callback function in the single
311 * stepping handler to conform to 'trigger-after-execute' semantics 318 * stepping handler to conform to 'trigger-after-execute' semantics
312 */ 319 */
313 if (!bp_info->extraneous_interrupt) 320 if (!info->extraneous_interrupt)
314 perf_bp_event(bp, regs); 321 perf_bp_event(bp, regs);
315 322
316 set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION); 323 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
317 current->thread.last_hit_ubp = NULL; 324 current->thread.last_hit_ubp = NULL;
318 325
319 /* 326 /*
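
arch_validate_hwbkpt_settings() now also derives a DABRX mask from the perf event's exclude flags, so a breakpoint fires only in the privilege states the caller asked for. The mapping restated as a stand-alone sketch, with invented bit values in place of the real DABRX_* constants:

    #include <stdbool.h>

    #define SK_DABRX_USER   0x1   /* illustrative values only */
    #define SK_DABRX_KERNEL 0x2
    #define SK_DABRX_HYP    0x4
    #define SK_DABRX_ALL    (SK_DABRX_USER | SK_DABRX_KERNEL | SK_DABRX_HYP)

    /* Sketch: start from "match everywhere" and strip excluded privilege levels. */
    unsigned long sk_dabrx(bool excl_user, bool excl_kernel, bool excl_hv)
    {
            unsigned long dabrx = SK_DABRX_ALL;

            if (excl_user)
                    dabrx &= ~SK_DABRX_USER;
            if (excl_kernel)
                    dabrx &= ~SK_DABRX_KERNEL;
            if (excl_hv)
                    dabrx &= ~SK_DABRX_HYP;
            return dabrx;
    }
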
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index b01d14eeca8d..8220baa46faf 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,6 @@
47#include <linux/stat.h> 47#include <linux/stat.h>
48#include <linux/of_platform.h> 48#include <linux/of_platform.h>
49#include <asm/ibmebus.h> 49#include <asm/ibmebus.h>
50#include <asm/abs_addr.h>
51 50
52static struct device ibmebus_bus_device = { /* fake "parent" device */ 51static struct device ibmebus_bus_device = { /* fake "parent" device */
53 .init_name = "ibmebus", 52 .init_name = "ibmebus",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ff5a6ce027b8..8226c6cb348a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -215,7 +215,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
215 spin_lock_irqsave(&(pool->lock), flags); 215 spin_lock_irqsave(&(pool->lock), flags);
216 216
217again: 217again:
218 if ((pass == 0) && handle && *handle) 218 if ((pass == 0) && handle && *handle &&
219 (*handle >= pool->start) && (*handle < pool->end))
219 start = *handle; 220 start = *handle;
220 else 221 else
221 start = pool->hint; 222 start = pool->hint;
@@ -236,7 +237,9 @@ again:
236 * but on second pass, start at 0 in pool 0. 237 * but on second pass, start at 0 in pool 0.
237 */ 238 */
238 if ((start & mask) >= limit || pass > 0) { 239 if ((start & mask) >= limit || pass > 0) {
240 spin_unlock(&(pool->lock));
239 pool = &(tbl->pools[0]); 241 pool = &(tbl->pools[0]);
242 spin_lock(&(pool->lock));
240 start = pool->start; 243 start = pool->start;
241 } else { 244 } else {
242 start &= mask; 245 start &= mask;
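
The iommu_range_alloc() fix only honours a caller-supplied *handle hint when it falls inside the pool being searched; with the per-CPU pools a stale hint from another pool would otherwise push the search out of bounds. (The second hunk additionally drops the old pool's lock before taking pool 0's.) The guard as a small stand-alone sketch:

    /* Sketch of the hint check: trust *handle only on the first pass and only
     * when it lies within [pool_start, pool_end); otherwise use the pool hint. */
    unsigned long sk_pick_start(const unsigned long *handle,
                                unsigned long pool_start, unsigned long pool_end,
                                unsigned long pool_hint, int pass)
    {
            if (pass == 0 && handle && *handle &&
                *handle >= pool_start && *handle < pool_end)
                    return *handle;
            return pool_hint;
    }
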
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1f017bb7a7ce..71413f41278f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -489,10 +489,10 @@ void do_IRQ(struct pt_regs *regs)
489 struct pt_regs *old_regs = set_irq_regs(regs); 489 struct pt_regs *old_regs = set_irq_regs(regs);
490 unsigned int irq; 490 unsigned int irq;
491 491
492 trace_irq_entry(regs);
493
494 irq_enter(); 492 irq_enter();
495 493
494 trace_irq_entry(regs);
495
496 check_stack_overflow(); 496 check_stack_overflow();
497 497
498 /* 498 /*
@@ -511,10 +511,10 @@ void do_IRQ(struct pt_regs *regs)
511 else 511 else
512 __get_cpu_var(irq_stat).spurious_irqs++; 512 __get_cpu_var(irq_stat).spurious_irqs++;
513 513
514 trace_irq_exit(regs);
515
514 irq_exit(); 516 irq_exit();
515 set_irq_regs(old_regs); 517 set_irq_regs(old_regs);
516
517 trace_irq_exit(regs);
518} 518}
519 519
520void __init init_IRQ(void) 520void __init init_IRQ(void)
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 5df777794403..fa9f6c72f557 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -165,7 +165,7 @@ void __init reserve_crashkernel(void)
165 if (memory_limit && memory_limit <= crashk_res.end) { 165 if (memory_limit && memory_limit <= crashk_res.end) {
166 memory_limit = crashk_res.end + 1; 166 memory_limit = crashk_res.end + 1;
167 printk("Adjusted memory limit for crashkernel, now 0x%llx\n", 167 printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
168 (unsigned long long)memory_limit); 168 memory_limit);
169 } 169 }
170 170
171 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " 171 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
@@ -204,6 +204,12 @@ static struct property crashk_size_prop = {
204 .value = &crashk_size, 204 .value = &crashk_size,
205}; 205};
206 206
207static struct property memory_limit_prop = {
208 .name = "linux,memory-limit",
209 .length = sizeof(unsigned long long),
210 .value = &memory_limit,
211};
212
207static void __init export_crashk_values(struct device_node *node) 213static void __init export_crashk_values(struct device_node *node)
208{ 214{
209 struct property *prop; 215 struct property *prop;
@@ -223,6 +229,12 @@ static void __init export_crashk_values(struct device_node *node)
223 crashk_size = resource_size(&crashk_res); 229 crashk_size = resource_size(&crashk_res);
224 prom_add_property(node, &crashk_size_prop); 230 prom_add_property(node, &crashk_size_prop);
225 } 231 }
232
233 /*
234 * memory_limit is required by the kexec-tools to limit the
235 * crash regions to the actual memory used.
236 */
237 prom_update_property(node, &memory_limit_prop);
226} 238}
227 239
228static int __init kexec_setup(void) 240static int __init kexec_setup(void)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index fbe1a12dc7f1..cd6da855090c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -142,6 +142,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
142 new_paca->hw_cpu_id = 0xffff; 142 new_paca->hw_cpu_id = 0xffff;
143 new_paca->kexec_state = KEXEC_STATE_NONE; 143 new_paca->kexec_state = KEXEC_STATE_NONE;
144 new_paca->__current = &init_task; 144 new_paca->__current = &init_task;
145 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
145#ifdef CONFIG_PPC_STD_MMU_64 146#ifdef CONFIG_PPC_STD_MMU_64
146 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 147 new_paca->slb_shadow_ptr = &slb_shadow[cpu];
147#endif /* CONFIG_PPC_STD_MMU_64 */ 148#endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 43fea543d686..7f94f760dd0c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -980,13 +980,14 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
980 if (i >= 3 && bus->self->transparent) 980 if (i >= 3 && bus->self->transparent)
981 continue; 981 continue;
982 982
983 /* If we are going to re-assign everything, mark the resource 983 /* If we're going to reassign everything, we can
984 * as unset and move it down to 0 984 * shrink the P2P resource to size 0 in order
985 * to save space.
985 */ 986 */
986 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { 987 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
987 res->flags |= IORESOURCE_UNSET; 988 res->flags |= IORESOURCE_UNSET;
988 res->end -= res->start;
989 res->start = 0; 989 res->start = 0;
990 res->end = -1;
990 continue; 991 continue;
991 } 992 }
992 993
@@ -1248,7 +1249,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
1248 pr_warning("PCI: Cannot allocate resource region " 1249 pr_warning("PCI: Cannot allocate resource region "
1249 "%d of PCI bridge %d, will remap\n", i, bus->number); 1250 "%d of PCI bridge %d, will remap\n", i, bus->number);
1250 clear_resource: 1251 clear_resource:
1251 res->start = res->end = 0; 1252 /* The resource might be figured out when doing
1253 * reassignment based on the resources required
1254 * by the downstream PCI devices. Here we set
1255 * the size of the resource to be 0 in order to
1256 * save more space.
1257 */
1258 res->start = 0;
1259 res->end = -1;
1252 res->flags = 0; 1260 res->flags = 0;
1253 } 1261 }
1254 1262
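
Both pci-common.c hunks stop clearing resources as "start = end = 0" (or "end -= start") and instead use start = 0, end = -1. With the generic resource_size() defined as end - start + 1, that encoding makes a cleared bridge window report a size of exactly 0, which is what the reassignment code wants. A quick stand-alone check of the arithmetic:

    #include <stdio.h>

    struct sk_resource { unsigned long start, end; };   /* stand-in for struct resource */

    static unsigned long sk_resource_size(const struct sk_resource *r)
    {
            return r->end - r->start + 1;   /* same formula as resource_size() */
    }

    int main(void)
    {
            struct sk_resource cleared = { .start = 0, .end = (unsigned long)-1 };

            printf("size = %lu\n", sk_resource_size(&cleared));   /* prints 0 */
            return 0;
    }
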
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e9cb51f5f801..d5ad666efd8b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -258,6 +258,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
258{ 258{
259 siginfo_t info; 259 siginfo_t info;
260 260
261 current->thread.trap_nr = signal_code;
261 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 262 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
262 11, SIGSEGV) == NOTIFY_STOP) 263 11, SIGSEGV) == NOTIFY_STOP)
263 return; 264 return;
@@ -275,6 +276,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
275{ 276{
276 siginfo_t info; 277 siginfo_t info;
277 278
279 current->thread.trap_nr = TRAP_HWBKPT;
278 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 280 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
279 11, SIGSEGV) == NOTIFY_STOP) 281 11, SIGSEGV) == NOTIFY_STOP)
280 return; 282 return;
@@ -283,7 +285,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
283 return; 285 return;
284 286
285 /* Clear the DABR */ 287 /* Clear the DABR */
286 set_dabr(0); 288 set_dabr(0, 0);
287 289
288 /* Deliver the signal to userspace */ 290 /* Deliver the signal to userspace */
289 info.si_signo = SIGTRAP; 291 info.si_signo = SIGTRAP;
@@ -364,18 +366,19 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
364{ 366{
365 if (thread->dabr) { 367 if (thread->dabr) {
366 thread->dabr = 0; 368 thread->dabr = 0;
367 set_dabr(0); 369 thread->dabrx = 0;
370 set_dabr(0, 0);
368 } 371 }
369} 372}
370#endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 373#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
371#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 374#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
372 375
373int set_dabr(unsigned long dabr) 376int set_dabr(unsigned long dabr, unsigned long dabrx)
374{ 377{
375 __get_cpu_var(current_dabr) = dabr; 378 __get_cpu_var(current_dabr) = dabr;
376 379
377 if (ppc_md.set_dabr) 380 if (ppc_md.set_dabr)
378 return ppc_md.set_dabr(dabr); 381 return ppc_md.set_dabr(dabr, dabrx);
379 382
380 /* XXX should we have a CPU_FTR_HAS_DABR ? */ 383 /* XXX should we have a CPU_FTR_HAS_DABR ? */
381#ifdef CONFIG_PPC_ADV_DEBUG_REGS 384#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -385,9 +388,8 @@ int set_dabr(unsigned long dabr)
385#endif 388#endif
386#elif defined(CONFIG_PPC_BOOK3S) 389#elif defined(CONFIG_PPC_BOOK3S)
387 mtspr(SPRN_DABR, dabr); 390 mtspr(SPRN_DABR, dabr);
391 mtspr(SPRN_DABRX, dabrx);
388#endif 392#endif
389
390
391 return 0; 393 return 0;
392} 394}
393 395
@@ -480,7 +482,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
480 */ 482 */
481#ifndef CONFIG_HAVE_HW_BREAKPOINT 483#ifndef CONFIG_HAVE_HW_BREAKPOINT
482 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 484 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
483 set_dabr(new->thread.dabr); 485 set_dabr(new->thread.dabr, new->thread.dabrx);
484#endif /* CONFIG_HAVE_HW_BREAKPOINT */ 486#endif /* CONFIG_HAVE_HW_BREAKPOINT */
485#endif 487#endif
486 488
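The DABR changes above thread the DABRX match-criteria word through every call site, so a context switch, signal return or breakpoint clear always programs both registers from the same thread state. A minimal sketch of that calling convention follows; the DABRX_ALL value and the example address are illustrative, and the register writes are replaced by printouts.

/*
 * Sketch of the new set_dabr(dabr, dabrx) convention (not the kernel
 * implementation). DABRX_ALL's value here is illustrative only.
 */
#include <stdio.h>

#define DABRX_ALL	0xfUL	/* illustrative: match in every translation mode */

struct thread_state {
	unsigned long dabr;	/* breakpoint address + RW enable bits */
	unsigned long dabrx;	/* match criteria (user/kernel/hv) */
};

static int set_dabr(unsigned long dabr, unsigned long dabrx)
{
	/* stand-ins for mtspr(SPRN_DABR, ...) and mtspr(SPRN_DABRX, ...) */
	printf("DABR  <- 0x%lx\n", dabr);
	printf("DABRX <- 0x%lx\n", dabrx);
	return 0;
}

int main(void)
{
	struct thread_state next = { .dabr = 0x10002004UL, .dabrx = DABRX_ALL };

	/* __switch_to() path: program both registers for the incoming task */
	set_dabr(next.dabr, next.dabrx);

	/* clearing a breakpoint now clears both values, as in the patch */
	set_dabr(0, 0);
	return 0;
}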
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f191bf02943a..37725e86651e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p)
78 return 1; 78 return 1;
79 79
80 memory_limit = PAGE_ALIGN(memparse(p, &p)); 80 memory_limit = PAGE_ALIGN(memparse(p, &p));
81 DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); 81 DBG("memory limit = 0x%llx\n", memory_limit);
82 82
83 return 0; 83 return 0;
84} 84}
@@ -661,7 +661,7 @@ void __init early_init_devtree(void *params)
661 661
662 /* make sure we've parsed cmdline for mem= before this */ 662 /* make sure we've parsed cmdline for mem= before this */
663 if (memory_limit) 663 if (memory_limit)
664 first_memblock_size = min(first_memblock_size, memory_limit); 664 first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
665 setup_initial_memory_limit(memstart_addr, first_memblock_size); 665 setup_initial_memory_limit(memstart_addr, first_memblock_size);
666 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ 666 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
667 memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); 667 memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
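The min() to min_t(u64, ...) change above works around the strict type check in the kernel's min(): first_memblock_size and memory_limit need not share a type on every config, and min_t casts both operands first. A compilable sketch of the same idea, using a simplified min_t macro and stand-in integer types:

/*
 * Sketch (not kernel code): min_t(type, a, b) casts both operands, so a
 * 32-bit size can be compared against a 64-bit limit. The types below
 * are stand-ins for phys_addr_t and the memory_limit variable.
 */
#include <stdint.h>
#include <stdio.h>

#define min_t(type, a, b) \
	((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint32_t first_memblock_size = 0x40000000;	/* 1 GiB, 32-bit type */
	uint64_t memory_limit = 0x20000000;		/* mem=512M on the command line */

	uint64_t capped = min_t(uint64_t, first_memblock_size, memory_limit);

	printf("initial memory limited to 0x%llx\n", (unsigned long long)capped);
	return 0;
}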
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 47834a3f4938..cb6c123722a2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1748,7 +1748,7 @@ static void __init prom_initialize_tce_table(void)
1748 * else will impact performance, so we always allocate 8MB. 1748 * else will impact performance, so we always allocate 8MB.
1749 * Anton 1749 * Anton
1750 */ 1750 */
1751 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) 1751 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1752 minsize = 8UL << 20; 1752 minsize = 8UL << 20;
1753 else 1753 else
1754 minsize = 4UL << 20; 1754 minsize = 4UL << 20;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c10fc28b9092..79d8e56470df 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -960,6 +960,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
960 thread->ptrace_bps[0] = bp; 960 thread->ptrace_bps[0] = bp;
961 ptrace_put_breakpoints(task); 961 ptrace_put_breakpoints(task);
962 thread->dabr = data; 962 thread->dabr = data;
963 thread->dabrx = DABRX_ALL;
963 return 0; 964 return 0;
964 } 965 }
965 966
@@ -983,6 +984,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
983 984
984 /* Move contents to the DABR register */ 985 /* Move contents to the DABR register */
985 task->thread.dabr = data; 986 task->thread.dabr = data;
987 task->thread.dabrx = DABRX_ALL;
986#else /* CONFIG_PPC_ADV_DEBUG_REGS */ 988#else /* CONFIG_PPC_ADV_DEBUG_REGS */
987 /* As described above, it was assumed 3 bits were passed with the data 989 /* As described above, it was assumed 3 bits were passed with the data
988 * address, but we will assume only the mode bits will be passed 990 * address, but we will assume only the mode bits will be passed
@@ -1397,6 +1399,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
1397 dabr |= DABR_DATA_WRITE; 1399 dabr |= DABR_DATA_WRITE;
1398 1400
1399 child->thread.dabr = dabr; 1401 child->thread.dabr = dabr;
1402 child->thread.dabrx = DABRX_ALL;
1400 1403
1401 return 1; 1404 return 1;
1402#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ 1405#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2c0ee6405633..20b0120db0c3 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -21,7 +21,6 @@
21#include <asm/delay.h> 21#include <asm/delay.h>
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/rtas.h> 23#include <asm/rtas.h>
24#include <asm/abs_addr.h>
25 24
26#define MODULE_VERS "1.0" 25#define MODULE_VERS "1.0"
27#define MODULE_NAME "rtas_flash" 26#define MODULE_NAME "rtas_flash"
@@ -582,7 +581,7 @@ static void rtas_flash_firmware(int reboot_type)
582 flist = (struct flash_block_list *)&rtas_data_buf[0]; 581 flist = (struct flash_block_list *)&rtas_data_buf[0];
583 flist->num_blocks = 0; 582 flist->num_blocks = 0;
584 flist->next = rtas_firmware_flash_list; 583 flist->next = rtas_firmware_flash_list;
585 rtas_block_list = virt_to_abs(flist); 584 rtas_block_list = __pa(flist);
586 if (rtas_block_list >= 4UL*1024*1024*1024) { 585 if (rtas_block_list >= 4UL*1024*1024*1024) {
587 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); 586 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
588 spin_unlock(&rtas_data_buf_lock); 587 spin_unlock(&rtas_data_buf_lock);
@@ -596,13 +595,13 @@ static void rtas_flash_firmware(int reboot_type)
596 for (f = flist; f; f = next) { 595 for (f = flist; f; f = next) {
597 /* Translate data addrs to absolute */ 596 /* Translate data addrs to absolute */
598 for (i = 0; i < f->num_blocks; i++) { 597 for (i = 0; i < f->num_blocks; i++) {
599 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data); 598 f->blocks[i].data = (char *)__pa(f->blocks[i].data);
600 image_size += f->blocks[i].length; 599 image_size += f->blocks[i].length;
601 } 600 }
602 next = f->next; 601 next = f->next;
603 /* Don't translate NULL pointer for last entry */ 602 /* Don't translate NULL pointer for last entry */
604 if (f->next) 603 if (f->next)
605 f->next = (struct flash_block_list *)virt_to_abs(f->next); 604 f->next = (struct flash_block_list *)__pa(f->next);
606 else 605 else
607 f->next = NULL; 606 f->next = NULL;
608 /* make num_blocks into the version/length field */ 607 /* make num_blocks into the version/length field */
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 179af906dcda..6de63e3250bb 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -81,7 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
81 return PCIBIOS_DEVICE_NOT_FOUND; 81 return PCIBIOS_DEVICE_NOT_FOUND;
82 82
83 if (returnval == EEH_IO_ERROR_VALUE(size) && 83 if (returnval == EEH_IO_ERROR_VALUE(size) &&
84 eeh_dn_check_failure (pdn->node, NULL)) 84 eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node)))
85 return PCIBIOS_DEVICE_NOT_FOUND; 85 return PCIBIOS_DEVICE_NOT_FOUND;
86 86
87 return PCIBIOS_SUCCESSFUL; 87 return PCIBIOS_SUCCESSFUL;
@@ -275,9 +275,6 @@ void __init find_and_init_phbs(void)
275 of_node_put(root); 275 of_node_put(root);
276 pci_devs_phb_init(); 276 pci_devs_phb_init();
277 277
278 /* Create EEH devices for all PHBs */
279 eeh_dev_phb_init();
280
281 /* 278 /*
282 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties 279 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
283 * in chosen. 280 * in chosen.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389bd4f0cdb1..efb6a41b3131 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -208,6 +208,8 @@ void __init early_setup(unsigned long dt_ptr)
208 208
209 /* Fix up paca fields required for the boot cpu */ 209 /* Fix up paca fields required for the boot cpu */
210 get_paca()->cpu_start = 1; 210 get_paca()->cpu_start = 1;
211 /* Allow percpu accesses to "work" until we setup percpu data */
212 get_paca()->data_offset = 0;
211 213
212 /* Probe the machine type */ 214 /* Probe the machine type */
213 probe_machine(); 215 probe_machine();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 5c023c9cf16e..a2dc75793bd5 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/tracehook.h> 12#include <linux/tracehook.h>
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <linux/uprobes.h>
14#include <linux/key.h> 15#include <linux/key.h>
15#include <asm/hw_breakpoint.h> 16#include <asm/hw_breakpoint.h>
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
@@ -130,7 +131,7 @@ static int do_signal(struct pt_regs *regs)
130 * triggered inside the kernel. 131 * triggered inside the kernel.
131 */ 132 */
132 if (current->thread.dabr) 133 if (current->thread.dabr)
133 set_dabr(current->thread.dabr); 134 set_dabr(current->thread.dabr, current->thread.dabrx);
134#endif 135#endif
135 /* Re-enable the breakpoints for the signal stack */ 136 /* Re-enable the breakpoints for the signal stack */
136 thread_change_pc(current, regs); 137 thread_change_pc(current, regs);
@@ -157,6 +158,11 @@ static int do_signal(struct pt_regs *regs)
157 158
158void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) 159void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
159{ 160{
161 if (thread_info_flags & _TIF_UPROBE) {
162 clear_thread_flag(TIF_UPROBE);
163 uprobe_notify_resume(regs);
164 }
165
160 if (thread_info_flags & _TIF_SIGPENDING) 166 if (thread_info_flags & _TIF_SIGPENDING)
161 do_signal(regs); 167 do_signal(regs);
162 168
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8d4214afc21d..2b952b5386fd 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -102,7 +102,7 @@ int __devinit smp_generic_kick_cpu(int nr)
102 * Ok it's not there, so it might be soft-unplugged, let's 102 * Ok it's not there, so it might be soft-unplugged, let's
103 * try to bring it back 103 * try to bring it back
104 */ 104 */
105 per_cpu(cpu_state, nr) = CPU_UP_PREPARE; 105 generic_set_cpu_up(nr);
106 smp_wmb(); 106 smp_wmb();
107 smp_send_reschedule(nr); 107 smp_send_reschedule(nr);
108#endif /* CONFIG_HOTPLUG_CPU */ 108#endif /* CONFIG_HOTPLUG_CPU */
@@ -171,7 +171,7 @@ int smp_request_message_ipi(int virq, int msg)
171 } 171 }
172#endif 172#endif
173 err = request_irq(virq, smp_ipi_action[msg], 173 err = request_irq(virq, smp_ipi_action[msg],
174 IRQF_PERCPU | IRQF_NO_THREAD, 174 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
175 smp_ipi_name[msg], 0); 175 smp_ipi_name[msg], 0);
176 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", 176 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
177 virq, smp_ipi_name[msg], err); 177 virq, smp_ipi_name[msg], err);
@@ -413,6 +413,16 @@ void generic_set_cpu_dead(unsigned int cpu)
413 per_cpu(cpu_state, cpu) = CPU_DEAD; 413 per_cpu(cpu_state, cpu) = CPU_DEAD;
414} 414}
415 415
416/*
417 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
418 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 419 * so the delay in generic_cpu_die() never happens.
420 */
421void generic_set_cpu_up(unsigned int cpu)
422{
423 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
424}
425
416int generic_check_cpu_restart(unsigned int cpu) 426int generic_check_cpu_restart(unsigned int cpu)
417{ 427{
418 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; 428 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
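The new generic_set_cpu_up() exists because generic_set_cpu_dead() leaves cpu_state at CPU_DEAD; unless kick_cpu() flips it back to CPU_UP_PREPARE, the next offline's generic_cpu_die() sees the stale CPU_DEAD immediately and its delay loop never runs. A toy model of that handshake, with per_cpu storage modelled as a plain array and the polling condensed to one check:

/*
 * Toy model (not kernel code) of the cpu_state handshake described in the
 * comment added above.
 */
#include <stdio.h>

enum cpu_state_t { CPU_UP_PREPARE, CPU_ONLINE, CPU_DEAD };

#define NR_CPUS 4
static enum cpu_state_t cpu_state[NR_CPUS];

static void generic_set_cpu_dead(unsigned int cpu)
{
	cpu_state[cpu] = CPU_DEAD;
}

static void generic_set_cpu_up(unsigned int cpu)
{
	cpu_state[cpu] = CPU_UP_PREPARE;
}

static void generic_cpu_die(unsigned int cpu)
{
	/* the real code polls with delays; condensed to a single check here */
	if (cpu_state[cpu] == CPU_DEAD)
		printf("cpu %u: already CPU_DEAD, no wait performed\n", cpu);
	else
		printf("cpu %u: would wait for the CPU to report CPU_DEAD\n", cpu);
}

int main(void)
{
	unsigned int cpu = 1;

	generic_set_cpu_dead(cpu);	/* previous offline */
	generic_set_cpu_up(cpu);	/* kick_cpu() resets the state */
	generic_cpu_die(cpu);		/* next offline now waits as intended */
	return 0;
}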
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index eaa9d0e6abca..c9986fd400d8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -508,8 +508,6 @@ void timer_interrupt(struct pt_regs * regs)
508 */ 508 */
509 may_hard_irq_enable(); 509 may_hard_irq_enable();
510 510
511 trace_timer_interrupt_entry(regs);
512
513 __get_cpu_var(irq_stat).timer_irqs++; 511 __get_cpu_var(irq_stat).timer_irqs++;
514 512
515#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 513#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
@@ -520,6 +518,8 @@ void timer_interrupt(struct pt_regs * regs)
520 old_regs = set_irq_regs(regs); 518 old_regs = set_irq_regs(regs);
521 irq_enter(); 519 irq_enter();
522 520
521 trace_timer_interrupt_entry(regs);
522
523 if (test_irq_work_pending()) { 523 if (test_irq_work_pending()) {
524 clear_irq_work_pending(); 524 clear_irq_work_pending();
525 irq_work_run(); 525 irq_work_run();
@@ -544,10 +544,10 @@ void timer_interrupt(struct pt_regs * regs)
544 } 544 }
545#endif 545#endif
546 546
547 trace_timer_interrupt_exit(regs);
548
547 irq_exit(); 549 irq_exit();
548 set_irq_regs(old_regs); 550 set_irq_regs(old_regs);
549
550 trace_timer_interrupt_exit(regs);
551} 551}
552 552
553/* 553/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ae0843fa7a61..32518401af68 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -251,6 +251,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
251 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 251 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
252 local_irq_enable(); 252 local_irq_enable();
253 253
254 current->thread.trap_nr = code;
254 memset(&info, 0, sizeof(info)); 255 memset(&info, 0, sizeof(info));
255 info.si_signo = signr; 256 info.si_signo = signr;
256 info.si_code = code; 257 info.si_code = code;
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
new file mode 100644
index 000000000000..d2d46d1014f8
--- /dev/null
+++ b/arch/powerpc/kernel/uprobes.c
@@ -0,0 +1,184 @@
1/*
2 * User-space Probes (UProbes) for powerpc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2007-2012
19 *
20 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
21 */
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/ptrace.h>
25#include <linux/uprobes.h>
26#include <linux/uaccess.h>
27#include <linux/kdebug.h>
28
29#include <asm/sstep.h>
30
31#define UPROBE_TRAP_NR UINT_MAX
32
33/**
34 * arch_uprobe_analyze_insn
 35 * @auprobe: the probepoint information.
 36 * @mm: the probed address space.
37 * @addr: vaddr to probe.
38 * Return 0 on success or a -ve number on error.
39 */
40int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
41 struct mm_struct *mm, unsigned long addr)
42{
43 if (addr & 0x03)
44 return -EINVAL;
45
46 /*
47 * We currently don't support a uprobe on an already
48 * existing breakpoint instruction underneath
49 */
50 if (is_trap(auprobe->ainsn))
51 return -ENOTSUPP;
52 return 0;
53}
54
55/*
56 * arch_uprobe_pre_xol - prepare to execute out of line.
57 * @auprobe: the probepoint information.
58 * @regs: reflects the saved user state of current task.
59 */
60int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
61{
62 struct arch_uprobe_task *autask = &current->utask->autask;
63
64 autask->saved_trap_nr = current->thread.trap_nr;
65 current->thread.trap_nr = UPROBE_TRAP_NR;
66 regs->nip = current->utask->xol_vaddr;
67 return 0;
68}
69
70/**
71 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
72 * @regs: Reflects the saved state of the task after it has hit a breakpoint
73 * instruction.
74 * Return the address of the breakpoint instruction.
75 */
76unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
77{
78 return instruction_pointer(regs);
79}
80
81/*
82 * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
83 * then detect the case where a singlestepped instruction jumps back to its
84 * own address. It is assumed that anything like do_page_fault/do_trap/etc
85 * sets thread.trap_nr != UINT_MAX.
86 *
87 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
88 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
89 * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
90 */
91bool arch_uprobe_xol_was_trapped(struct task_struct *t)
92{
93 if (t->thread.trap_nr != UPROBE_TRAP_NR)
94 return true;
95
96 return false;
97}
98
99/*
100 * Called after single-stepping. To avoid the SMP problems that can
101 * occur when we temporarily put back the original opcode to
102 * single-step, we single-stepped a copy of the instruction.
103 *
104 * This function prepares to resume execution after the single-step.
105 */
106int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
107{
108 struct uprobe_task *utask = current->utask;
109
110 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
111
112 current->thread.trap_nr = utask->autask.saved_trap_nr;
113
114 /*
115 * On powerpc, except for loads and stores, most instructions
116 * including ones that alter code flow (branches, calls, returns)
117 * are emulated in the kernel. We get here only if the emulation
 118 * support doesn't exist and we have to fix up the next instruction
119 * to be executed.
120 */
121 regs->nip = utask->vaddr + MAX_UINSN_BYTES;
122 return 0;
123}
124
125/* callback routine for handling exceptions. */
126int arch_uprobe_exception_notify(struct notifier_block *self,
127 unsigned long val, void *data)
128{
129 struct die_args *args = data;
130 struct pt_regs *regs = args->regs;
131
132 /* regs == NULL is a kernel bug */
133 if (WARN_ON(!regs))
134 return NOTIFY_DONE;
135
136 /* We are only interested in userspace traps */
137 if (!user_mode(regs))
138 return NOTIFY_DONE;
139
140 switch (val) {
141 case DIE_BPT:
142 if (uprobe_pre_sstep_notifier(regs))
143 return NOTIFY_STOP;
144 break;
145 case DIE_SSTEP:
146 if (uprobe_post_sstep_notifier(regs))
147 return NOTIFY_STOP;
148 default:
149 break;
150 }
151 return NOTIFY_DONE;
152}
153
154/*
155 * This function gets called when XOL instruction either gets trapped or
156 * the thread has a fatal signal, so reset the instruction pointer to its
157 * probed address.
158 */
159void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
160{
161 struct uprobe_task *utask = current->utask;
162
163 current->thread.trap_nr = utask->autask.saved_trap_nr;
164 instruction_pointer_set(regs, utask->vaddr);
165}
166
167/*
168 * See if the instruction can be emulated.
169 * Returns true if instruction was emulated, false otherwise.
170 */
171bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
172{
173 int ret;
174
175 /*
176 * emulate_step() returns 1 if the insn was successfully emulated.
177 * For all other cases, we need to single-step in hardware.
178 */
179 ret = emulate_step(regs, auprobe->ainsn);
180 if (ret > 0)
181 return true;
182
183 return false;
184}
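The trap_nr bookkeeping in the new uprobes code is the key protocol: arch_uprobe_pre_xol() saves the thread's trap number and parks the UPROBE_TRAP_NR sentinel (UINT_MAX) there; any fault taken while single-stepping the out-of-line copy overwrites the sentinel, so arch_uprobe_xol_was_trapped() only has to compare against it. A reduced, self-contained model of that protocol, with "thread" and "utask" collapsed into two globals:

/*
 * Reduced model (not the kernel code) of the UPROBE_TRAP_NR sentinel.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define UPROBE_TRAP_NR	UINT_MAX

static unsigned int thread_trap_nr;	/* current->thread.trap_nr */
static unsigned int saved_trap_nr;	/* utask->autask.saved_trap_nr */

static void pre_xol(void)
{
	saved_trap_nr = thread_trap_nr;
	thread_trap_nr = UPROBE_TRAP_NR;	/* park the sentinel */
}

static void fault_during_xol(unsigned int trap)
{
	/* do_page_fault()/_exception() record the real trap number */
	thread_trap_nr = trap;
}

static bool xol_was_trapped(void)
{
	return thread_trap_nr != UPROBE_TRAP_NR;
}

static void post_xol(void)
{
	thread_trap_nr = saved_trap_nr;		/* restore for the real context */
}

int main(void)
{
	pre_xol();
	printf("clean single-step: trapped=%d\n", xol_was_trapped());

	fault_during_xol(14 /* e.g. a page fault */);
	printf("after a fault:     trapped=%d\n", xol_was_trapped());

	post_xol();
	return 0;
}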
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b67db22e102d..1b2076f049ce 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -723,9 +723,7 @@ int __cpuinit vdso_getcpu_init(void)
723 723
724 val = (cpu & 0xfff) | ((node & 0xffff) << 16); 724 val = (cpu & 0xfff) | ((node & 0xffff) << 16);
725 mtspr(SPRN_SPRG3, val); 725 mtspr(SPRN_SPRG3, val);
726#ifdef CONFIG_KVM_BOOK3S_HANDLER 726 get_paca()->sprg3 = val;
727 get_paca()->kvm_hstate.sprg3 = val;
728#endif
729 727
730 put_cpu(); 728 put_cpu();
731 729
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 02b32216bbc3..201ba59738be 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -33,7 +33,6 @@
33#include <asm/prom.h> 33#include <asm/prom.h>
34#include <asm/firmware.h> 34#include <asm/firmware.h>
35#include <asm/tce.h> 35#include <asm/tce.h>
36#include <asm/abs_addr.h>
37#include <asm/page.h> 36#include <asm/page.h>
38#include <asm/hvcall.h> 37#include <asm/hvcall.h>
39 38
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 837f13e7b6bf..00aa61268e0d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -141,7 +141,7 @@ extern char etext[];
141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 141int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
142{ 142{
143 pfn_t hpaddr; 143 pfn_t hpaddr;
144 u64 va; 144 u64 vpn;
145 u64 vsid; 145 u64 vsid;
146 struct kvmppc_sid_map *map; 146 struct kvmppc_sid_map *map;
147 volatile u32 *pteg; 147 volatile u32 *pteg;
@@ -173,7 +173,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
173 BUG_ON(!map); 173 BUG_ON(!map);
174 174
175 vsid = map->host_vsid; 175 vsid = map->host_vsid;
 176 vsid = map->host_vsid; 175 vsid = map->host_vsid;
 176 va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); 176 vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
177 177
178next_pteg: 178next_pteg:
179 if (rr == 16) { 179 if (rr == 16) {
@@ -244,11 +244,11 @@ next_pteg:
244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", 244 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
245 orig_pte->may_write ? 'w' : '-', 245 orig_pte->may_write ? 'w' : '-',
246 orig_pte->may_execute ? 'x' : '-', 246 orig_pte->may_execute ? 'x' : '-',
247 orig_pte->eaddr, (ulong)pteg, va, 247 orig_pte->eaddr, (ulong)pteg, vpn,
248 orig_pte->vpage, hpaddr); 248 orig_pte->vpage, hpaddr);
249 249
250 pte->slot = (ulong)&pteg[rr]; 250 pte->slot = (ulong)&pteg[rr];
251 pte->host_va = va; 251 pte->host_vpn = vpn;
252 pte->pte = *orig_pte; 252 pte->pte = *orig_pte;
253 pte->pfn = hpaddr >> PAGE_SHIFT; 253 pte->pfn = hpaddr >> PAGE_SHIFT;
254 254
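The va to vpn switch here (and throughout this series) stores the virtual page number rather than the full virtual address: dropping the low VPN_SHIFT page-offset bits lets an address space wider than 64 bits (up to 76 bits of va with a VPN_SHIFT of 12, per the comment later in this series) fit in an unsigned long. A small sketch of the conversion follows; the SID_SHIFT, ESID_MASK and VPN_SHIFT values are the usual 256M-segment ones but should be read as assumptions of the sketch.

/*
 * Sketch (not kernel code): vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
 * ((ea & ~ESID_MASK) >> VPN_SHIFT) is the old va shifted right by
 * VPN_SHIFT, i.e. the page offset is simply not stored.
 */
#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28			/* 256M segments */
#define ESID_MASK	0xfffffffff0000000ULL	/* selects the ESID bits of an EA */
#define VPN_SHIFT	12			/* smallest supported page shift */

int main(void)
{
	uint64_t vsid  = 0x123456789ULL;	/* would come from the SLB / sid map */
	uint64_t eaddr = 0x0000000010345678ULL;

	uint64_t vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		       ((eaddr & ~ESID_MASK) >> VPN_SHIFT);

	printf("vpn         = 0x%llx\n", (unsigned long long)vpn);
	printf("page offset = 0x%llx (not stored in the vpn)\n",
	       (unsigned long long)(eaddr & ((1ULL << VPN_SHIFT) - 1)));
	return 0;
}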
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0688b6b39585..4d72f9ebc554 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -33,7 +33,7 @@
33 33
34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 34void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
35{ 35{
36 ppc_md.hpte_invalidate(pte->slot, pte->host_va, 36 ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
37 MMU_PAGE_4K, MMU_SEGSIZE_256M, 37 MMU_PAGE_4K, MMU_SEGSIZE_256M,
38 false); 38 false);
39} 39}
@@ -80,8 +80,9 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
80 80
81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) 81int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
82{ 82{
83 unsigned long vpn;
83 pfn_t hpaddr; 84 pfn_t hpaddr;
84 ulong hash, hpteg, va; 85 ulong hash, hpteg;
85 u64 vsid; 86 u64 vsid;
86 int ret; 87 int ret;
87 int rflags = 0x192; 88 int rflags = 0x192;
@@ -117,7 +118,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
117 } 118 }
118 119
119 vsid = map->host_vsid; 120 vsid = map->host_vsid;
120 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); 121 vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
121 122
122 if (!orig_pte->may_write) 123 if (!orig_pte->may_write)
123 rflags |= HPTE_R_PP; 124 rflags |= HPTE_R_PP;
@@ -129,7 +130,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
129 else 130 else
130 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); 131 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
131 132
132 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); 133 hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
133 134
134map_again: 135map_again:
135 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 136 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -141,7 +142,8 @@ map_again:
141 goto out; 142 goto out;
142 } 143 }
143 144
144 ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); 145 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
146 MMU_PAGE_4K, MMU_SEGSIZE_256M);
145 147
146 if (ret < 0) { 148 if (ret < 0) {
147 /* If we couldn't map a primary PTE, try a secondary */ 149 /* If we couldn't map a primary PTE, try a secondary */
@@ -152,7 +154,8 @@ map_again:
152 } else { 154 } else {
153 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); 155 struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
154 156
155 trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); 157 trace_kvm_book3s_64_mmu_map(rflags, hpteg,
158 vpn, hpaddr, orig_pte);
156 159
157 /* The ppc_md code may give us a secondary entry even though we 160 /* The ppc_md code may give us a secondary entry even though we
158 asked for a primary. Fix up. */ 161 asked for a primary. Fix up. */
@@ -162,7 +165,7 @@ map_again:
162 } 165 }
163 166
164 pte->slot = hpteg + (ret & 7); 167 pte->slot = hpteg + (ret & 7);
165 pte->host_va = va; 168 pte->host_vpn = vpn;
166 pte->pte = *orig_pte; 169 pte->pte = *orig_pte;
167 pte->pfn = hpaddr >> PAGE_SHIFT; 170 pte->pfn = hpaddr >> PAGE_SHIFT;
168 171
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 44b72feaff7d..74a24bbb9637 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1065,7 +1065,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1065 mtspr SPRN_DABRX,r6 1065 mtspr SPRN_DABRX,r6
1066 1066
1067 /* Restore SPRG3 */ 1067 /* Restore SPRG3 */
1068 ld r3,HSTATE_SPRG3(r13) 1068 ld r3,PACA_SPRG3(r13)
1069 mtspr SPRN_SPRG3,r3 1069 mtspr SPRN_SPRG3,r3
1070 1070
1071 /* 1071 /*
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 877186b7b1c3..ddb6a2149d44 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -189,7 +189,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
189 TP_ARGS(pte), 189 TP_ARGS(pte),
190 190
191 TP_STRUCT__entry( 191 TP_STRUCT__entry(
192 __field( u64, host_va ) 192 __field( u64, host_vpn )
193 __field( u64, pfn ) 193 __field( u64, pfn )
194 __field( ulong, eaddr ) 194 __field( ulong, eaddr )
195 __field( u64, vpage ) 195 __field( u64, vpage )
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_book3s_mmu_map,
198 ), 198 ),
199 199
200 TP_fast_assign( 200 TP_fast_assign(
201 __entry->host_va = pte->host_va; 201 __entry->host_vpn = pte->host_vpn;
202 __entry->pfn = pte->pfn; 202 __entry->pfn = pte->pfn;
203 __entry->eaddr = pte->pte.eaddr; 203 __entry->eaddr = pte->pte.eaddr;
204 __entry->vpage = pte->pte.vpage; 204 __entry->vpage = pte->pte.vpage;
@@ -208,8 +208,8 @@ TRACE_EVENT(kvm_book3s_mmu_map,
208 (pte->pte.may_execute ? 0x1 : 0); 208 (pte->pte.may_execute ? 0x1 : 0);
209 ), 209 ),
210 210
211 TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 211 TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
212 __entry->host_va, __entry->pfn, __entry->eaddr, 212 __entry->host_vpn, __entry->pfn, __entry->eaddr,
213 __entry->vpage, __entry->raddr, __entry->flags) 213 __entry->vpage, __entry->raddr, __entry->flags)
214); 214);
215 215
@@ -218,7 +218,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
218 TP_ARGS(pte), 218 TP_ARGS(pte),
219 219
220 TP_STRUCT__entry( 220 TP_STRUCT__entry(
221 __field( u64, host_va ) 221 __field( u64, host_vpn )
222 __field( u64, pfn ) 222 __field( u64, pfn )
223 __field( ulong, eaddr ) 223 __field( ulong, eaddr )
224 __field( u64, vpage ) 224 __field( u64, vpage )
@@ -227,7 +227,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
227 ), 227 ),
228 228
229 TP_fast_assign( 229 TP_fast_assign(
230 __entry->host_va = pte->host_va; 230 __entry->host_vpn = pte->host_vpn;
231 __entry->pfn = pte->pfn; 231 __entry->pfn = pte->pfn;
232 __entry->eaddr = pte->pte.eaddr; 232 __entry->eaddr = pte->pte.eaddr;
233 __entry->vpage = pte->pte.vpage; 233 __entry->vpage = pte->pte.vpage;
@@ -238,7 +238,7 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
238 ), 238 ),
239 239
240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", 240 TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
241 __entry->host_va, __entry->pfn, __entry->eaddr, 241 __entry->host_vpn, __entry->pfn, __entry->eaddr,
242 __entry->vpage, __entry->raddr, __entry->flags) 242 __entry->vpage, __entry->raddr, __entry->flags)
243); 243);
244 244
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 7ba6c96de778..0663630baf3b 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -239,8 +239,8 @@ _GLOBAL(memcpy_power7)
239 ori r9,r9,1 /* stream=1 */ 239 ori r9,r9,1 /* stream=1 */
240 240
241 srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */ 241 srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
242 cmpldi cr1,r7,0x3FF 242 cmpldi r7,0x3FF
243 ble cr1,1f 243 ble 1f
244 li r7,0x3FF 244 li r7,0x3FF
2451: lis r0,0x0E00 /* depth=7 */ 2451: lis r0,0x0E00 /* depth=7 */
246 sldi r7,r7,7 246 sldi r7,r7,7
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9a52349874ee..e15c521846ca 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -566,7 +566,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
566 unsigned long int ea; 566 unsigned long int ea;
567 unsigned int cr, mb, me, sh; 567 unsigned int cr, mb, me, sh;
568 int err; 568 int err;
569 unsigned long old_ra; 569 unsigned long old_ra, val3;
570 long ival; 570 long ival;
571 571
572 opcode = instr >> 26; 572 opcode = instr >> 26;
@@ -1486,11 +1486,43 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1486 goto ldst_done; 1486 goto ldst_done;
1487 1487
1488 case 36: /* stw */ 1488 case 36: /* stw */
1489 case 37: /* stwu */
1490 val = regs->gpr[rd]; 1489 val = regs->gpr[rd];
1491 err = write_mem(val, dform_ea(instr, regs), 4, regs); 1490 err = write_mem(val, dform_ea(instr, regs), 4, regs);
1492 goto ldst_done; 1491 goto ldst_done;
1493 1492
1493 case 37: /* stwu */
1494 val = regs->gpr[rd];
1495 val3 = dform_ea(instr, regs);
1496 /*
 1497 * On PPC32 the stack pointer (r1) is always updated with stwu, so
 1498 * emulating this store could corrupt the exception frame. Instead we
 1499 * provide an exception frame trampoline, which is pushed below the
 1500 * kprobed function's stack. Here we only update gpr[1] and do not
 1501 * emulate the store itself; the real store is done safely by the
 1502 * exception return code, which checks this flag.
1503 */
1504 if ((ra == 1) && !(regs->msr & MSR_PR) \
1505 && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
1506 /*
 1507 * Check if we would overflow the kernel stack
1508 */
1509 if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
 1510 printk(KERN_CRIT "Can't kprobe this since the kernel stack would overflow.\n");
1511 err = -EINVAL;
1512 break;
1513 }
1514
1515 /*
 1516 * Check if the flag is already set, since that
 1517 * means we'd lose the previous value.
1518 */
1519 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
1520 set_thread_flag(TIF_EMULATE_STACK_STORE);
1521 err = 0;
1522 } else
1523 err = write_mem(val, val3, 4, regs);
1524 goto ldst_done;
1525
1494 case 38: /* stb */ 1526 case 38: /* stb */
1495 case 39: /* stbu */ 1527 case 39: /* stbu */
1496 val = regs->gpr[rd]; 1528 val = regs->gpr[rd];
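The stwu special case above is subtle enough to restate in plain C: when the probed instruction is "stwu r1,-N(r1)" in kernel mode and the target lands inside the live exception frame, the emulator refuses if the kernel stack would overflow, otherwise it only updates the register and sets TIF_EMULATE_STACK_STORE so the exception-return path performs the memory write. The following is a compressed model of that decision, not the real emulator; STACK_INT_FRAME_SIZE, the register file and the example addresses are stand-ins.

/*
 * Compressed model (not the kernel code) of the "defer the stwu store"
 * logic added above.
 */
#include <stdbool.h>
#include <stdio.h>

#define STACK_INT_FRAME_SIZE	256	/* stand-in value */

struct regs { unsigned long long gpr[32]; bool user_mode; };

static bool tif_emulate_stack_store;

/* returns 0 on success, negative on refusal, mirroring the patch's shape */
static int emulate_stwu_r1(struct regs *regs, long offset,
			   unsigned long long ksp_limit)
{
	unsigned long long ea = regs->gpr[1] + offset;

	if (!regs->user_mode && ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
		/* the store would land inside the live exception frame */
		if (ea - STACK_INT_FRAME_SIZE <= ksp_limit)
			return -1;	/* would overflow the kernel stack: refuse */

		tif_emulate_stack_store = true;	/* exception return does the store */
	} else {
		/* safe to store directly (write_mem() in the real code) */
		printf("store 0x%llx -> 0x%llx\n", regs->gpr[1], ea);
	}

	regs->gpr[1] = ea;		/* stwu always updates r1 */
	return 0;
}

int main(void)
{
	struct regs regs = { .user_mode = false };

	regs.gpr[1] = 0xc0000000000fd000ULL;
	emulate_stwu_r1(&regs, -112, 0xc0000000000f8000ULL);

	printf("deferred store: %s, new r1 = 0x%llx\n",
	       tif_emulate_stack_store ? "yes" : "no", regs.gpr[1]);
	return 0;
}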
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e5f028b5794e..5495ebe983a2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -133,6 +133,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address)
133 up_read(&current->mm->mmap_sem); 133 up_read(&current->mm->mmap_sem);
134 134
135 if (user_mode(regs)) { 135 if (user_mode(regs)) {
136 current->thread.trap_nr = BUS_ADRERR;
136 info.si_signo = SIGBUS; 137 info.si_signo = SIGBUS;
137 info.si_errno = 0; 138 info.si_errno = 0;
138 info.si_code = BUS_ADRERR; 139 info.si_code = BUS_ADRERR;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 602aeb06d298..56585086413a 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
63 /* Save non-volatile registers. 63 /* Save non-volatile registers.
64 * r31 will hold "old PTE" 64 * r31 will hold "old PTE"
65 * r30 is "new PTE" 65 * r30 is "new PTE"
66 * r29 is "va" 66 * r29 is vpn
67 * r28 is a hash value 67 * r28 is a hash value
68 * r27 is hashtab mask (maybe dynamic patched instead ?) 68 * r27 is hashtab mask (maybe dynamic patched instead ?)
69 */ 69 */
@@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
111 cmpdi r9,0 /* check segment size */ 111 cmpdi r9,0 /* check segment size */
112 bne 3f 112 bne 3f
113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
114 /* Calc va and put it in r29 */ 114 /* Calc vpn and put it in r29 */
115 rldicr r29,r5,28,63-28 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT
116 rldicl r3,r3,0,36 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
117 or r29,r3,r29 117 or r29,r28,r29
118 118
119 /* Calculate hash value for primary slot and store it in r28 */ 119 /* Calculate hash value for primary slot and store it in r28 */
120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
122 xor r28,r5,r0 122 xor r28,r5,r0
123 b 4f 123 b 4f
124 124
1253: /* Calc VA and hash in r29 and r28 for 1T segment */ 1253: /* Calc vpn and put it in r29 */
126 sldi r29,r5,40 /* vsid << 40 */ 126 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
127 clrldi r3,r3,24 /* ea & 0xffffffffff */ 127 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
128 or r29,r28,r29
129
130 /*
131 * calculate hash value for primary slot and
132 * store it in r28 for 1T segment
133 */
128 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 134 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
129 clrldi r5,r5,40 /* vsid & 0xffffff */ 135 clrldi r5,r5,40 /* vsid & 0xffffff */
130 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 136 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
131 xor r28,r28,r5 137 xor r28,r28,r5
132 or r29,r3,r29 /* VA */
133 xor r28,r28,r0 /* hash */ 138 xor r28,r28,r0 /* hash */
134 139
135 /* Convert linux PTE bits into HW equivalents */ 140 /* Convert linux PTE bits into HW equivalents */
@@ -185,7 +190,7 @@ htab_insert_pte:
185 190
186 /* Call ppc_md.hpte_insert */ 191 /* Call ppc_md.hpte_insert */
187 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 192 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
188 mr r4,r29 /* Retrieve va */ 193 mr r4,r29 /* Retrieve vpn */
189 li r7,0 /* !bolted, !secondary */ 194 li r7,0 /* !bolted, !secondary */
190 li r8,MMU_PAGE_4K /* page size */ 195 li r8,MMU_PAGE_4K /* page size */
191 ld r9,STK_PARAM(R9)(r1) /* segment size */ 196 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
208 213
209 /* Call ppc_md.hpte_insert */ 214 /* Call ppc_md.hpte_insert */
210 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 215 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
211 mr r4,r29 /* Retrieve va */ 216 mr r4,r29 /* Retrieve vpn */
212 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 217 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
213 li r8,MMU_PAGE_4K /* page size */ 218 li r8,MMU_PAGE_4K /* page size */
214 ld r9,STK_PARAM(R9)(r1) /* segment size */ 219 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -278,7 +283,7 @@ htab_modify_pte:
278 add r3,r0,r3 /* add slot idx */ 283 add r3,r0,r3 /* add slot idx */
279 284
280 /* Call ppc_md.hpte_updatepp */ 285 /* Call ppc_md.hpte_updatepp */
281 mr r5,r29 /* va */ 286 mr r5,r29 /* vpn */
282 li r6,MMU_PAGE_4K /* page size */ 287 li r6,MMU_PAGE_4K /* page size */
283 ld r7,STK_PARAM(R9)(r1) /* segment size */ 288 ld r7,STK_PARAM(R9)(r1) /* segment size */
284 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 289 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
339 /* Save non-volatile registers. 344 /* Save non-volatile registers.
340 * r31 will hold "old PTE" 345 * r31 will hold "old PTE"
341 * r30 is "new PTE" 346 * r30 is "new PTE"
342 * r29 is "va" 347 * r29 is vpn
343 * r28 is a hash value 348 * r28 is a hash value
344 * r27 is hashtab mask (maybe dynamic patched instead ?) 349 * r27 is hashtab mask (maybe dynamic patched instead ?)
345 * r26 is the hidx mask 350 * r26 is the hidx mask
@@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
394 cmpdi r9,0 /* check segment size */ 399 cmpdi r9,0 /* check segment size */
395 bne 3f 400 bne 3f
396END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 401END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
397 /* Calc va and put it in r29 */ 402 /* Calc vpn and put it in r29 */
398 rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ 403 sldi r29,r5,SID_SHIFT - VPN_SHIFT
399 rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ 404 /*
400 or r29,r3,r29 /* r29 = va */ 405 * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
406 * srdi r28,r3,VPN_SHIFT
407 */
408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
409 or r29,r28,r29
401 410
402 /* Calculate hash value for primary slot and store it in r28 */ 411 /* Calculate hash value for primary slot and store it in r28 */
403 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 412 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
405 xor r28,r5,r0 414 xor r28,r5,r0
406 b 4f 415 b 4f
407 416
4083: /* Calc VA and hash in r29 and r28 for 1T segment */ 4173: /* Calc vpn and put it in r29 */
409 sldi r29,r5,40 /* vsid << 40 */ 418 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
410 clrldi r3,r3,24 /* ea & 0xffffffffff */ 419 /*
420 * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
421 * srdi r28,r3,VPN_SHIFT
422 */
423 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
424 or r29,r28,r29
425
426 /*
427 * Calculate hash value for primary slot and
428 * store it in r28 for 1T segment
429 */
411 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 430 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
412 clrldi r5,r5,40 /* vsid & 0xffffff */ 431 clrldi r5,r5,40 /* vsid & 0xffffff */
413 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 432 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
414 xor r28,r28,r5 433 xor r28,r28,r5
415 or r29,r3,r29 /* VA */
416 xor r28,r28,r0 /* hash */ 434 xor r28,r28,r0 /* hash */
417 435
418 /* Convert linux PTE bits into HW equivalents */ 436 /* Convert linux PTE bits into HW equivalents */
@@ -488,7 +506,7 @@ htab_special_pfn:
488 506
489 /* Call ppc_md.hpte_insert */ 507 /* Call ppc_md.hpte_insert */
490 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 508 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
491 mr r4,r29 /* Retrieve va */ 509 mr r4,r29 /* Retrieve vpn */
492 li r7,0 /* !bolted, !secondary */ 510 li r7,0 /* !bolted, !secondary */
493 li r8,MMU_PAGE_4K /* page size */ 511 li r8,MMU_PAGE_4K /* page size */
494 ld r9,STK_PARAM(R9)(r1) /* segment size */ 512 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
515 533
516 /* Call ppc_md.hpte_insert */ 534 /* Call ppc_md.hpte_insert */
517 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 535 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
518 mr r4,r29 /* Retrieve va */ 536 mr r4,r29 /* Retrieve vpn */
519 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 537 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
520 li r8,MMU_PAGE_4K /* page size */ 538 li r8,MMU_PAGE_4K /* page size */
521 ld r9,STK_PARAM(R9)(r1) /* segment size */ 539 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
547 * useless now that the segment has been switched to 4k pages. 565 * useless now that the segment has been switched to 4k pages.
548 */ 566 */
549htab_inval_old_hpte: 567htab_inval_old_hpte:
550 mr r3,r29 /* virtual addr */ 568 mr r3,r29 /* vpn */
551 mr r4,r31 /* PTE.pte */ 569 mr r4,r31 /* PTE.pte */
552 li r5,0 /* PTE.hidx */ 570 li r5,0 /* PTE.hidx */
553 li r6,MMU_PAGE_64K /* psize */ 571 li r6,MMU_PAGE_64K /* psize */
@@ -620,7 +638,7 @@ htab_modify_pte:
620 add r3,r0,r3 /* add slot idx */ 638 add r3,r0,r3 /* add slot idx */
621 639
622 /* Call ppc_md.hpte_updatepp */ 640 /* Call ppc_md.hpte_updatepp */
623 mr r5,r29 /* va */ 641 mr r5,r29 /* vpn */
624 li r6,MMU_PAGE_4K /* page size */ 642 li r6,MMU_PAGE_4K /* page size */
625 ld r7,STK_PARAM(R9)(r1) /* segment size */ 643 ld r7,STK_PARAM(R9)(r1) /* segment size */
626 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 644 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
676 /* Save non-volatile registers. 694 /* Save non-volatile registers.
677 * r31 will hold "old PTE" 695 * r31 will hold "old PTE"
678 * r30 is "new PTE" 696 * r30 is "new PTE"
679 * r29 is "va" 697 * r29 is vpn
680 * r28 is a hash value 698 * r28 is a hash value
681 * r27 is hashtab mask (maybe dynamic patched instead ?) 699 * r27 is hashtab mask (maybe dynamic patched instead ?)
682 */ 700 */
@@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
729 cmpdi r9,0 /* check segment size */ 747 cmpdi r9,0 /* check segment size */
730 bne 3f 748 bne 3f
731END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 749END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
732 /* Calc va and put it in r29 */ 750 /* Calc vpn and put it in r29 */
733 rldicr r29,r5,28,63-28 751 sldi r29,r5,SID_SHIFT - VPN_SHIFT
734 rldicl r3,r3,0,36 752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
735 or r29,r3,r29 753 or r29,r28,r29
736 754
737 /* Calculate hash value for primary slot and store it in r28 */ 755 /* Calculate hash value for primary slot and store it in r28 */
738 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 756 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
740 xor r28,r5,r0 758 xor r28,r5,r0
741 b 4f 759 b 4f
742 760
7433: /* Calc VA and hash in r29 and r28 for 1T segment */ 7613: /* Calc vpn and put it in r29 */
744 sldi r29,r5,40 /* vsid << 40 */ 762 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
745 clrldi r3,r3,24 /* ea & 0xffffffffff */ 763 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
764 or r29,r28,r29
765
766 /*
767 * calculate hash value for primary slot and
768 * store it in r28 for 1T segment
769 */
746 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 770 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
747 clrldi r5,r5,40 /* vsid & 0xffffff */ 771 clrldi r5,r5,40 /* vsid & 0xffffff */
748 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 772 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
749 xor r28,r28,r5 773 xor r28,r28,r5
750 or r29,r3,r29 /* VA */
751 xor r28,r28,r0 /* hash */ 774 xor r28,r28,r0 /* hash */
752 775
753 /* Convert linux PTE bits into HW equivalents */ 776 /* Convert linux PTE bits into HW equivalents */
@@ -806,7 +829,7 @@ ht64_insert_pte:
806 829
807 /* Call ppc_md.hpte_insert */ 830 /* Call ppc_md.hpte_insert */
808 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
809 mr r4,r29 /* Retrieve va */ 832 mr r4,r29 /* Retrieve vpn */
810 li r7,0 /* !bolted, !secondary */ 833 li r7,0 /* !bolted, !secondary */
811 li r8,MMU_PAGE_64K 834 li r8,MMU_PAGE_64K
812 ld r9,STK_PARAM(R9)(r1) /* segment size */ 835 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
829 852
830 /* Call ppc_md.hpte_insert */ 853 /* Call ppc_md.hpte_insert */
831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 854 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
832 mr r4,r29 /* Retrieve va */ 855 mr r4,r29 /* Retrieve vpn */
833 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 856 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
834 li r8,MMU_PAGE_64K 857 li r8,MMU_PAGE_64K
835 ld r9,STK_PARAM(R9)(r1) /* segment size */ 858 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -899,7 +922,7 @@ ht64_modify_pte:
899 add r3,r0,r3 /* add slot idx */ 922 add r3,r0,r3 /* add slot idx */
900 923
901 /* Call ppc_md.hpte_updatepp */ 924 /* Call ppc_md.hpte_updatepp */
902 mr r5,r29 /* va */ 925 mr r5,r29 /* vpn */
903 li r6,MMU_PAGE_64K 926 li r6,MMU_PAGE_64K
904 ld r7,STK_PARAM(R9)(r1) /* segment size */ 927 ld r7,STK_PARAM(R9)(r1) /* segment size */
905 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 928 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 90039bc64119..ffc1e00f7a22 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -14,10 +14,10 @@
14 14
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/of.h>
17#include <linux/threads.h> 18#include <linux/threads.h>
18#include <linux/smp.h> 19#include <linux/smp.h>
19 20
20#include <asm/abs_addr.h>
21#include <asm/machdep.h> 21#include <asm/machdep.h>
22#include <asm/mmu.h> 22#include <asm/mmu.h>
23#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
@@ -39,22 +39,35 @@
39 39
40DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40DEFINE_RAW_SPINLOCK(native_tlbie_lock);
41 41
42static inline void __tlbie(unsigned long va, int psize, int ssize) 42static inline void __tlbie(unsigned long vpn, int psize, int ssize)
43{ 43{
44 unsigned long va;
44 unsigned int penc; 45 unsigned int penc;
45 46
46 /* clear top 16 bits, non SLS segment */ 47 /*
 48 * We need bits 14 to 65 of the va for a tlbie of a 4K page.
 49 * With a vpn we already ignore the lower VPN_SHIFT bits.
 50 * And the top two bits are already ignored because we can
 51 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
52 * of 12.
53 */
54 va = vpn << VPN_SHIFT;
55 /*
56 * clear top 16 bits of 64bit va, non SLS segment
 57 * Older versions of the architecture (2.02 and earlier) require the
58 * masking of the top 16 bits.
59 */
47 va &= ~(0xffffULL << 48); 60 va &= ~(0xffffULL << 48);
48 61
49 switch (psize) { 62 switch (psize) {
50 case MMU_PAGE_4K: 63 case MMU_PAGE_4K:
51 va &= ~0xffful;
52 va |= ssize << 8; 64 va |= ssize << 8;
53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 65 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
54 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 66 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
55 : "memory"); 67 : "memory");
56 break; 68 break;
57 default: 69 default:
70 /* We need 14 to 14 + i bits of va */
58 penc = mmu_psize_defs[psize].penc; 71 penc = mmu_psize_defs[psize].penc;
59 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 72 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
60 va |= penc << 12; 73 va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
67 } 80 }
68} 81}
69 82
70static inline void __tlbiel(unsigned long va, int psize, int ssize) 83static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
71{ 84{
85 unsigned long va;
72 unsigned int penc; 86 unsigned int penc;
73 87
 74 /* clear top 16 bits, non SLS segment */ 88 /* VPN_SHIFT can be at most 12 */
89 va = vpn << VPN_SHIFT;
90 /*
91 * clear top 16 bits of 64 bit va, non SLS segment
 92 * Older versions of the architecture (2.02 and earlier) require the
93 * masking of the top 16 bits.
94 */
75 va &= ~(0xffffULL << 48); 95 va &= ~(0xffffULL << 48);
76 96
77 switch (psize) { 97 switch (psize) {
78 case MMU_PAGE_4K: 98 case MMU_PAGE_4K:
79 va &= ~0xffful;
80 va |= ssize << 8; 99 va |= ssize << 8;
81 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 100 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
82 : : "r"(va) : "memory"); 101 : : "r"(va) : "memory");
83 break; 102 break;
84 default: 103 default:
104 /* We need 14 to 14 + i bits of va */
85 penc = mmu_psize_defs[psize].penc; 105 penc = mmu_psize_defs[psize].penc;
86 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 106 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
87 va |= penc << 12; 107 va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
94 114
95} 115}
96 116
97static inline void tlbie(unsigned long va, int psize, int ssize, int local) 117static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
98{ 118{
99 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); 119 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
100 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 120 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
105 raw_spin_lock(&native_tlbie_lock); 125 raw_spin_lock(&native_tlbie_lock);
106 asm volatile("ptesync": : :"memory"); 126 asm volatile("ptesync": : :"memory");
107 if (use_local) { 127 if (use_local) {
108 __tlbiel(va, psize, ssize); 128 __tlbiel(vpn, psize, ssize);
109 asm volatile("ptesync": : :"memory"); 129 asm volatile("ptesync": : :"memory");
110 } else { 130 } else {
111 __tlbie(va, psize, ssize); 131 __tlbie(vpn, psize, ssize);
112 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 132 asm volatile("eieio; tlbsync; ptesync": : :"memory");
113 } 133 }
114 if (lock_tlbie && !use_local) 134 if (lock_tlbie && !use_local)
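For the tlbie path the conversion runs the other way: the stored vpn is shifted back up by VPN_SHIFT to rebuild a va, the top 16 bits are masked because older architecture versions (2.02 and earlier) require it, and the segment-size encoding is or'ed into the low bits, as in the 4K branch above. A sketch of that reconstruction; VPN_SHIFT and the position of the ssize field are assumptions of the sketch, not a definitive encoding reference.

/*
 * Sketch (not kernel code) of rebuilding the 4K-page tlbie operand
 * from a vpn, following the branch shown above.
 */
#include <stdint.h>
#include <stdio.h>

#define VPN_SHIFT	12

static uint64_t tlbie_va_4k(uint64_t vpn, unsigned int ssize)
{
	uint64_t va = vpn << VPN_SHIFT;		/* restore the address bits */

	va &= ~(0xffffULL << 48);		/* mask top 16 bits for pre-2.03 CPUs */
	va |= (uint64_t)ssize << 8;		/* segment size in the low bits */
	return va;				/* operand handed to tlbie */
}

int main(void)
{
	uint64_t vpn = 0x00000c0000001ULL;	/* arbitrary example vpn */

	printf("tlbie operand = 0x%016llx\n",
	       (unsigned long long)tlbie_va_4k(vpn, 0 /* MMU_SEGSIZE_256M */));
	return 0;
}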
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
134 clear_bit_unlock(HPTE_LOCK_BIT, word); 154 clear_bit_unlock(HPTE_LOCK_BIT, word);
135} 155}
136 156
137static long native_hpte_insert(unsigned long hpte_group, unsigned long va, 157static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
138 unsigned long pa, unsigned long rflags, 158 unsigned long pa, unsigned long rflags,
139 unsigned long vflags, int psize, int ssize) 159 unsigned long vflags, int psize, int ssize)
140{ 160{
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
143 int i; 163 int i;
144 164
145 if (!(vflags & HPTE_V_BOLTED)) { 165 if (!(vflags & HPTE_V_BOLTED)) {
146 DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx," 166 DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
147 " rflags=%lx, vflags=%lx, psize=%d)\n", 167 " rflags=%lx, vflags=%lx, psize=%d)\n",
148 hpte_group, va, pa, rflags, vflags, psize); 168 hpte_group, vpn, pa, rflags, vflags, psize);
149 } 169 }
150 170
151 for (i = 0; i < HPTES_PER_GROUP; i++) { 171 for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
163 if (i == HPTES_PER_GROUP) 183 if (i == HPTES_PER_GROUP)
164 return -1; 184 return -1;
165 185
166 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 186 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
167 hpte_r = hpte_encode_r(pa, psize) | rflags; 187 hpte_r = hpte_encode_r(pa, psize) | rflags;
168 188
169 if (!(vflags & HPTE_V_BOLTED)) { 189 if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
225} 245}
226 246
227static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 247static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
228 unsigned long va, int psize, int ssize, 248 unsigned long vpn, int psize, int ssize,
229 int local) 249 int local)
230{ 250{
231 struct hash_pte *hptep = htab_address + slot; 251 struct hash_pte *hptep = htab_address + slot;
232 unsigned long hpte_v, want_v; 252 unsigned long hpte_v, want_v;
233 int ret = 0; 253 int ret = 0;
234 254
235 want_v = hpte_encode_v(va, psize, ssize); 255 want_v = hpte_encode_v(vpn, psize, ssize);
236 256
237 DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)", 257 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
238 va, want_v & HPTE_V_AVPN, slot, newpp); 258 vpn, want_v & HPTE_V_AVPN, slot, newpp);
239 259
240 native_lock_hpte(hptep); 260 native_lock_hpte(hptep);
241 261
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
254 native_unlock_hpte(hptep); 274 native_unlock_hpte(hptep);
255 275
256 /* Ensure it is out of the tlb too. */ 276 /* Ensure it is out of the tlb too. */
257 tlbie(va, psize, ssize, local); 277 tlbie(vpn, psize, ssize, local);
258 278
259 return ret; 279 return ret;
260} 280}
261 281
262static long native_hpte_find(unsigned long va, int psize, int ssize) 282static long native_hpte_find(unsigned long vpn, int psize, int ssize)
263{ 283{
264 struct hash_pte *hptep; 284 struct hash_pte *hptep;
265 unsigned long hash; 285 unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
267 long slot; 287 long slot;
268 unsigned long want_v, hpte_v; 288 unsigned long want_v, hpte_v;
269 289
270 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 290 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
271 want_v = hpte_encode_v(va, psize, ssize); 291 want_v = hpte_encode_v(vpn, psize, ssize);
272 292
273 /* Bolted mappings are only ever in the primary group */ 293 /* Bolted mappings are only ever in the primary group */
274 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 294 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
295static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 315static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
296 int psize, int ssize) 316 int psize, int ssize)
297{ 317{
298 unsigned long vsid, va; 318 unsigned long vpn;
319 unsigned long vsid;
299 long slot; 320 long slot;
300 struct hash_pte *hptep; 321 struct hash_pte *hptep;
301 322
302 vsid = get_kernel_vsid(ea, ssize); 323 vsid = get_kernel_vsid(ea, ssize);
303 va = hpt_va(ea, vsid, ssize); 324 vpn = hpt_vpn(ea, vsid, ssize);
304 325
305 slot = native_hpte_find(va, psize, ssize); 326 slot = native_hpte_find(vpn, psize, ssize);
306 if (slot == -1) 327 if (slot == -1)
307 panic("could not find page to bolt\n"); 328 panic("could not find page to bolt\n");
308 hptep = htab_address + slot; 329 hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
312 (newpp & (HPTE_R_PP | HPTE_R_N)); 333 (newpp & (HPTE_R_PP | HPTE_R_N));
313 334
314 /* Ensure it is out of the tlb too. */ 335 /* Ensure it is out of the tlb too. */
315 tlbie(va, psize, ssize, 0); 336 tlbie(vpn, psize, ssize, 0);
316} 337}
317 338
318static void native_hpte_invalidate(unsigned long slot, unsigned long va, 339static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
319 int psize, int ssize, int local) 340 int psize, int ssize, int local)
320{ 341{
321 struct hash_pte *hptep = htab_address + slot; 342 struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
325 346
326 local_irq_save(flags); 347 local_irq_save(flags);
327 348
328 DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot); 349 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
329 350
330 want_v = hpte_encode_v(va, psize, ssize); 351 want_v = hpte_encode_v(vpn, psize, ssize);
331 native_lock_hpte(hptep); 352 native_lock_hpte(hptep);
332 hpte_v = hptep->v; 353 hpte_v = hptep->v;
333 354
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
339 hptep->v = 0; 360 hptep->v = 0;
340 361
341 /* Invalidate the TLB */ 362 /* Invalidate the TLB */
342 tlbie(va, psize, ssize, local); 363 tlbie(vpn, psize, ssize, local);
343 364
344 local_irq_restore(flags); 365 local_irq_restore(flags);
345} 366}
@@ -349,11 +370,12 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
349#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) 370#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
350 371
351static void hpte_decode(struct hash_pte *hpte, unsigned long slot, 372static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
352 int *psize, int *ssize, unsigned long *va) 373 int *psize, int *ssize, unsigned long *vpn)
353{ 374{
375 unsigned long avpn, pteg, vpi;
354 unsigned long hpte_r = hpte->r; 376 unsigned long hpte_r = hpte->r;
355 unsigned long hpte_v = hpte->v; 377 unsigned long hpte_v = hpte->v;
356 unsigned long avpn; 378 unsigned long vsid, seg_off;
357 int i, size, shift, penc; 379 int i, size, shift, penc;
358 380
359 if (!(hpte_v & HPTE_V_LARGE)) 381 if (!(hpte_v & HPTE_V_LARGE))
@@ -380,32 +402,38 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
380 } 402 }
381 403
382 /* This works for all page sizes, and for 256M and 1T segments */ 404 /* This works for all page sizes, and for 256M and 1T segments */
405 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
383 shift = mmu_psize_defs[size].shift; 406 shift = mmu_psize_defs[size].shift;
384 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
385
386 if (shift < 23) {
387 unsigned long vpi, vsid, pteg;
388 407
389 pteg = slot / HPTES_PER_GROUP; 408 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
390 if (hpte_v & HPTE_V_SECONDARY) 409 pteg = slot / HPTES_PER_GROUP;
391 pteg = ~pteg; 410 if (hpte_v & HPTE_V_SECONDARY)
392 switch (hpte_v >> HPTE_V_SSIZE_SHIFT) { 411 pteg = ~pteg;
393 case MMU_SEGSIZE_256M: 412
394 vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask; 413 switch (*ssize) {
395 break; 414 case MMU_SEGSIZE_256M:
396 case MMU_SEGSIZE_1T: 415 /* We only have 28 - 23 bits of seg_off in avpn */
397 vsid = avpn >> 40; 416 seg_off = (avpn & 0x1f) << 23;
417 vsid = avpn >> 5;
418 /* We can find more bits from the pteg value */
419 if (shift < 23) {
420 vpi = (vsid ^ pteg) & htab_hash_mask;
421 seg_off |= vpi << shift;
422 }
423 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
424 case MMU_SEGSIZE_1T:
425 /* We only have 40 - 23 bits of seg_off in avpn */
426 seg_off = (avpn & 0x1ffff) << 23;
427 vsid = avpn >> 17;
428 if (shift < 23) {
398 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; 429 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
399 break; 430 seg_off |= vpi << shift;
400 default:
401 avpn = vpi = size = 0;
402 } 431 }
403 avpn |= (vpi << mmu_psize_defs[size].shift); 432 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
433 default:
434 *vpn = size = 0;
404 } 435 }
405
406 *va = avpn;
407 *psize = size; 436 *psize = size;
408 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
409} 437}
410 438
411/* 439/*
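The reworked hpte_decode() above recovers a virtual page number instead of a virtual address: the AVPN field supplies the VSID and the topmost segment-offset bits, the PTE group index supplies further offset bits when the page shift is below 23, and the two are recombined into a VPN. A compilable sketch of the 256M-segment arm, using the same expressions as the new code; SID_SHIFT, VPN_SHIFT, HPTES_PER_GROUP and the hash mask are assumed values, not quoted from the kernel headers:

#include <stdio.h>

#define SID_SHIFT       28                 /* 256MB segments */
#define VPN_SHIFT       12                 /* assumed minimum page shift */
#define HPTES_PER_GROUP 8
#define HTAB_HASH_MASK  ((1UL << 20) - 1)  /* assumed */

/*
 * Rebuild the VPN of a 256M-segment HPTE from its AVPN, slot and page shift,
 * following the MMU_SEGSIZE_256M arm of hpte_decode() above.
 */
static unsigned long decode_vpn_256m(unsigned long avpn, unsigned long slot,
				     int shift, int secondary)
{
	unsigned long pteg = slot / HPTES_PER_GROUP;
	unsigned long seg_off, vsid, vpi;

	if (secondary)
		pteg = ~pteg;

	/* Only segment-offset bits 27..23 are present in the AVPN. */
	seg_off = (avpn & 0x1f) << 23;
	vsid = avpn >> 5;

	/* For pages smaller than 8MB, more offset bits come from the group index. */
	if (shift < 23) {
		vpi = (vsid ^ pteg) & HTAB_HASH_MASK;
		seg_off |= vpi << shift;
	}
	return vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
}

int main(void)
{
	printf("vpn = %#lx\n", decode_vpn_256m(0xabcdeUL, 4096, 12, 0));
	return 0;
}
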
@@ -418,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
418 */ 446 */
419static void native_hpte_clear(void) 447static void native_hpte_clear(void)
420{ 448{
449 unsigned long vpn = 0;
421 unsigned long slot, slots, flags; 450 unsigned long slot, slots, flags;
422 struct hash_pte *hptep = htab_address; 451 struct hash_pte *hptep = htab_address;
423 unsigned long hpte_v, va; 452 unsigned long hpte_v;
424 unsigned long pteg_count; 453 unsigned long pteg_count;
425 int psize, ssize; 454 int psize, ssize;
426 455
@@ -448,9 +477,9 @@ static void native_hpte_clear(void)
448 * already hold the native_tlbie_lock. 477 * already hold the native_tlbie_lock.
449 */ 478 */
450 if (hpte_v & HPTE_V_VALID) { 479 if (hpte_v & HPTE_V_VALID) {
451 hpte_decode(hptep, slot, &psize, &ssize, &va); 480 hpte_decode(hptep, slot, &psize, &ssize, &vpn);
452 hptep->v = 0; 481 hptep->v = 0;
453 __tlbie(va, psize, ssize); 482 __tlbie(vpn, psize, ssize);
454 } 483 }
455 } 484 }
456 485
@@ -465,7 +494,8 @@ static void native_hpte_clear(void)
465 */ 494 */
466static void native_flush_hash_range(unsigned long number, int local) 495static void native_flush_hash_range(unsigned long number, int local)
467{ 496{
468 unsigned long va, hash, index, hidx, shift, slot; 497 unsigned long vpn;
498 unsigned long hash, index, hidx, shift, slot;
469 struct hash_pte *hptep; 499 struct hash_pte *hptep;
470 unsigned long hpte_v; 500 unsigned long hpte_v;
471 unsigned long want_v; 501 unsigned long want_v;
@@ -479,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
479 local_irq_save(flags); 509 local_irq_save(flags);
480 510
481 for (i = 0; i < number; i++) { 511 for (i = 0; i < number; i++) {
482 va = batch->vaddr[i]; 512 vpn = batch->vpn[i];
483 pte = batch->pte[i]; 513 pte = batch->pte[i];
484 514
485 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 515 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
486 hash = hpt_hash(va, shift, ssize); 516 hash = hpt_hash(vpn, shift, ssize);
487 hidx = __rpte_to_hidx(pte, index); 517 hidx = __rpte_to_hidx(pte, index);
488 if (hidx & _PTEIDX_SECONDARY) 518 if (hidx & _PTEIDX_SECONDARY)
489 hash = ~hash; 519 hash = ~hash;
490 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 520 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
491 slot += hidx & _PTEIDX_GROUP_IX; 521 slot += hidx & _PTEIDX_GROUP_IX;
492 hptep = htab_address + slot; 522 hptep = htab_address + slot;
493 want_v = hpte_encode_v(va, psize, ssize); 523 want_v = hpte_encode_v(vpn, psize, ssize);
494 native_lock_hpte(hptep); 524 native_lock_hpte(hptep);
495 hpte_v = hptep->v; 525 hpte_v = hptep->v;
496 if (!HPTE_V_COMPARE(hpte_v, want_v) || 526 if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -505,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
505 mmu_psize_defs[psize].tlbiel && local) { 535 mmu_psize_defs[psize].tlbiel && local) {
506 asm volatile("ptesync":::"memory"); 536 asm volatile("ptesync":::"memory");
507 for (i = 0; i < number; i++) { 537 for (i = 0; i < number; i++) {
508 va = batch->vaddr[i]; 538 vpn = batch->vpn[i];
509 pte = batch->pte[i]; 539 pte = batch->pte[i];
510 540
511 pte_iterate_hashed_subpages(pte, psize, va, index, 541 pte_iterate_hashed_subpages(pte, psize,
512 shift) { 542 vpn, index, shift) {
513 __tlbiel(va, psize, ssize); 543 __tlbiel(vpn, psize, ssize);
514 } pte_iterate_hashed_end(); 544 } pte_iterate_hashed_end();
515 } 545 }
516 asm volatile("ptesync":::"memory"); 546 asm volatile("ptesync":::"memory");
@@ -522,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
522 552
523 asm volatile("ptesync":::"memory"); 553 asm volatile("ptesync":::"memory");
524 for (i = 0; i < number; i++) { 554 for (i = 0; i < number; i++) {
525 va = batch->vaddr[i]; 555 vpn = batch->vpn[i];
526 pte = batch->pte[i]; 556 pte = batch->pte[i];
527 557
528 pte_iterate_hashed_subpages(pte, psize, va, index, 558 pte_iterate_hashed_subpages(pte, psize,
529 shift) { 559 vpn, index, shift) {
530 __tlbie(va, psize, ssize); 560 __tlbie(vpn, psize, ssize);
531 } pte_iterate_hashed_end(); 561 } pte_iterate_hashed_end();
532 } 562 }
533 asm volatile("eieio; tlbsync; ptesync":::"memory"); 563 asm volatile("eieio; tlbsync; ptesync":::"memory");
@@ -539,29 +569,6 @@ static void native_flush_hash_range(unsigned long number, int local)
539 local_irq_restore(flags); 569 local_irq_restore(flags);
540} 570}
541 571
542#ifdef CONFIG_PPC_PSERIES
543/* Disable TLB batching on nighthawk */
544static inline int tlb_batching_enabled(void)
545{
546 struct device_node *root = of_find_node_by_path("/");
547 int enabled = 1;
548
549 if (root) {
550 const char *model = of_get_property(root, "model", NULL);
551 if (model && !strcmp(model, "IBM,9076-N81"))
552 enabled = 0;
553 of_node_put(root);
554 }
555
556 return enabled;
557}
558#else
559static inline int tlb_batching_enabled(void)
560{
561 return 1;
562}
563#endif
564
565void __init hpte_init_native(void) 572void __init hpte_init_native(void)
566{ 573{
567 ppc_md.hpte_invalidate = native_hpte_invalidate; 574 ppc_md.hpte_invalidate = native_hpte_invalidate;
@@ -570,6 +577,5 @@ void __init hpte_init_native(void)
570 ppc_md.hpte_insert = native_hpte_insert; 577 ppc_md.hpte_insert = native_hpte_insert;
571 ppc_md.hpte_remove = native_hpte_remove; 578 ppc_md.hpte_remove = native_hpte_remove;
572 ppc_md.hpte_clear_all = native_hpte_clear; 579 ppc_md.hpte_clear_all = native_hpte_clear;
573 if (tlb_batching_enabled()) 580 ppc_md.flush_hash_range = native_flush_hash_range;
574 ppc_md.flush_hash_range = native_flush_hash_range;
575} 581}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 377e5cbedbbb..3a292be2e079 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -43,7 +43,6 @@
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/machdep.h> 44#include <asm/machdep.h>
45#include <asm/prom.h> 45#include <asm/prom.h>
46#include <asm/abs_addr.h>
47#include <asm/tlbflush.h> 46#include <asm/tlbflush.h>
48#include <asm/io.h> 47#include <asm/io.h>
49#include <asm/eeh.h> 48#include <asm/eeh.h>
@@ -192,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
192 vaddr += step, paddr += step) { 191 vaddr += step, paddr += step) {
193 unsigned long hash, hpteg; 192 unsigned long hash, hpteg;
194 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 193 unsigned long vsid = get_kernel_vsid(vaddr, ssize);
195 unsigned long va = hpt_va(vaddr, vsid, ssize); 194 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
196 unsigned long tprot = prot; 195 unsigned long tprot = prot;
197 196
198 /* Make kernel text executable */ 197 /* Make kernel text executable */
199 if (overlaps_kernel_text(vaddr, vaddr + step)) 198 if (overlaps_kernel_text(vaddr, vaddr + step))
200 tprot &= ~HPTE_R_N; 199 tprot &= ~HPTE_R_N;
201 200
202 hash = hpt_hash(va, shift, ssize); 201 hash = hpt_hash(vpn, shift, ssize);
203 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
204 203
205 BUG_ON(!ppc_md.hpte_insert); 204 BUG_ON(!ppc_md.hpte_insert);
206 ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, 205 ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
207 HPTE_V_BOLTED, psize, ssize); 206 HPTE_V_BOLTED, psize, ssize);
208 207
209 if (ret < 0) 208 if (ret < 0)
@@ -651,7 +650,7 @@ static void __init htab_initialize(void)
651 DBG("Hash table allocated at %lx, size: %lx\n", table, 650 DBG("Hash table allocated at %lx, size: %lx\n", table,
652 htab_size_bytes); 651 htab_size_bytes);
653 652
654 htab_address = abs_to_virt(table); 653 htab_address = __va(table);
655 654
656 /* htab absolute addr + encoded htabsize */ 655 /* htab absolute addr + encoded htabsize */
657 _SDR1 = table + __ilog2(pteg_count) - 11; 656 _SDR1 = table + __ilog2(pteg_count) - 11;
@@ -804,16 +803,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
804#ifdef CONFIG_PPC_MM_SLICES 803#ifdef CONFIG_PPC_MM_SLICES
805unsigned int get_paca_psize(unsigned long addr) 804unsigned int get_paca_psize(unsigned long addr)
806{ 805{
807 unsigned long index, slices; 806 u64 lpsizes;
807 unsigned char *hpsizes;
808 unsigned long index, mask_index;
808 809
809 if (addr < SLICE_LOW_TOP) { 810 if (addr < SLICE_LOW_TOP) {
810 slices = get_paca()->context.low_slices_psize; 811 lpsizes = get_paca()->context.low_slices_psize;
811 index = GET_LOW_SLICE_INDEX(addr); 812 index = GET_LOW_SLICE_INDEX(addr);
812 } else { 813 return (lpsizes >> (index * 4)) & 0xF;
813 slices = get_paca()->context.high_slices_psize;
814 index = GET_HIGH_SLICE_INDEX(addr);
815 } 814 }
816 return (slices >> (index * 4)) & 0xF; 815 hpsizes = get_paca()->context.high_slices_psize;
816 index = GET_HIGH_SLICE_INDEX(addr);
817 mask_index = index & 0x1;
818 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
817} 819}
818 820
819#else 821#else
@@ -1153,21 +1155,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1153/* WARNING: This is called from hash_low_64.S, if you change this prototype, 1155/* WARNING: This is called from hash_low_64.S, if you change this prototype,
1154 * do not forget to update the assembly call site ! 1156 * do not forget to update the assembly call site !
1155 */ 1157 */
1156void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, 1158void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1157 int local) 1159 int local)
1158{ 1160{
1159 unsigned long hash, index, shift, hidx, slot; 1161 unsigned long hash, index, shift, hidx, slot;
1160 1162
1161 DBG_LOW("flush_hash_page(va=%016lx)\n", va); 1163 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1162 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 1164 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1163 hash = hpt_hash(va, shift, ssize); 1165 hash = hpt_hash(vpn, shift, ssize);
1164 hidx = __rpte_to_hidx(pte, index); 1166 hidx = __rpte_to_hidx(pte, index);
1165 if (hidx & _PTEIDX_SECONDARY) 1167 if (hidx & _PTEIDX_SECONDARY)
1166 hash = ~hash; 1168 hash = ~hash;
1167 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1169 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1168 slot += hidx & _PTEIDX_GROUP_IX; 1170 slot += hidx & _PTEIDX_GROUP_IX;
1169 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); 1171 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
1170 ppc_md.hpte_invalidate(slot, va, psize, ssize, local); 1172 ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
1171 } pte_iterate_hashed_end(); 1173 } pte_iterate_hashed_end();
1172} 1174}
1173 1175
@@ -1181,7 +1183,7 @@ void flush_hash_range(unsigned long number, int local)
1181 &__get_cpu_var(ppc64_tlb_batch); 1183 &__get_cpu_var(ppc64_tlb_batch);
1182 1184
1183 for (i = 0; i < number; i++) 1185 for (i = 0; i < number; i++)
1184 flush_hash_page(batch->vaddr[i], batch->pte[i], 1186 flush_hash_page(batch->vpn[i], batch->pte[i],
1185 batch->psize, batch->ssize, local); 1187 batch->psize, batch->ssize, local);
1186 } 1188 }
1187} 1189}
@@ -1208,14 +1210,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1208{ 1210{
1209 unsigned long hash, hpteg; 1211 unsigned long hash, hpteg;
1210 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1212 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1211 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1213 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1212 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); 1214 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
1213 int ret; 1215 int ret;
1214 1216
1215 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1217 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1216 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1218 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
1217 1219
1218 ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), 1220 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
1219 mode, HPTE_V_BOLTED, 1221 mode, HPTE_V_BOLTED,
1220 mmu_linear_psize, mmu_kernel_ssize); 1222 mmu_linear_psize, mmu_kernel_ssize);
1221 BUG_ON (ret < 0); 1223 BUG_ON (ret < 0);
@@ -1229,9 +1231,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1229{ 1231{
1230 unsigned long hash, hidx, slot; 1232 unsigned long hash, hidx, slot;
1231 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1233 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1232 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1234 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1233 1235
1234 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1236 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1235 spin_lock(&linear_map_hash_lock); 1237 spin_lock(&linear_map_hash_lock);
1236 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); 1238 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
1237 hidx = linear_map_hash_slots[lmi] & 0x7f; 1239 hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1241,7 +1243,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1241 hash = ~hash; 1243 hash = ~hash;
1242 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1244 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1243 slot += hidx & _PTEIDX_GROUP_IX; 1245 slot += hidx & _PTEIDX_GROUP_IX;
1244 ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); 1246 ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
1245} 1247}
1246 1248
1247void kernel_map_pages(struct page *page, int numpages, int enable) 1249void kernel_map_pages(struct page *page, int numpages, int enable)
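get_paca_psize() above now distinguishes the two encodings introduced for the 64TB address space: low slices keep one packed u64 with a 4-bit page-size field per slice, while high slices move to a byte array holding two 4-bit fields per byte. A stand-alone sketch of the same decoding; the slice counts and example values are illustrative assumptions (the low region is conventionally 16 slices of 256MB):

#include <stdint.h>
#include <stdio.h>

#define SLICE_NUM_HIGH 64   /* assumed number of high slices for 64TB */

/* One 4-bit psize per low slice, packed into a u64. */
static unsigned int low_slice_psize(uint64_t lpsizes, unsigned long index)
{
	return (lpsizes >> (index * 4)) & 0xF;
}

/* Two 4-bit psizes per byte for high slices, as in the new get_paca_psize(). */
static unsigned int high_slice_psize(const unsigned char *hpsizes,
				     unsigned long index)
{
	unsigned long mask_index = index & 0x1;

	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}

int main(void)
{
	uint64_t lpsizes = 0x3333333333333333ULL;        /* every low slice: psize 3 */
	unsigned char hpsizes[SLICE_NUM_HIGH / 2] = { 0x43 };

	printf("low slice 5 -> %u, high slice 1 -> %u\n",
	       low_slice_psize(lpsizes, 5), high_slice_psize(hpsizes, 1));
	return 0;
}
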
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cc5c273086cf..cecad348f604 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
18 pte_t *ptep, unsigned long trap, int local, int ssize, 18 pte_t *ptep, unsigned long trap, int local, int ssize,
19 unsigned int shift, unsigned int mmu_psize) 19 unsigned int shift, unsigned int mmu_psize)
20{ 20{
21 unsigned long vpn;
21 unsigned long old_pte, new_pte; 22 unsigned long old_pte, new_pte;
22 unsigned long va, rflags, pa, sz; 23 unsigned long rflags, pa, sz;
23 long slot; 24 long slot;
24 25
25 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); 26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
26 27
27 /* Search the Linux page table for a match with va */ 28 /* Search the Linux page table for a match with va */
28 va = hpt_va(ea, vsid, ssize); 29 vpn = hpt_vpn(ea, vsid, ssize);
29 30
30 /* At this point, we have a pte (old_pte) which can be used to build 31 /* At this point, we have a pte (old_pte) which can be used to build
31 * or update an HPTE. There are 2 cases: 32 * or update an HPTE. There are 2 cases:
@@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
69 /* There MIGHT be an HPTE for this pte */ 70 /* There MIGHT be an HPTE for this pte */
70 unsigned long hash, slot; 71 unsigned long hash, slot;
71 72
72 hash = hpt_hash(va, shift, ssize); 73 hash = hpt_hash(vpn, shift, ssize);
73 if (old_pte & _PAGE_F_SECOND) 74 if (old_pte & _PAGE_F_SECOND)
74 hash = ~hash; 75 hash = ~hash;
75 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 76 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
76 slot += (old_pte & _PAGE_F_GIX) >> 12; 77 slot += (old_pte & _PAGE_F_GIX) >> 12;
77 78
78 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, 79 if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
79 ssize, local) == -1) 80 ssize, local) == -1)
80 old_pte &= ~_PAGE_HPTEFLAGS; 81 old_pte &= ~_PAGE_HPTEFLAGS;
81 } 82 }
82 83
83 if (likely(!(old_pte & _PAGE_HASHPTE))) { 84 if (likely(!(old_pte & _PAGE_HASHPTE))) {
84 unsigned long hash = hpt_hash(va, shift, ssize); 85 unsigned long hash = hpt_hash(vpn, shift, ssize);
85 unsigned long hpte_group; 86 unsigned long hpte_group;
86 87
87 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; 88 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -101,14 +102,14 @@ repeat:
101 _PAGE_COHERENT | _PAGE_GUARDED)); 102 _PAGE_COHERENT | _PAGE_GUARDED));
102 103
103 /* Insert into the hash table, primary slot */ 104 /* Insert into the hash table, primary slot */
104 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, 105 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
105 mmu_psize, ssize); 106 mmu_psize, ssize);
106 107
107 /* Primary is full, try the secondary */ 108 /* Primary is full, try the secondary */
108 if (unlikely(slot == -1)) { 109 if (unlikely(slot == -1)) {
109 hpte_group = ((~hash & htab_hash_mask) * 110 hpte_group = ((~hash & htab_hash_mask) *
110 HPTES_PER_GROUP) & ~0x7UL; 111 HPTES_PER_GROUP) & ~0x7UL;
111 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 112 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
112 HPTE_V_SECONDARY, 113 HPTE_V_SECONDARY,
113 mmu_psize, ssize); 114 mmu_psize, ssize);
114 if (slot == -1) { 115 if (slot == -1) {
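The huge-page fault path above also shows how an existing HPTE is located again without searching: the Linux PTE caches whether the entry went to the secondary group (_PAGE_F_SECOND) and its index within the group (_PAGE_F_GIX), so the slot is rebuilt from the hash plus those two hints. A sketch of that reconstruction; the two flag values are assumptions chosen to be consistent with the ">> 12" in the hunk, not quoted from the headers:

#include <stdio.h>

#define HPTES_PER_GROUP 8
#define HTAB_HASH_MASK  ((1UL << 20) - 1)  /* assumed */
#define PAGE_F_SECOND   0x8000UL           /* assumed: "went to secondary group" hint */
#define PAGE_F_GIX      0x7000UL           /* assumed: 3-bit group index at bits 12-14 */

/* Rebuild the hash-table slot from the hash and the hints cached in the PTE. */
static unsigned long pte_to_slot(unsigned long hash, unsigned long pte)
{
	unsigned long slot;

	if (pte & PAGE_F_SECOND)
		hash = ~hash;
	slot = (hash & HTAB_HASH_MASK) * HPTES_PER_GROUP;
	slot += (pte & PAGE_F_GIX) >> 12;
	return slot;
}

int main(void)
{
	printf("slot = %lu\n", pte_to_slot(0x1234UL, PAGE_F_SECOND | 0x3000UL));
	return 0;
}
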
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 620b7acd2fdf..95a45293e5ac 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -62,7 +62,6 @@
62#include <asm/cputable.h> 62#include <asm/cputable.h>
63#include <asm/sections.h> 63#include <asm/sections.h>
64#include <asm/iommu.h> 64#include <asm/iommu.h>
65#include <asm/abs_addr.h>
66#include <asm/vdso.h> 65#include <asm/vdso.h>
67 66
68#include "mmu_decl.h" 67#include "mmu_decl.h"
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index fbdad0e3929a..0dba5066c22a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -62,7 +62,7 @@
62 62
63int init_bootmem_done; 63int init_bootmem_done;
64int mem_init_done; 64int mem_init_done;
65phys_addr_t memory_limit; 65unsigned long long memory_limit;
66 66
67#ifdef CONFIG_HIGHMEM 67#ifdef CONFIG_HIGHMEM
68pte_t *kmap_pte; 68pte_t *kmap_pte;
@@ -300,8 +300,7 @@ void __init mem_init(void)
300 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; 300 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
301 301
302#ifdef CONFIG_SWIOTLB 302#ifdef CONFIG_SWIOTLB
303 if (ppc_swiotlb_enable) 303 swiotlb_init(0);
304 swiotlb_init(1);
305#endif 304#endif
306 305
307 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; 306 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40677aa0190e..40bc5b0ace54 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -30,11 +30,13 @@ static DEFINE_SPINLOCK(mmu_context_lock);
30static DEFINE_IDA(mmu_context_ida); 30static DEFINE_IDA(mmu_context_ida);
31 31
32/* 32/*
33 * The proto-VSID space has 2^35 - 1 segments available for user mappings. 33 * 256MB segment
34 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, 34 * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
35 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 35 * available for user mappings. Each segment contains 2^28 bytes. Each
36 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
37 * (19 == 37 + 28 - 46).
36 */ 38 */
37#define MAX_CONTEXT ((1UL << 19) - 1) 39#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
38 40
39int __init_new_context(void) 41int __init_new_context(void)
40{ 42{
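The updated comment above encodes the 64TB sizing: each context now spans 2^46 bytes, segments stay at 2^28 bytes, so a context needs 2^18 user segments, and the 2^37 proto-VSID segments leave room for 2^19 - 1 contexts (19 = 37 + 28 - 46). A tiny sketch with the bit widths written out; CONTEXT_BITS and USER_ESID_BITS are inferred from that comment, not quoted from the headers:

#include <stdio.h>

#define CONTEXT_BITS   19   /* inferred: 2^19 - 1 contexts */
#define USER_ESID_BITS 18   /* inferred: 46 - 28 */
#define SID_SHIFT      28   /* 256MB segments */

int main(void)
{
	unsigned long max_context = (1UL << CONTEXT_BITS) - 1;
	unsigned long bytes_per_context = 1UL << (USER_ESID_BITS + SID_SHIFT);

	printf("MAX_CONTEXT = %lu\n", max_context);
	printf("address space per context = %lu TB\n", bytes_per_context >> 40);
	printf("proto-VSID segments = 2^%d - 1\n", CONTEXT_BITS + USER_ESID_BITS);
	return 0;
}
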
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 249a0631c4db..e212a271c7a4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -51,13 +51,22 @@
51#include <asm/processor.h> 51#include <asm/processor.h>
52#include <asm/cputable.h> 52#include <asm/cputable.h>
53#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/abs_addr.h>
55#include <asm/firmware.h> 54#include <asm/firmware.h>
56 55
57#include "mmu_decl.h" 56#include "mmu_decl.h"
58 57
59unsigned long ioremap_bot = IOREMAP_BASE; 58/* Some sanity checking */
59#if TASK_SIZE_USER64 > PGTABLE_RANGE
60#error TASK_SIZE_USER64 exceeds pagetable range
61#endif
62
63#ifdef CONFIG_PPC_STD_MMU_64
64#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
65#error TASK_SIZE_USER64 exceeds user VSID range
66#endif
67#endif
60 68
69unsigned long ioremap_bot = IOREMAP_BASE;
61 70
62#ifdef CONFIG_PPC_MMU_NOHASH 71#ifdef CONFIG_PPC_MMU_NOHASH
63static void *early_alloc_pgtable(unsigned long size) 72static void *early_alloc_pgtable(unsigned long size)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index b9ee79ce2200..1a16ca227757 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -56,6 +56,12 @@ _GLOBAL(slb_allocate_realmode)
56 */ 56 */
57_GLOBAL(slb_miss_kernel_load_linear) 57_GLOBAL(slb_miss_kernel_load_linear)
58 li r11,0 58 li r11,0
59 li r9,0x1
60 /*
61 * for 1T we shift 12 bits more. slb_finish_load_1T will do
62 * the necessary adjustment
63 */
64 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
59BEGIN_FTR_SECTION 65BEGIN_FTR_SECTION
60 b slb_finish_load 66 b slb_finish_load
61END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 67END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -85,6 +91,12 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
85 _GLOBAL(slb_miss_kernel_load_io) 91 _GLOBAL(slb_miss_kernel_load_io)
86 li r11,0 92 li r11,0
876: 936:
94 li r9,0x1
95 /*
96 * for 1T we shift 12 bits more. slb_finish_load_1T will do
97 * the necessary adjustment
98 */
99 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
88BEGIN_FTR_SECTION 100BEGIN_FTR_SECTION
89 b slb_finish_load 101 b slb_finish_load
90END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 102END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -108,17 +120,31 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
108 * between 4k and 64k standard page size 120 * between 4k and 64k standard page size
109 */ 121 */
110#ifdef CONFIG_PPC_MM_SLICES 122#ifdef CONFIG_PPC_MM_SLICES
123 /* r10 have esid */
111 cmpldi r10,16 124 cmpldi r10,16
112 125 /* below SLICE_LOW_TOP */
113 /* Get the slice index * 4 in r11 and matching slice size mask in r9 */
114 ld r9,PACALOWSLICESPSIZE(r13)
115 sldi r11,r10,2
116 blt 5f 126 blt 5f
117 ld r9,PACAHIGHSLICEPSIZE(r13) 127 /*
118 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2) 128 * Handle hpsizes,
119 andi. r11,r11,0x3c 129 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
130 */
131 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
132 addi r9,r11,PACAHIGHSLICEPSIZE
133 lbzx r9,r13,r9 /* r9 is hpsizes[r11] */
134 /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
135 rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
136 b 6f
120 137
1215: /* Extract the psize and multiply to get an array offset */ 1385:
139 /*
140 * Handle lpsizes
141 * r9 is get_paca()->context.low_slices_psize, r11 is index
142 */
143 ld r9,PACALOWSLICESPSIZE(r13)
144 mr r11,r10
1456:
146 sldi r11,r11,2 /* index * 4 */
147 /* Extract the psize and multiply to get an array offset */
122 srd r9,r9,r11 148 srd r9,r9,r11
123 andi. r9,r9,0xf 149 andi. r9,r9,0xf
124 mulli r9,r9,MMUPSIZEDEFSIZE 150 mulli r9,r9,MMUPSIZEDEFSIZE
@@ -209,7 +235,11 @@ _GLOBAL(slb_allocate_user)
209 */ 235 */
210slb_finish_load: 236slb_finish_load:
211 ASM_VSID_SCRAMBLE(r10,r9,256M) 237 ASM_VSID_SCRAMBLE(r10,r9,256M)
212 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */ 238 /*
239 * bits above VSID_BITS_256M need to be ignored from r10
240 * also combine VSID and flags
241 */
242 rldimi r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
213 243
214 /* r3 = EA, r11 = VSID data */ 244 /* r3 = EA, r11 = VSID data */
215 /* 245 /*
@@ -252,10 +282,10 @@ _GLOBAL(slb_compare_rr_to_size)
252 bge 1f 282 bge 1f
253 283
254 /* still room in the slb cache */ 284 /* still room in the slb cache */
255 sldi r11,r3,1 /* r11 = offset * sizeof(u16) */ 285 sldi r11,r3,2 /* r11 = offset * sizeof(u32) */
256 rldicl r10,r10,36,28 /* get low 16 bits of the ESID */ 286 srdi r10,r10,28 /* get the 36 bits of the ESID */
257 add r11,r11,r13 /* r11 = (u16 *)paca + offset */ 287 add r11,r11,r13 /* r11 = (u32 *)paca + offset */
258 sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ 288 stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
259 addi r3,r3,1 /* offset++ */ 289 addi r3,r3,1 /* offset++ */
260 b 2f 290 b 2f
2611: /* offset >= SLB_CACHE_ENTRIES */ 2911: /* offset >= SLB_CACHE_ENTRIES */
@@ -273,7 +303,11 @@ _GLOBAL(slb_compare_rr_to_size)
273slb_finish_load_1T: 303slb_finish_load_1T:
274 srdi r10,r10,40-28 /* get 1T ESID */ 304 srdi r10,r10,40-28 /* get 1T ESID */
275 ASM_VSID_SCRAMBLE(r10,r9,1T) 305 ASM_VSID_SCRAMBLE(r10,r9,1T)
276 rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */ 306 /*
307 * bits above VSID_BITS_1T need to be ignored from r10
308 * also combine VSID and flags
309 */
310 rldimi r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
277 li r10,MMU_SEGSIZE_1T 311 li r10,MMU_SEGSIZE_1T
278 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */ 312 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
279 313
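The rldimi changes in slb_finish_load and slb_finish_load_1T exist because the scrambled VSID is now wider, so, per the new comments, bits above VSID_BITS_256M (or VSID_BITS_1T) must be discarded when the VSID is merged with the SLB flags. In C terms the operation is roughly an insert of the low VSID_BITS of the scrambled value above the flag bits; the constants below are assumptions chosen only to make the sketch compile, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define SLB_VSID_SHIFT 12   /* assumed */
#define VSID_BITS_256M 38   /* assumed width of a scrambled 256M VSID */

/*
 * Roughly what the new rldimi does: keep only the low VSID_BITS_256M bits of
 * the (possibly wider) scrambled VSID and place them above the flag bits.
 */
static uint64_t combine_vsid_flags(uint64_t scrambled_vsid, uint64_t flags)
{
	uint64_t vsid_mask = (1ULL << VSID_BITS_256M) - 1;

	return flags | ((scrambled_vsid & vsid_mask) << SLB_VSID_SHIFT);
}

int main(void)
{
	printf("%#llx\n",
	       (unsigned long long)combine_vsid_flags(0x123456789abcdefULL, 0x400ULL));
	return 0;
}
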
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 73709f7ce92c..5829d2a950d4 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -34,6 +34,11 @@
34#include <asm/mmu.h> 34#include <asm/mmu.h>
35#include <asm/spu.h> 35#include <asm/spu.h>
36 36
37/* some sanity checks */
38#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
39#error PGTABLE_RANGE exceeds slice_mask high_slices size
40#endif
41
37static DEFINE_SPINLOCK(slice_convert_lock); 42static DEFINE_SPINLOCK(slice_convert_lock);
38 43
39 44
@@ -42,7 +47,7 @@ int _slice_debug = 1;
42 47
43static void slice_print_mask(const char *label, struct slice_mask mask) 48static void slice_print_mask(const char *label, struct slice_mask mask)
44{ 49{
45 char *p, buf[16 + 3 + 16 + 1]; 50 char *p, buf[16 + 3 + 64 + 1];
46 int i; 51 int i;
47 52
48 if (!_slice_debug) 53 if (!_slice_debug)
@@ -54,7 +59,7 @@ static void slice_print_mask(const char *label, struct slice_mask mask)
54 *(p++) = '-'; 59 *(p++) = '-';
55 *(p++) = ' '; 60 *(p++) = ' ';
56 for (i = 0; i < SLICE_NUM_HIGH; i++) 61 for (i = 0; i < SLICE_NUM_HIGH; i++)
57 *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0'; 62 *(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
58 *(p++) = 0; 63 *(p++) = 0;
59 64
60 printk(KERN_DEBUG "%s:%s\n", label, buf); 65 printk(KERN_DEBUG "%s:%s\n", label, buf);
@@ -84,8 +89,8 @@ static struct slice_mask slice_range_to_mask(unsigned long start,
84 } 89 }
85 90
86 if ((start + len) > SLICE_LOW_TOP) 91 if ((start + len) > SLICE_LOW_TOP)
87 ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1)) 92 ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
88 - (1u << GET_HIGH_SLICE_INDEX(start)); 93 - (1ul << GET_HIGH_SLICE_INDEX(start));
89 94
90 return ret; 95 return ret;
91} 96}
@@ -135,26 +140,31 @@ static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
135 140
136 for (i = 0; i < SLICE_NUM_HIGH; i++) 141 for (i = 0; i < SLICE_NUM_HIGH; i++)
137 if (!slice_high_has_vma(mm, i)) 142 if (!slice_high_has_vma(mm, i))
138 ret.high_slices |= 1u << i; 143 ret.high_slices |= 1ul << i;
139 144
140 return ret; 145 return ret;
141} 146}
142 147
143static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) 148static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
144{ 149{
150 unsigned char *hpsizes;
151 int index, mask_index;
145 struct slice_mask ret = { 0, 0 }; 152 struct slice_mask ret = { 0, 0 };
146 unsigned long i; 153 unsigned long i;
147 u64 psizes; 154 u64 lpsizes;
148 155
149 psizes = mm->context.low_slices_psize; 156 lpsizes = mm->context.low_slices_psize;
150 for (i = 0; i < SLICE_NUM_LOW; i++) 157 for (i = 0; i < SLICE_NUM_LOW; i++)
151 if (((psizes >> (i * 4)) & 0xf) == psize) 158 if (((lpsizes >> (i * 4)) & 0xf) == psize)
152 ret.low_slices |= 1u << i; 159 ret.low_slices |= 1u << i;
153 160
154 psizes = mm->context.high_slices_psize; 161 hpsizes = mm->context.high_slices_psize;
155 for (i = 0; i < SLICE_NUM_HIGH; i++) 162 for (i = 0; i < SLICE_NUM_HIGH; i++) {
156 if (((psizes >> (i * 4)) & 0xf) == psize) 163 mask_index = i & 0x1;
157 ret.high_slices |= 1u << i; 164 index = i >> 1;
165 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
166 ret.high_slices |= 1ul << i;
167 }
158 168
159 return ret; 169 return ret;
160} 170}
@@ -183,8 +193,10 @@ static void slice_flush_segments(void *parm)
183 193
184static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) 194static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
185{ 195{
196 int index, mask_index;
186 /* Write the new slice psize bits */ 197 /* Write the new slice psize bits */
187 u64 lpsizes, hpsizes; 198 unsigned char *hpsizes;
199 u64 lpsizes;
188 unsigned long i, flags; 200 unsigned long i, flags;
189 201
190 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); 202 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
@@ -201,14 +213,18 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
201 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 213 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
202 (((unsigned long)psize) << (i * 4)); 214 (((unsigned long)psize) << (i * 4));
203 215
204 hpsizes = mm->context.high_slices_psize; 216 /* Assign the value back */
205 for (i = 0; i < SLICE_NUM_HIGH; i++)
206 if (mask.high_slices & (1u << i))
207 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
208 (((unsigned long)psize) << (i * 4));
209
210 mm->context.low_slices_psize = lpsizes; 217 mm->context.low_slices_psize = lpsizes;
211 mm->context.high_slices_psize = hpsizes; 218
219 hpsizes = mm->context.high_slices_psize;
220 for (i = 0; i < SLICE_NUM_HIGH; i++) {
221 mask_index = i & 0x1;
222 index = i >> 1;
223 if (mask.high_slices & (1ul << i))
224 hpsizes[index] = (hpsizes[index] &
225 ~(0xf << (mask_index * 4))) |
226 (((unsigned long)psize) << (mask_index * 4));
227 }
212 228
213 slice_dbg(" lsps=%lx, hsps=%lx\n", 229 slice_dbg(" lsps=%lx, hsps=%lx\n",
214 mm->context.low_slices_psize, 230 mm->context.low_slices_psize,
@@ -587,18 +603,19 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
587 603
588unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) 604unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
589{ 605{
590 u64 psizes; 606 unsigned char *hpsizes;
591 int index; 607 int index, mask_index;
592 608
593 if (addr < SLICE_LOW_TOP) { 609 if (addr < SLICE_LOW_TOP) {
594 psizes = mm->context.low_slices_psize; 610 u64 lpsizes;
611 lpsizes = mm->context.low_slices_psize;
595 index = GET_LOW_SLICE_INDEX(addr); 612 index = GET_LOW_SLICE_INDEX(addr);
596 } else { 613 return (lpsizes >> (index * 4)) & 0xf;
597 psizes = mm->context.high_slices_psize;
598 index = GET_HIGH_SLICE_INDEX(addr);
599 } 614 }
600 615 hpsizes = mm->context.high_slices_psize;
601 return (psizes >> (index * 4)) & 0xf; 616 index = GET_HIGH_SLICE_INDEX(addr);
617 mask_index = index & 0x1;
618 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
602} 619}
603EXPORT_SYMBOL_GPL(get_slice_psize); 620EXPORT_SYMBOL_GPL(get_slice_psize);
604 621
@@ -618,7 +635,9 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
618 */ 635 */
619void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) 636void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
620{ 637{
621 unsigned long flags, lpsizes, hpsizes; 638 int index, mask_index;
639 unsigned char *hpsizes;
640 unsigned long flags, lpsizes;
622 unsigned int old_psize; 641 unsigned int old_psize;
623 int i; 642 int i;
624 643
@@ -639,15 +658,21 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
639 if (((lpsizes >> (i * 4)) & 0xf) == old_psize) 658 if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
640 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 659 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
641 (((unsigned long)psize) << (i * 4)); 660 (((unsigned long)psize) << (i * 4));
661 /* Assign the value back */
662 mm->context.low_slices_psize = lpsizes;
642 663
643 hpsizes = mm->context.high_slices_psize; 664 hpsizes = mm->context.high_slices_psize;
644 for (i = 0; i < SLICE_NUM_HIGH; i++) 665 for (i = 0; i < SLICE_NUM_HIGH; i++) {
645 if (((hpsizes >> (i * 4)) & 0xf) == old_psize) 666 mask_index = i & 0x1;
646 hpsizes = (hpsizes & ~(0xful << (i * 4))) | 667 index = i >> 1;
647 (((unsigned long)psize) << (i * 4)); 668 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
669 hpsizes[index] = (hpsizes[index] &
670 ~(0xf << (mask_index * 4))) |
671 (((unsigned long)psize) << (mask_index * 4));
672 }
673
674
648 675
649 mm->context.low_slices_psize = lpsizes;
650 mm->context.high_slices_psize = hpsizes;
651 676
652 slice_dbg(" lsps=%lx, hsps=%lx\n", 677 slice_dbg(" lsps=%lx, hsps=%lx\n",
653 mm->context.low_slices_psize, 678 mm->context.low_slices_psize,
@@ -660,18 +685,27 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
660void slice_set_psize(struct mm_struct *mm, unsigned long address, 685void slice_set_psize(struct mm_struct *mm, unsigned long address,
661 unsigned int psize) 686 unsigned int psize)
662{ 687{
688 unsigned char *hpsizes;
663 unsigned long i, flags; 689 unsigned long i, flags;
664 u64 *p; 690 u64 *lpsizes;
665 691
666 spin_lock_irqsave(&slice_convert_lock, flags); 692 spin_lock_irqsave(&slice_convert_lock, flags);
667 if (address < SLICE_LOW_TOP) { 693 if (address < SLICE_LOW_TOP) {
668 i = GET_LOW_SLICE_INDEX(address); 694 i = GET_LOW_SLICE_INDEX(address);
669 p = &mm->context.low_slices_psize; 695 lpsizes = &mm->context.low_slices_psize;
696 *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
697 ((unsigned long) psize << (i * 4));
670 } else { 698 } else {
699 int index, mask_index;
671 i = GET_HIGH_SLICE_INDEX(address); 700 i = GET_HIGH_SLICE_INDEX(address);
672 p = &mm->context.high_slices_psize; 701 hpsizes = mm->context.high_slices_psize;
702 mask_index = i & 0x1;
703 index = i >> 1;
704 hpsizes[index] = (hpsizes[index] &
705 ~(0xf << (mask_index * 4))) |
706 (((unsigned long)psize) << (mask_index * 4));
673 } 707 }
674 *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4)); 708
675 spin_unlock_irqrestore(&slice_convert_lock, flags); 709 spin_unlock_irqrestore(&slice_convert_lock, flags);
676 710
677#ifdef CONFIG_SPU_BASE 711#ifdef CONFIG_SPU_BASE
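The slice.c changes above switch context.high_slices_psize from a single u64 to a byte array so more than 16 high slices can be described, still with one 4-bit page-size field per slice and two fields per byte; the related 1u -> 1ul fixes matter because a 32-bit shift would overflow once there are more than 32 high slices. A stand-alone sketch of the read-modify-write used by slice_convert() and slice_set_user_psize(); the array size and psize value are illustrative:

#include <stdio.h>

#define SLICE_NUM_HIGH 64   /* assumed number of high slices for 64TB */

/* Write the 4-bit psize for high slice i, as slice_convert() now does. */
static void set_high_slice_psize(unsigned char *hpsizes, unsigned long i,
				 unsigned int psize)
{
	int mask_index = i & 0x1;
	int index = i >> 1;

	hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
			 (psize << (mask_index * 4));
}

/* Read it back, mirroring the new get_slice_psize(). */
static unsigned int get_high_slice_psize(const unsigned char *hpsizes,
					 unsigned long i)
{
	int mask_index = i & 0x1;

	return (hpsizes[i >> 1] >> (mask_index * 4)) & 0xf;
}

int main(void)
{
	unsigned char hpsizes[SLICE_NUM_HIGH / 2] = { 0 };

	set_high_slice_psize(hpsizes, 3, 0x4);
	printf("high slice 3 psize = %u\n", get_high_slice_psize(hpsizes, 3));
	return 0;
}
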
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 9106ebb118f5..3f8efa6f2997 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,7 +20,6 @@
20#include <asm/paca.h> 20#include <asm/paca.h>
21#include <asm/cputable.h> 21#include <asm/cputable.h>
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/abs_addr.h>
24 23
25struct stab_entry { 24struct stab_entry {
26 unsigned long esid_data; 25 unsigned long esid_data;
@@ -257,7 +256,7 @@ void __init stabs_alloc(void)
257 memset((void *)newstab, 0, HW_PAGE_SIZE); 256 memset((void *)newstab, 0, HW_PAGE_SIZE);
258 257
259 paca[cpu].stab_addr = newstab; 258 paca[cpu].stab_addr = newstab;
260 paca[cpu].stab_real = virt_to_abs(newstab); 259 paca[cpu].stab_real = __pa(newstab);
261 printk(KERN_INFO "Segment table for CPU %d at 0x%llx " 260 printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
262 "virtual, 0x%llx absolute\n", 261 "virtual, 0x%llx absolute\n",
263 cpu, paca[cpu].stab_addr, paca[cpu].stab_real); 262 cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index e4f8f1fc81a5..7c415ddde948 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -95,7 +95,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
95 struct mm_struct *mm = current->mm; 95 struct mm_struct *mm = current->mm;
96 struct subpage_prot_table *spt = &mm->context.spt; 96 struct subpage_prot_table *spt = &mm->context.spt;
97 u32 **spm, *spp; 97 u32 **spm, *spp;
98 int i, nw; 98 unsigned long i;
99 size_t nw;
99 unsigned long next, limit; 100 unsigned long next, limit;
100 101
101 down_write(&mm->mmap_sem); 102 down_write(&mm->mmap_sem);
@@ -144,7 +145,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
144 struct mm_struct *mm = current->mm; 145 struct mm_struct *mm = current->mm;
145 struct subpage_prot_table *spt = &mm->context.spt; 146 struct subpage_prot_table *spt = &mm->context.spt;
146 u32 **spm, *spp; 147 u32 **spm, *spp;
147 int i, nw; 148 unsigned long i;
149 size_t nw;
148 unsigned long next, limit; 150 unsigned long next, limit;
149 int err; 151 int err;
150 152
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 31f18207970b..ae758b3ff72c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
42void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
43 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
44{ 44{
45 unsigned long vpn;
45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); 46 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
46 unsigned long vsid, vaddr; 47 unsigned long vsid;
47 unsigned int psize; 48 unsigned int psize;
48 int ssize; 49 int ssize;
49 real_pte_t rpte; 50 real_pte_t rpte;
@@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 87 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
87 ssize = mmu_kernel_ssize; 88 ssize = mmu_kernel_ssize;
88 } 89 }
89 vaddr = hpt_va(addr, vsid, ssize); 90 vpn = hpt_vpn(addr, vsid, ssize);
90 rpte = __real_pte(__pte(pte), ptep); 91 rpte = __real_pte(__pte(pte), ptep);
91 92
92 /* 93 /*
@@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
96 * and decide to use local invalidates instead... 97 * and decide to use local invalidates instead...
97 */ 98 */
98 if (!batch->active) { 99 if (!batch->active) {
99 flush_hash_page(vaddr, rpte, psize, ssize, 0); 100 flush_hash_page(vpn, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch); 101 put_cpu_var(ppc64_tlb_batch);
101 return; 102 return;
102 } 103 }
@@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
122 batch->ssize = ssize; 123 batch->ssize = ssize;
123 } 124 }
124 batch->pte[i] = rpte; 125 batch->pte[i] = rpte;
125 batch->vaddr[i] = vaddr; 126 batch->vpn[i] = vpn;
126 batch->index = ++i; 127 batch->index = ++i;
127 if (i >= PPC64_TLB_BATCH_NR) 128 if (i >= PPC64_TLB_BATCH_NR)
128 __flush_tlb_pending(batch); 129 __flush_tlb_pending(batch);
@@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
146 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) 147 if (cpumask_equal(mm_cpumask(batch->mm), tmp))
147 local = 1; 148 local = 1;
148 if (i == 1) 149 if (i == 1)
149 flush_hash_page(batch->vaddr[0], batch->pte[0], 150 flush_hash_page(batch->vpn[0], batch->pte[0],
150 batch->psize, batch->ssize, local); 151 batch->psize, batch->ssize, local);
151 else 152 else
152 flush_hash_range(i, local); 153 flush_hash_range(i, local);
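hpte_need_flush() above now queues the VPN rather than the VA, but the batching pattern is unchanged: if batching is inactive, flush the single page immediately; otherwise append to the per-CPU batch and drain it once PPC64_TLB_BATCH_NR entries accumulate. A simplified sketch of that shape; the batch size and flush callbacks are stand-ins, not the kernel's:

#include <stdio.h>

#define TLB_BATCH_NR 192   /* stand-in for PPC64_TLB_BATCH_NR */

struct tlb_batch {
	int active;
	unsigned long index;
	unsigned long vpn[TLB_BATCH_NR];
};

static void flush_one(unsigned long vpn)
{
	printf("flush vpn %#lx\n", vpn);
}

static void flush_pending(struct tlb_batch *batch)
{
	for (unsigned long i = 0; i < batch->index; i++)
		flush_one(batch->vpn[i]);
	batch->index = 0;
}

/* Mirrors the shape of hpte_need_flush(): immediate flush vs. batched flush. */
static void need_flush(struct tlb_batch *batch, unsigned long vpn)
{
	if (!batch->active) {
		flush_one(vpn);
		return;
	}
	batch->vpn[batch->index++] = vpn;
	if (batch->index >= TLB_BATCH_NR)
		flush_pending(batch);
}

int main(void)
{
	struct tlb_batch batch = { .active = 1 };

	need_flush(&batch, 0x1000);
	need_flush(&batch, 0x2000);
	flush_pending(&batch);
	return 0;
}
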
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f09d48e3268d..b4113bf86353 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -20,6 +20,8 @@
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/exception-64e.h> 21#include <asm/exception-64e.h>
22#include <asm/ppc-opcode.h> 22#include <asm/ppc-opcode.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_booke_hv_asm.h>
23 25
24#ifdef CONFIG_PPC_64K_PAGES 26#ifdef CONFIG_PPC_64K_PAGES
25#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1) 27#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
@@ -37,12 +39,18 @@
37 * * 39 * *
38 **********************************************************************/ 40 **********************************************************************/
39 41
40.macro tlb_prolog_bolted addr 42.macro tlb_prolog_bolted intnum addr
41 mtspr SPRN_SPRG_TLB_SCRATCH,r13 43 mtspr SPRN_SPRG_GEN_SCRATCH,r13
42 mfspr r13,SPRN_SPRG_PACA 44 mfspr r13,SPRN_SPRG_PACA
43 std r10,PACA_EXTLB+EX_TLB_R10(r13) 45 std r10,PACA_EXTLB+EX_TLB_R10(r13)
44 mfcr r10 46 mfcr r10
45 std r11,PACA_EXTLB+EX_TLB_R11(r13) 47 std r11,PACA_EXTLB+EX_TLB_R11(r13)
48#ifdef CONFIG_KVM_BOOKE_HV
49BEGIN_FTR_SECTION
50 mfspr r11, SPRN_SRR1
51END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
52#endif
53 DO_KVM \intnum, SPRN_SRR1
46 std r16,PACA_EXTLB+EX_TLB_R16(r13) 54 std r16,PACA_EXTLB+EX_TLB_R16(r13)
47 mfspr r16,\addr /* get faulting address */ 55 mfspr r16,\addr /* get faulting address */
48 std r14,PACA_EXTLB+EX_TLB_R14(r13) 56 std r14,PACA_EXTLB+EX_TLB_R14(r13)
@@ -61,12 +69,12 @@
61 ld r15,PACA_EXTLB+EX_TLB_R15(r13) 69 ld r15,PACA_EXTLB+EX_TLB_R15(r13)
62 TLB_MISS_RESTORE_STATS_BOLTED 70 TLB_MISS_RESTORE_STATS_BOLTED
63 ld r16,PACA_EXTLB+EX_TLB_R16(r13) 71 ld r16,PACA_EXTLB+EX_TLB_R16(r13)
64 mfspr r13,SPRN_SPRG_TLB_SCRATCH 72 mfspr r13,SPRN_SPRG_GEN_SCRATCH
65.endm 73.endm
66 74
67/* Data TLB miss */ 75/* Data TLB miss */
68 START_EXCEPTION(data_tlb_miss_bolted) 76 START_EXCEPTION(data_tlb_miss_bolted)
69 tlb_prolog_bolted SPRN_DEAR 77 tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
70 78
71 /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ 79 /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
72 80
@@ -214,7 +222,7 @@ itlb_miss_fault_bolted:
214 222
215/* Instruction TLB miss */ 223/* Instruction TLB miss */
216 START_EXCEPTION(instruction_tlb_miss_bolted) 224 START_EXCEPTION(instruction_tlb_miss_bolted)
217 tlb_prolog_bolted SPRN_SRR0 225 tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
218 226
219 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 227 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
220 srdi r15,r16,60 /* get region */ 228 srdi r15,r16,60 /* get region */
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 95ae77dec3f6..315f9495e9b2 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -21,6 +21,13 @@
21#include <asm/reg.h> 21#include <asm/reg.h>
22 22
23#define dbg(args...) 23#define dbg(args...)
24#define OPROFILE_PM_PMCSEL_MSK 0xffULL
25#define OPROFILE_PM_UNIT_SHIFT 60
26#define OPROFILE_PM_UNIT_MSK 0xfULL
27#define OPROFILE_MAX_PMC_NUM 3
28#define OPROFILE_PMSEL_FIELD_WIDTH 8
29#define OPROFILE_UNIT_FIELD_WIDTH 4
30#define MMCRA_SIAR_VALID_MASK 0x10000000ULL
24 31
25static unsigned long reset_value[OP_MAX_COUNTER]; 32static unsigned long reset_value[OP_MAX_COUNTER];
26 33
@@ -31,6 +38,61 @@ static int use_slot_nums;
31static u32 mmcr0_val; 38static u32 mmcr0_val;
32static u64 mmcr1_val; 39static u64 mmcr1_val;
33static u64 mmcra_val; 40static u64 mmcra_val;
41static u32 cntr_marked_events;
42
43static int power7_marked_instr_event(u64 mmcr1)
44{
45 u64 psel, unit;
46 int pmc, cntr_marked_events = 0;
47
48 /* Given the MMCR1 value, look at the field for each counter to
49 * determine if it is a marked event. Code based on the function
50 * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
51 */
52 for (pmc = 0; pmc < 4; pmc++) {
53 psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
54 << (OPROFILE_MAX_PMC_NUM - pmc)
55 * OPROFILE_MAX_PMC_NUM);
56 psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
57 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
58 unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
59 << (OPROFILE_PM_UNIT_SHIFT
60 - (pmc * OPROFILE_PMSEL_FIELD_WIDTH )));
61 unit = unit >> (OPROFILE_PM_UNIT_SHIFT
62 - (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
63
64 switch (psel >> 4) {
65 case 2:
66 cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
67 break;
68 case 3:
69 if (psel == 0x3c) {
70 cntr_marked_events |= (pmc == 0) << pmc;
71 break;
72 }
73
74 if (psel == 0x3e) {
75 cntr_marked_events |= (pmc != 1) << pmc;
76 break;
77 }
78
79 cntr_marked_events |= 1 << pmc;
80 break;
81 case 4:
82 case 5:
83 cntr_marked_events |= (unit == 0xd) << pmc;
84 break;
85 case 6:
86 if (psel == 0x64)
87 cntr_marked_events |= (pmc >= 2) << pmc;
88 break;
89 case 8:
90 cntr_marked_events |= (unit == 0xd) << pmc;
91 break;
92 }
93 }
94 return cntr_marked_events;
95}
34 96
35static int power4_reg_setup(struct op_counter_config *ctr, 97static int power4_reg_setup(struct op_counter_config *ctr,
36 struct op_system_config *sys, 98 struct op_system_config *sys,
@@ -47,6 +109,23 @@ static int power4_reg_setup(struct op_counter_config *ctr,
47 mmcr1_val = sys->mmcr1; 109 mmcr1_val = sys->mmcr1;
48 mmcra_val = sys->mmcra; 110 mmcra_val = sys->mmcra;
49 111
112 /* Power 7+ and newer architectures:
113 * Determine which counter events in the group (the group of events is
114 * specified by the bit settings in the MMCR1 register) are marked
115 * events for use in the interrupt handler. Do the calculation once
116 * before OProfile starts. Information is used in the interrupt
117 * handler. Starting with Power 7+ we only record the sample for
118 * marked events if the SIAR valid bit is set. For non marked events
119 * the sample is always recorded.
120 */
121 if (pvr_version_is(PVR_POWER7p))
122 cntr_marked_events = power7_marked_instr_event(mmcr1_val);
123 else
124 cntr_marked_events = 0; /* For older processors, set the bit map
125 * to zero so the sample will always be
126 * be recorded.
127 */
128
50 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) 129 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
51 reset_value[i] = 0x80000000UL - ctr[i].count; 130 reset_value[i] = 0x80000000UL - ctr[i].count;
52 131
@@ -61,10 +140,10 @@ static int power4_reg_setup(struct op_counter_config *ctr,
61 else 140 else
62 mmcr0_val |= MMCR0_PROBLEM_DISABLE; 141 mmcr0_val |= MMCR0_PROBLEM_DISABLE;
63 142
64 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || 143 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
65 __is_processor(PV_970) || __is_processor(PV_970FX) || 144 pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
66 __is_processor(PV_970MP) || __is_processor(PV_970GX) || 145 pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
67 __is_processor(PV_POWER5) || __is_processor(PV_POWER5p)) 146 pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
68 use_slot_nums = 1; 147 use_slot_nums = 1;
69 148
70 return 0; 149 return 0;
@@ -84,9 +163,9 @@ extern void ppc_enable_pmcs(void);
84 */ 163 */
85static inline int mmcra_must_set_sample(void) 164static inline int mmcra_must_set_sample(void)
86{ 165{
87 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) || 166 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
88 __is_processor(PV_970) || __is_processor(PV_970FX) || 167 pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
89 __is_processor(PV_970MP) || __is_processor(PV_970GX)) 168 pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
90 return 1; 169 return 1;
91 170
92 return 0; 171 return 0;
@@ -276,7 +355,7 @@ static bool pmc_overflow(unsigned long val)
276 * PMCs because a user might set a period of less than 256 and we 355 * PMCs because a user might set a period of less than 256 and we
277 * don't want to mistakenly reset them. 356 * don't want to mistakenly reset them.
278 */ 357 */
279 if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) 358 if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
280 return true; 359 return true;
281 360
282 return false; 361 return false;
@@ -291,6 +370,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
291 int i; 370 int i;
292 unsigned int mmcr0; 371 unsigned int mmcr0;
293 unsigned long mmcra; 372 unsigned long mmcra;
373 bool siar_valid = false;
294 374
295 mmcra = mfspr(SPRN_MMCRA); 375 mmcra = mfspr(SPRN_MMCRA);
296 376
@@ -300,11 +380,29 @@ static void power4_handle_interrupt(struct pt_regs *regs,
300 /* set the PMM bit (see comment below) */ 380 /* set the PMM bit (see comment below) */
301 mtmsrd(mfmsr() | MSR_PMM); 381 mtmsrd(mfmsr() | MSR_PMM);
302 382
383 /* Check that the SIAR valid bit in MMCRA is set to 1. */
384 if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
385 siar_valid = true;
386
303 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { 387 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
304 val = classic_ctr_read(i); 388 val = classic_ctr_read(i);
305 if (pmc_overflow(val)) { 389 if (pmc_overflow(val)) {
306 if (oprofile_running && ctr[i].enabled) { 390 if (oprofile_running && ctr[i].enabled) {
307 oprofile_add_ext_sample(pc, regs, i, is_kernel); 391 /* Power 7+ and newer architectures:
392 * If the event is a marked event, then only
393 * save the sample if the SIAR valid bit is
394 * set. If the event is not marked, then
395 * always save the sample.
396 * Note, the Sample enable bit in the MMCRA
397 * register must be set to 1 if the group
398 * contains a marked event.
399 */
400 if ((siar_valid &&
401 (cntr_marked_events & (1 << i)))
402 || !(cntr_marked_events & (1 << i)))
403 oprofile_add_ext_sample(pc, regs, i,
404 is_kernel);
405
308 classic_ctr_write(i, reset_value[i]); 406 classic_ctr_write(i, reset_value[i]);
309 } else { 407 } else {
310 classic_ctr_write(i, 0); 408 classic_ctr_write(i, 0);
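The op_model_power4.c changes above pre-compute, from MMCR1, a bitmap of counters whose events are marked, and then record a sample for a marked counter only when MMCRA reports a valid SIAR. The gating decision reduces to a small predicate; the sketch below reuses the MMCRA_SIAR_VALID_MASK value defined in the hunk and treats the rest as given:

#include <stdio.h>

#define MMCRA_SIAR_VALID_MASK 0x10000000ULL   /* from the hunk above */

/*
 * Should a sample for counter i be recorded?  Unmarked events always record;
 * marked events record only when the SIAR-valid bit is set in MMCRA.
 */
static int record_sample(unsigned long mmcra, unsigned int cntr_marked_events, int i)
{
	int siar_valid = (mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK;
	int marked = (cntr_marked_events >> i) & 1;

	return !marked || siar_valid;
}

int main(void)
{
	printf("%d %d %d\n",
	       record_sample(0x0, 0x2, 1),                   /* marked, SIAR invalid -> 0 */
	       record_sample(MMCRA_SIAR_VALID_MASK, 0x2, 1), /* marked, SIAR valid   -> 1 */
	       record_sample(0x0, 0x2, 0));                  /* unmarked             -> 1 */
	return 0;
}
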
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7cd2dbd6e4c4..0db88f501f91 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -82,6 +82,11 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
82 return 0; 82 return 0;
83} 83}
84 84
85static inline int siar_valid(struct pt_regs *regs)
86{
87 return 1;
88}
89
85#endif /* CONFIG_PPC32 */ 90#endif /* CONFIG_PPC32 */
86 91
87/* 92/*
@@ -106,14 +111,20 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
106 * If we're not doing instruction sampling, give them the SDAR 111 * If we're not doing instruction sampling, give them the SDAR
107 * (sampled data address). If we are doing instruction sampling, then 112 * (sampled data address). If we are doing instruction sampling, then
108 * only give them the SDAR if it corresponds to the instruction 113 * only give them the SDAR if it corresponds to the instruction
109 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC 114 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
110 * bit in MMCRA. 115 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
111 */ 116 */
112static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) 117static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
113{ 118{
114 unsigned long mmcra = regs->dsisr; 119 unsigned long mmcra = regs->dsisr;
115 unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? 120 unsigned long sdsync;
116 POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; 121
122 if (ppmu->flags & PPMU_SIAR_VALID)
123 sdsync = POWER7P_MMCRA_SDAR_VALID;
124 else if (ppmu->flags & PPMU_ALT_SIPR)
125 sdsync = POWER6_MMCRA_SDSYNC;
126 else
127 sdsync = MMCRA_SDSYNC;
117 128
118 if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) 129 if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
119 *addrp = mfspr(SPRN_SDAR); 130 *addrp = mfspr(SPRN_SDAR);
@@ -230,6 +241,24 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
230 return !regs->softe; 241 return !regs->softe;
231} 242}
232 243
244/*
245 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
246 * must be sampled only if the SIAR-valid bit is set.
247 *
248 * For unmarked instructions and for processors that don't have the SIAR-Valid
249 * bit, assume that SIAR is valid.
250 */
251static inline int siar_valid(struct pt_regs *regs)
252{
253 unsigned long mmcra = regs->dsisr;
254 int marked = mmcra & MMCRA_SAMPLE_ENABLE;
255
256 if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
257 return mmcra & POWER7P_MMCRA_SIAR_VALID;
258
259 return 1;
260}
261
233#endif /* CONFIG_PPC64 */ 262#endif /* CONFIG_PPC64 */
234 263
235static void perf_event_interrupt(struct pt_regs *regs); 264static void perf_event_interrupt(struct pt_regs *regs);
@@ -1291,6 +1320,7 @@ struct pmu power_pmu = {
1291 .event_idx = power_pmu_event_idx, 1320 .event_idx = power_pmu_event_idx,
1292}; 1321};
1293 1322
1323
1294/* 1324/*
1295 * A counter has overflowed; update its count and record 1325 * A counter has overflowed; update its count and record
1296 * things if requested. Note that interrupts are hard-disabled 1326 * things if requested. Note that interrupts are hard-disabled
@@ -1324,7 +1354,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1324 left += period; 1354 left += period;
1325 if (left <= 0) 1355 if (left <= 0)
1326 left = period; 1356 left = period;
1327 record = 1; 1357 record = siar_valid(regs);
1328 event->hw.last_period = event->hw.sample_period; 1358 event->hw.last_period = event->hw.sample_period;
1329 } 1359 }
1330 if (left < 0x80000000LL) 1360 if (left < 0x80000000LL)
@@ -1374,8 +1404,10 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
1374{ 1404{
1375 unsigned long use_siar = regs->result; 1405 unsigned long use_siar = regs->result;
1376 1406
1377 if (use_siar) 1407 if (use_siar && siar_valid(regs))
1378 return mfspr(SPRN_SIAR) + perf_ip_adjust(regs); 1408 return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1409 else if (use_siar)
1410 return 0; // no valid instruction pointer
1379 else 1411 else
1380 return regs->nip; 1412 return regs->nip;
1381} 1413}
@@ -1396,7 +1428,7 @@ static bool pmc_overflow(unsigned long val)
1396 * PMCs because a user might set a period of less than 256 and we 1428 * PMCs because a user might set a period of less than 256 and we
1397 * don't want to mistakenly reset them. 1429 * don't want to mistakenly reset them.
1398 */ 1430 */
1399 if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) 1431 if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
1400 return true; 1432 return true;
1401 1433
1402 return false; 1434 return false;
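core-book3s.c grows the equivalent check for the perf subsystem: siar_valid() returns true unless the PMU advertises PPMU_SIAR_VALID and the sample is for a marked event, in which case the POWER7P_MMCRA_SIAR_VALID bit of the saved MMCRA (stashed in regs->dsisr on these paths) decides. A condensed restatement of that logic; the three flag values below are placeholders, since the kernel defines them elsewhere:

#include <stdio.h>

#define PPMU_SIAR_VALID          0x1UL        /* placeholder flag value */
#define MMCRA_SAMPLE_ENABLE      0x1UL        /* placeholder */
#define POWER7P_MMCRA_SIAR_VALID 0x10000000UL /* placeholder */

/* Condensed form of the new siar_valid(). */
static int siar_valid(unsigned long pmu_flags, unsigned long mmcra)
{
	int marked = (mmcra & MMCRA_SAMPLE_ENABLE) != 0;

	if ((pmu_flags & PPMU_SIAR_VALID) && marked)
		return (mmcra & POWER7P_MMCRA_SIAR_VALID) != 0;

	return 1;
}

int main(void)
{
	printf("%d\n", siar_valid(PPMU_SIAR_VALID, MMCRA_SAMPLE_ENABLE));
	return 0;
}
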
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 1251e4d7e262..441af08edf43 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -373,6 +373,9 @@ static int __init init_power7_pmu(void)
373 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7")) 373 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
374 return -ENODEV; 374 return -ENODEV;
375 375
376 if (pvr_version_is(PVR_POWER7p))
377 power7_pmu.flags |= PPMU_SIAR_VALID;
378
376 return register_power_pmu(&power7_pmu); 379 return register_power_pmu(&power7_pmu);
377} 380}
378 381
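
Taken together, these two hunks make the P7+ SIAR-Valid bit gate both whether a sample is recorded at all and which instruction pointer is reported for it. A minimal sketch of the resulting flow, using only names that appear in the hunks above (not a literal copy of either file):

	/* init_power7_pmu(): advertise the capability on P7+ only */
	if (pvr_version_is(PVR_POWER7p))
		power7_pmu.flags |= PPMU_SIAR_VALID;

	/* record_and_restart(): drop the sample when SIAR is stale */
	record = siar_valid(regs);

	/* perf_instruction_pointer(): never report a stale SIAR */
	if (use_siar && siar_valid(regs))
		ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		ip = 0;		/* wanted SIAR, but it is not valid */
	else
		ip = regs->nip;
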
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index 9f6c33d63a42..6bd89a0e0dea 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -21,7 +21,6 @@
21 */ 21 */
22 22
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/memblock.h>
25#include <linux/of.h> 24#include <linux/of.h>
26#include <linux/of_platform.h> 25#include <linux/of_platform.h>
27#include <linux/rtc.h> 26#include <linux/rtc.h>
@@ -159,13 +158,8 @@ static void __init ppc47x_setup_arch(void)
159 158
160 /* No need to check the DMA config as we /know/ our windows are all of 159 /* No need to check the DMA config as we /know/ our windows are all of
161 * RAM. Let's hope that doesn't change */ 160 * RAM. Let's hope that doesn't change */
162#ifdef CONFIG_SWIOTLB 161 swiotlb_detect_4g();
163 if ((memblock_end_of_DRAM() - 1) > 0xffffffff) { 162
164 ppc_swiotlb_enable = 1;
165 set_pci_dma_ops(&swiotlb_dma_ops);
166 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
167 }
168#endif
169 ppc47x_smp_init(); 163 ppc47x_smp_init();
170} 164}
171 165
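
currituck is the first of several boards in this series that replace the same open-coded SWIOTLB block with swiotlb_detect_4g(). The helper's body is outside this patch context, but judging from the code it replaces it presumably amounts to:

	/* sketch only -- assumed to mirror the removed per-board block */
	void __init swiotlb_detect_4g(void)
	{
	#ifdef CONFIG_SWIOTLB
		if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
			ppc_swiotlb_enable = 1;
			set_pci_dma_ops(&swiotlb_dma_ops);
			ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
		}
	#endif
	}

i.e. enable bounce buffering whenever RAM extends past the 32-bit DMA limit, which is exactly what each board used to check by hand.
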
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ecf..b62508b113db 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -2,6 +2,7 @@ config PPC_MPC512x
2 bool "512x-based boards" 2 bool "512x-based boards"
3 depends on 6xx 3 depends on 6xx
4 select FSL_SOC 4 select FSL_SOC
5 select FB_FSL_DIU
5 select IPIC 6 select IPIC
6 select PPC_CLOCK 7 select PPC_CLOCK
7 select PPC_PCI_CHOICE 8 select PPC_PCI_CHOICE
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 1d8700ff60b0..9f771e05457c 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -54,14 +54,16 @@ static DEFINE_MUTEX(clocks_mutex);
54static struct clk *mpc5121_clk_get(struct device *dev, const char *id) 54static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
55{ 55{
56 struct clk *p, *clk = ERR_PTR(-ENOENT); 56 struct clk *p, *clk = ERR_PTR(-ENOENT);
57 int dev_match = 0; 57 int dev_match;
58 int id_match = 0; 58 int id_match;
59 59
60 if (dev == NULL || id == NULL) 60 if (dev == NULL || id == NULL)
61 return clk; 61 return clk;
62 62
63 mutex_lock(&clocks_mutex); 63 mutex_lock(&clocks_mutex);
64 list_for_each_entry(p, &clocks, node) { 64 list_for_each_entry(p, &clocks, node) {
65 dev_match = id_match = 0;
66
65 if (dev == p->dev) 67 if (dev == p->dev)
66 dev_match++; 68 dev_match++;
67 if (strcmp(id, p->name) == 0) 69 if (strcmp(id, p->name) == 0)
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index cfe958e94e1e..1650e090ef3a 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -191,8 +191,6 @@ mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port)
191 191
192static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb; 192static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
193 193
194#if defined(CONFIG_FB_FSL_DIU) || \
195 defined(CONFIG_FB_FSL_DIU_MODULE)
196static inline void mpc512x_free_bootmem(struct page *page) 194static inline void mpc512x_free_bootmem(struct page *page)
197{ 195{
198 __ClearPageReserved(page); 196 __ClearPageReserved(page);
@@ -220,7 +218,6 @@ void mpc512x_release_bootmem(void)
220 } 218 }
221 diu_ops.release_bootmem = NULL; 219 diu_ops.release_bootmem = NULL;
222} 220}
223#endif
224 221
225/* 222/*
226 * Check if DIU was pre-initialized. If so, perform steps 223 * Check if DIU was pre-initialized. If so, perform steps
@@ -323,15 +320,12 @@ void __init mpc512x_setup_diu(void)
323 } 320 }
324 } 321 }
325 322
326#if defined(CONFIG_FB_FSL_DIU) || \
327 defined(CONFIG_FB_FSL_DIU_MODULE)
328 diu_ops.get_pixel_format = mpc512x_get_pixel_format; 323 diu_ops.get_pixel_format = mpc512x_get_pixel_format;
329 diu_ops.set_gamma_table = mpc512x_set_gamma_table; 324 diu_ops.set_gamma_table = mpc512x_set_gamma_table;
330 diu_ops.set_monitor_port = mpc512x_set_monitor_port; 325 diu_ops.set_monitor_port = mpc512x_set_monitor_port;
331 diu_ops.set_pixel_clock = mpc512x_set_pixel_clock; 326 diu_ops.set_pixel_clock = mpc512x_set_pixel_clock;
332 diu_ops.valid_monitor_port = mpc512x_valid_monitor_port; 327 diu_ops.valid_monitor_port = mpc512x_valid_monitor_port;
333 diu_ops.release_bootmem = mpc512x_release_bootmem; 328 diu_ops.release_bootmem = mpc512x_release_bootmem;
334#endif
335} 329}
336 330
337void __init mpc512x_init_IRQ(void) 331void __init mpc512x_init_IRQ(void)
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index c0aa04068d69..9cf36020cf0d 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -52,6 +52,7 @@ static void __init mpc5200_simple_setup_arch(void)
52static const char *board[] __initdata = { 52static const char *board[] __initdata = {
53 "anonymous,a4m072", 53 "anonymous,a4m072",
54 "anon,charon", 54 "anon,charon",
55 "ifm,o2d",
55 "intercontrol,digsy-mtc", 56 "intercontrol,digsy-mtc",
56 "manroland,mucmc52", 57 "manroland,mucmc52",
57 "manroland,uc101", 58 "manroland,uc101",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index d61fb1c0c1a0..2351f9e0fb6f 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -170,7 +170,8 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
170 out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields); 170 out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
171 171
172 /* Kick it off */ 172 /* Kick it off */
173 out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01); 173 if (!lpbfifo.req->defer_xfer_start)
174 out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
174 if (dma) 175 if (dma)
175 bcom_enable(lpbfifo.bcom_cur_task); 176 bcom_enable(lpbfifo.bcom_cur_task);
176} 177}
@@ -421,6 +422,38 @@ int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
421} 422}
422EXPORT_SYMBOL(mpc52xx_lpbfifo_submit); 423EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
423 424
425int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
426{
427 unsigned long flags;
428
429 if (!lpbfifo.regs)
430 return -ENODEV;
431
432 spin_lock_irqsave(&lpbfifo.lock, flags);
433
434 /*
435 * If the req pointer is already set and a transfer was
436 * started on submit, then this transfer is in progress
437 */
438 if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
439 spin_unlock_irqrestore(&lpbfifo.lock, flags);
440 return -EBUSY;
441 }
442
443 /*
444 * If the req was previously submitted but not
445 * started, start it now
446 */
447 if (lpbfifo.req && lpbfifo.req == req &&
448 lpbfifo.req->defer_xfer_start) {
449 out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
450 }
451
452 spin_unlock_irqrestore(&lpbfifo.lock, flags);
453 return 0;
454}
455EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
456
424void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) 457void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
425{ 458{
426 unsigned long flags; 459 unsigned long flags;
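
The new defer_xfer_start flag splits submission from the actual kick: mpc52xx_lpbfifo_kick() skips the PACKET_SIZE write when the flag is set, and mpc52xx_lpbfifo_start_xfer() issues that write later under the FIFO lock. A sketch of how a client might use the pair (the wrapper function below is made up for illustration):

	/* hypothetical client of the deferred-start API */
	static int example_submit_then_start(struct mpc52xx_lpbfifo_request *req)
	{
		int ret;

		req->defer_xfer_start = 1;	/* submit() programs the FIFO only */
		ret = mpc52xx_lpbfifo_submit(req);
		if (ret)
			return ret;

		/* ... wait for whatever condition gates the transfer ... */

		return mpc52xx_lpbfifo_start_xfer(req);	/* write PACKET_SIZE now */
	}
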
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 159c01e91463..02d02a09942d 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -104,6 +104,13 @@ config P1022_DS
104 help 104 help
105 This option enables support for the Freescale P1022DS reference board. 105 This option enables support for the Freescale P1022DS reference board.
106 106
107config P1022_RDK
108 bool "Freescale / iVeia P1022 RDK"
109 select DEFAULT_UIMAGE
110 help
111 This option enables support for the Freescale / iVeia P1022RDK
112 reference board.
113
107config P1023_RDS 114config P1023_RDS
108 bool "Freescale P1023 RDS" 115 bool "Freescale P1023 RDS"
109 select DEFAULT_UIMAGE 116 select DEFAULT_UIMAGE
@@ -254,6 +261,20 @@ config P5020_DS
254 help 261 help
255 This option enables support for the P5020 DS board 262 This option enables support for the P5020 DS board
256 263
264config P5040_DS
265 bool "Freescale P5040 DS"
266 select DEFAULT_UIMAGE
267 select E500
268 select PPC_E500MC
269 select PHYS_64BIT
270 select SWIOTLB
271 select ARCH_REQUIRE_GPIOLIB
272 select GPIO_MPC8XXX
273 select HAS_RAPIDIO
274 select PPC_EPAPR_HV_PIC
275 help
276 This option enables support for the P5040 DS board
277
257config PPC_QEMU_E500 278config PPC_QEMU_E500
258 bool "QEMU generic e500 platform" 279 bool "QEMU generic e500 platform"
259 depends on EXPERIMENTAL 280 depends on EXPERIMENTAL
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 3dfe81175036..76f679cb04a0 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -15,11 +15,13 @@ obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
15obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o 15obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
16obj-$(CONFIG_P1010_RDB) += p1010rdb.o 16obj-$(CONFIG_P1010_RDB) += p1010rdb.o
17obj-$(CONFIG_P1022_DS) += p1022_ds.o 17obj-$(CONFIG_P1022_DS) += p1022_ds.o
18obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
18obj-$(CONFIG_P1023_RDS) += p1023_rds.o 19obj-$(CONFIG_P1023_RDS) += p1023_rds.o
19obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o 20obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o
20obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o 21obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
21obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o 22obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
22obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o 23obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
24obj-$(CONFIG_P5040_DS) += p5040_ds.o corenet_ds.o
23obj-$(CONFIG_STX_GP3) += stx_gp3.o 25obj-$(CONFIG_STX_GP3) += stx_gp3.o
24obj-$(CONFIG_TQM85xx) += tqm85xx.o 26obj-$(CONFIG_TQM85xx) += tqm85xx.o
25obj-$(CONFIG_SBC8548) += sbc8548.o 27obj-$(CONFIG_SBC8548) += sbc8548.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 67dac22b4363..d0861a0d8360 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -27,6 +27,16 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = {
27 { .compatible = "fsl,mpc8548-guts", }, 27 { .compatible = "fsl,mpc8548-guts", },
28 /* Probably unnecessary? */ 28 /* Probably unnecessary? */
29 { .compatible = "gpio-leds", }, 29 { .compatible = "gpio-leds", },
30 /* For all PCI controllers */
31 { .compatible = "fsl,mpc8540-pci", },
32 { .compatible = "fsl,mpc8548-pcie", },
33 { .compatible = "fsl,p1022-pcie", },
34 { .compatible = "fsl,p1010-pcie", },
35 { .compatible = "fsl,p1023-pcie", },
36 { .compatible = "fsl,p4080-pcie", },
37 { .compatible = "fsl,qoriq-pcie-v2.4", },
38 { .compatible = "fsl,qoriq-pcie-v2.3", },
39 { .compatible = "fsl,qoriq-pcie-v2.2", },
30 {}, 40 {},
31}; 41};
32 42
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 925b02874233..ed69c9250717 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -16,7 +16,6 @@
16#include <linux/kdev_t.h> 16#include <linux/kdev_t.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/memblock.h>
20 19
21#include <asm/time.h> 20#include <asm/time.h>
22#include <asm/machdep.h> 21#include <asm/machdep.h>
@@ -52,37 +51,16 @@ void __init corenet_ds_pic_init(void)
52 */ 51 */
53void __init corenet_ds_setup_arch(void) 52void __init corenet_ds_setup_arch(void)
54{ 53{
55#ifdef CONFIG_PCI
56 struct device_node *np;
57 struct pci_controller *hose;
58#endif
59 dma_addr_t max = 0xffffffff;
60
61 mpc85xx_smp_init(); 54 mpc85xx_smp_init();
62 55
63#ifdef CONFIG_PCI 56#if defined(CONFIG_PCI) && defined(CONFIG_PPC64)
64 for_each_node_by_type(np, "pci") {
65 if (of_device_is_compatible(np, "fsl,p4080-pcie") ||
66 of_device_is_compatible(np, "fsl,qoriq-pcie-v2.2")) {
67 fsl_add_bridge(np, 0);
68 hose = pci_find_hose_for_OF_device(np);
69 max = min(max, hose->dma_window_base_cur +
70 hose->dma_window_size);
71 }
72 }
73
74#ifdef CONFIG_PPC64
75 pci_devs_phb_init(); 57 pci_devs_phb_init();
76#endif 58#endif
77#endif
78 59
79#ifdef CONFIG_SWIOTLB 60 fsl_pci_assign_primary();
80 if ((memblock_end_of_DRAM() - 1) > max) { 61
81 ppc_swiotlb_enable = 1; 62 swiotlb_detect_4g();
82 set_pci_dma_ops(&swiotlb_dma_ops); 63
83 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
84 }
85#endif
86 pr_info("%s board from Freescale Semiconductor\n", ppc_md.name); 64 pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
87} 65}
88 66
@@ -99,6 +77,12 @@ static const struct of_device_id of_device_ids[] __devinitconst = {
99 { 77 {
100 .compatible = "fsl,qoriq-pcie-v2.2", 78 .compatible = "fsl,qoriq-pcie-v2.2",
101 }, 79 },
80 {
81 .compatible = "fsl,qoriq-pcie-v2.3",
82 },
83 {
84 .compatible = "fsl,qoriq-pcie-v2.4",
85 },
102 /* The following two are for the Freescale hypervisor */ 86 /* The following two are for the Freescale hypervisor */
103 { 87 {
104 .name = "hypervisor", 88 .name = "hypervisor",
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index b6a728b0a8ca..e6285ae6f423 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -22,7 +22,6 @@
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/of_platform.h> 24#include <linux/of_platform.h>
25#include <linux/memblock.h>
26 25
27#include <asm/time.h> 26#include <asm/time.h>
28#include <asm/machdep.h> 27#include <asm/machdep.h>
@@ -84,53 +83,39 @@ void __init ge_imp3a_pic_init(void)
84 of_node_put(cascade_node); 83 of_node_put(cascade_node);
85} 84}
86 85
87#ifdef CONFIG_PCI 86static void ge_imp3a_pci_assign_primary(void)
88static int primary_phb_addr;
89#endif /* CONFIG_PCI */
90
91/*
92 * Setup the architecture
93 */
94static void __init ge_imp3a_setup_arch(void)
95{ 87{
96 struct device_node *regs;
97#ifdef CONFIG_PCI 88#ifdef CONFIG_PCI
98 struct device_node *np; 89 struct device_node *np;
99 struct pci_controller *hose; 90 struct resource rsrc;
100#endif
101 dma_addr_t max = 0xffffffff;
102 91
103 if (ppc_md.progress)
104 ppc_md.progress("ge_imp3a_setup_arch()", 0);
105
106#ifdef CONFIG_PCI
107 for_each_node_by_type(np, "pci") { 92 for_each_node_by_type(np, "pci") {
108 if (of_device_is_compatible(np, "fsl,mpc8540-pci") || 93 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
109 of_device_is_compatible(np, "fsl,mpc8548-pcie") || 94 of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
110 of_device_is_compatible(np, "fsl,p2020-pcie")) { 95 of_device_is_compatible(np, "fsl,p2020-pcie")) {
111 struct resource rsrc;
112 of_address_to_resource(np, 0, &rsrc); 96 of_address_to_resource(np, 0, &rsrc);
113 if ((rsrc.start & 0xfffff) == primary_phb_addr) 97 if ((rsrc.start & 0xfffff) == 0x9000)
114 fsl_add_bridge(np, 1); 98 fsl_pci_primary = np;
115 else
116 fsl_add_bridge(np, 0);
117
118 hose = pci_find_hose_for_OF_device(np);
119 max = min(max, hose->dma_window_base_cur +
120 hose->dma_window_size);
121 } 99 }
122 } 100 }
123#endif 101#endif
102}
103
104/*
105 * Setup the architecture
106 */
107static void __init ge_imp3a_setup_arch(void)
108{
109 struct device_node *regs;
110
111 if (ppc_md.progress)
112 ppc_md.progress("ge_imp3a_setup_arch()", 0);
124 113
125 mpc85xx_smp_init(); 114 mpc85xx_smp_init();
126 115
127#ifdef CONFIG_SWIOTLB 116 ge_imp3a_pci_assign_primary();
128 if ((memblock_end_of_DRAM() - 1) > max) { 117
129 ppc_swiotlb_enable = 1; 118 swiotlb_detect_4g();
130 set_pci_dma_ops(&swiotlb_dma_ops);
131 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
132 }
133#endif
134 119
135 /* Remap basic board registers */ 120 /* Remap basic board registers */
136 regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs"); 121 regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
@@ -215,17 +200,10 @@ static int __init ge_imp3a_probe(void)
215{ 200{
216 unsigned long root = of_get_flat_dt_root(); 201 unsigned long root = of_get_flat_dt_root();
217 202
218 if (of_flat_dt_is_compatible(root, "ge,IMP3A")) { 203 return of_flat_dt_is_compatible(root, "ge,IMP3A");
219#ifdef CONFIG_PCI
220 primary_phb_addr = 0x9000;
221#endif
222 return 1;
223 }
224
225 return 0;
226} 204}
227 205
228machine_device_initcall(ge_imp3a, mpc85xx_common_publish_devices); 206machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
229 207
230machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier); 208machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
231 209
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 767c7cf18a9c..15ce4b55f117 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -17,7 +17,6 @@
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/memblock.h>
21 20
22#include <asm/time.h> 21#include <asm/time.h>
23#include <asm/machdep.h> 22#include <asm/machdep.h>
@@ -46,46 +45,17 @@ void __init mpc8536_ds_pic_init(void)
46 */ 45 */
47static void __init mpc8536_ds_setup_arch(void) 46static void __init mpc8536_ds_setup_arch(void)
48{ 47{
49#ifdef CONFIG_PCI
50 struct device_node *np;
51 struct pci_controller *hose;
52#endif
53 dma_addr_t max = 0xffffffff;
54
55 if (ppc_md.progress) 48 if (ppc_md.progress)
56 ppc_md.progress("mpc8536_ds_setup_arch()", 0); 49 ppc_md.progress("mpc8536_ds_setup_arch()", 0);
57 50
58#ifdef CONFIG_PCI 51 fsl_pci_assign_primary();
59 for_each_node_by_type(np, "pci") {
60 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
61 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
62 struct resource rsrc;
63 of_address_to_resource(np, 0, &rsrc);
64 if ((rsrc.start & 0xfffff) == 0x8000)
65 fsl_add_bridge(np, 1);
66 else
67 fsl_add_bridge(np, 0);
68
69 hose = pci_find_hose_for_OF_device(np);
70 max = min(max, hose->dma_window_base_cur +
71 hose->dma_window_size);
72 }
73 }
74
75#endif
76 52
77#ifdef CONFIG_SWIOTLB 53 swiotlb_detect_4g();
78 if ((memblock_end_of_DRAM() - 1) > max) {
79 ppc_swiotlb_enable = 1;
80 set_pci_dma_ops(&swiotlb_dma_ops);
81 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
82 }
83#endif
84 54
85 printk("MPC8536 DS board from Freescale Semiconductor\n"); 55 printk("MPC8536 DS board from Freescale Semiconductor\n");
86} 56}
87 57
88machine_device_initcall(mpc8536_ds, mpc85xx_common_publish_devices); 58machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices);
89 59
90machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier); 60machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
91 61
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 29ee8fcd75a2..7d12a19aa7ee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -137,10 +137,6 @@ static void __init init_ioports(void)
137 137
138static void __init mpc85xx_ads_setup_arch(void) 138static void __init mpc85xx_ads_setup_arch(void)
139{ 139{
140#ifdef CONFIG_PCI
141 struct device_node *np;
142#endif
143
144 if (ppc_md.progress) 140 if (ppc_md.progress)
145 ppc_md.progress("mpc85xx_ads_setup_arch()", 0); 141 ppc_md.progress("mpc85xx_ads_setup_arch()", 0);
146 142
@@ -150,11 +146,10 @@ static void __init mpc85xx_ads_setup_arch(void)
150#endif 146#endif
151 147
152#ifdef CONFIG_PCI 148#ifdef CONFIG_PCI
153 for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
154 fsl_add_bridge(np, 1);
155
156 ppc_md.pci_exclude_device = mpc85xx_exclude_device; 149 ppc_md.pci_exclude_device = mpc85xx_exclude_device;
157#endif 150#endif
151
152 fsl_pci_assign_primary();
158} 153}
159 154
160static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) 155static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
@@ -173,7 +168,7 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
173 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 168 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
174} 169}
175 170
176machine_device_initcall(mpc85xx_ads, mpc85xx_common_publish_devices); 171machine_arch_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
177 172
178/* 173/*
179 * Called very early, device-tree isn't unflattened 174 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 11156fb53d83..c474505ad0d0 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -276,6 +276,33 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
276 276
277#endif /* CONFIG_PPC_I8259 */ 277#endif /* CONFIG_PPC_I8259 */
278 278
279static void mpc85xx_cds_pci_assign_primary(void)
280{
281#ifdef CONFIG_PCI
282 struct device_node *np;
283
284 if (fsl_pci_primary)
285 return;
286
287 /*
288 * The MPC85xx CDS has an ISA bridge, but unfortunately there is no
289 * isa node in the device tree. As a workaround for such broken device
290 * trees we look for the i8259 node instead, so this routine copes
291 * with all device trees.
292 */
293 np = of_find_node_by_name(NULL, "i8259");
294 while ((fsl_pci_primary = of_get_parent(np))) {
295 of_node_put(np);
296 np = fsl_pci_primary;
297
298 if ((of_device_is_compatible(np, "fsl,mpc8540-pci") ||
299 of_device_is_compatible(np, "fsl,mpc8548-pcie")) &&
300 of_device_is_available(np))
301 return;
302 }
303#endif
304}
305
279/* 306/*
280 * Setup the architecture 307 * Setup the architecture
281 */ 308 */
@@ -309,21 +336,12 @@ static void __init mpc85xx_cds_setup_arch(void)
309 } 336 }
310 337
311#ifdef CONFIG_PCI 338#ifdef CONFIG_PCI
312 for_each_node_by_type(np, "pci") {
313 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
314 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
315 struct resource rsrc;
316 of_address_to_resource(np, 0, &rsrc);
317 if ((rsrc.start & 0xfffff) == 0x8000)
318 fsl_add_bridge(np, 1);
319 else
320 fsl_add_bridge(np, 0);
321 }
322 }
323
324 ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup; 339 ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup;
325 ppc_md.pci_exclude_device = mpc85xx_exclude_device; 340 ppc_md.pci_exclude_device = mpc85xx_exclude_device;
326#endif 341#endif
342
343 mpc85xx_cds_pci_assign_primary();
344 fsl_pci_assign_primary();
327} 345}
328 346
329static void mpc85xx_cds_show_cpuinfo(struct seq_file *m) 347static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
@@ -355,7 +373,7 @@ static int __init mpc85xx_cds_probe(void)
355 return of_flat_dt_is_compatible(root, "MPC85xxCDS"); 373 return of_flat_dt_is_compatible(root, "MPC85xxCDS");
356} 374}
357 375
358machine_device_initcall(mpc85xx_cds, mpc85xx_common_publish_devices); 376machine_arch_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
359 377
360define_machine(mpc85xx_cds) { 378define_machine(mpc85xx_cds) {
361 .name = "MPC85xx CDS", 379 .name = "MPC85xx CDS",
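
The CDS-specific helper above pre-selects fsl_pci_primary before the common fsl_pci_assign_primary() runs. That common helper is not part of this hunk, but the shape of the workaround suggests it performs the same parent walk keyed on a proper isa node, roughly:

	/* sketch, assuming the common code mirrors the CDS workaround */
	np = of_find_node_by_name(NULL, "isa");
	while ((fsl_pci_primary = of_get_parent(np))) {
		of_node_put(np);
		np = fsl_pci_primary;

		if ((of_device_is_compatible(np, "fsl,mpc8540-pci") ||
		     of_device_is_compatible(np, "fsl,mpc8548-pcie")) &&
		    of_device_is_available(np))
			return;
	}

so boards whose device tree does carry an isa node need no board-specific code at all.
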
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 6d3265fe7718..9ebb91ed96a3 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -20,7 +20,6 @@
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/memblock.h>
24 23
25#include <asm/time.h> 24#include <asm/time.h>
26#include <asm/machdep.h> 25#include <asm/machdep.h>
@@ -129,13 +128,11 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
129} 128}
130#endif /* CONFIG_PCI */ 129#endif /* CONFIG_PCI */
131 130
132static void __init mpc85xx_ds_pci_init(void) 131static void __init mpc85xx_ds_uli_init(void)
133{ 132{
134#ifdef CONFIG_PCI 133#ifdef CONFIG_PCI
135 struct device_node *node; 134 struct device_node *node;
136 135
137 fsl_pci_init();
138
139 /* See if we have a ULI under the primary */ 136 /* See if we have a ULI under the primary */
140 137
141 node = of_find_node_by_name(NULL, "uli1575"); 138 node = of_find_node_by_name(NULL, "uli1575");
@@ -159,7 +156,9 @@ static void __init mpc85xx_ds_setup_arch(void)
159 if (ppc_md.progress) 156 if (ppc_md.progress)
160 ppc_md.progress("mpc85xx_ds_setup_arch()", 0); 157 ppc_md.progress("mpc85xx_ds_setup_arch()", 0);
161 158
162 mpc85xx_ds_pci_init(); 159 swiotlb_detect_4g();
160 fsl_pci_assign_primary();
161 mpc85xx_ds_uli_init();
163 mpc85xx_smp_init(); 162 mpc85xx_smp_init();
164 163
165 printk("MPC85xx DS board from Freescale Semiconductor\n"); 164 printk("MPC85xx DS board from Freescale Semiconductor\n");
@@ -175,9 +174,9 @@ static int __init mpc8544_ds_probe(void)
175 return !!of_flat_dt_is_compatible(root, "MPC8544DS"); 174 return !!of_flat_dt_is_compatible(root, "MPC8544DS");
176} 175}
177 176
178machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices); 177machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
179machine_device_initcall(mpc8572_ds, mpc85xx_common_publish_devices); 178machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices);
180machine_device_initcall(p2020_ds, mpc85xx_common_publish_devices); 179machine_arch_initcall(p2020_ds, mpc85xx_common_publish_devices);
181 180
182machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier); 181machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
183machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier); 182machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 8e4b094c553b..8498f7323470 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -327,44 +327,16 @@ static void __init mpc85xx_mds_qeic_init(void) { }
327 327
328static void __init mpc85xx_mds_setup_arch(void) 328static void __init mpc85xx_mds_setup_arch(void)
329{ 329{
330#ifdef CONFIG_PCI
331 struct pci_controller *hose;
332 struct device_node *np;
333#endif
334 dma_addr_t max = 0xffffffff;
335
336 if (ppc_md.progress) 330 if (ppc_md.progress)
337 ppc_md.progress("mpc85xx_mds_setup_arch()", 0); 331 ppc_md.progress("mpc85xx_mds_setup_arch()", 0);
338 332
339#ifdef CONFIG_PCI
340 for_each_node_by_type(np, "pci") {
341 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
342 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
343 struct resource rsrc;
344 of_address_to_resource(np, 0, &rsrc);
345 if ((rsrc.start & 0xfffff) == 0x8000)
346 fsl_add_bridge(np, 1);
347 else
348 fsl_add_bridge(np, 0);
349
350 hose = pci_find_hose_for_OF_device(np);
351 max = min(max, hose->dma_window_base_cur +
352 hose->dma_window_size);
353 }
354 }
355#endif
356
357 mpc85xx_smp_init(); 333 mpc85xx_smp_init();
358 334
359 mpc85xx_mds_qe_init(); 335 mpc85xx_mds_qe_init();
360 336
361#ifdef CONFIG_SWIOTLB 337 fsl_pci_assign_primary();
362 if ((memblock_end_of_DRAM() - 1) > max) { 338
363 ppc_swiotlb_enable = 1; 339 swiotlb_detect_4g();
364 set_pci_dma_ops(&swiotlb_dma_ops);
365 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
366 }
367#endif
368} 340}
369 341
370 342
@@ -409,9 +381,9 @@ static int __init mpc85xx_publish_devices(void)
409 return mpc85xx_common_publish_devices(); 381 return mpc85xx_common_publish_devices();
410} 382}
411 383
412machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); 384machine_arch_initcall(mpc8568_mds, mpc85xx_publish_devices);
413machine_device_initcall(mpc8569_mds, mpc85xx_publish_devices); 385machine_arch_initcall(mpc8569_mds, mpc85xx_publish_devices);
414machine_device_initcall(p1021_mds, mpc85xx_common_publish_devices); 386machine_arch_initcall(p1021_mds, mpc85xx_common_publish_devices);
415 387
416machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier); 388machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier);
417machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier); 389machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 1910fdcb75b2..ede8771d6f02 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -86,23 +86,17 @@ void __init mpc85xx_rdb_pic_init(void)
86 */ 86 */
87static void __init mpc85xx_rdb_setup_arch(void) 87static void __init mpc85xx_rdb_setup_arch(void)
88{ 88{
89#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE) 89#ifdef CONFIG_QUICC_ENGINE
90 struct device_node *np; 90 struct device_node *np;
91#endif 91#endif
92 92
93 if (ppc_md.progress) 93 if (ppc_md.progress)
94 ppc_md.progress("mpc85xx_rdb_setup_arch()", 0); 94 ppc_md.progress("mpc85xx_rdb_setup_arch()", 0);
95 95
96#ifdef CONFIG_PCI
97 for_each_node_by_type(np, "pci") {
98 if (of_device_is_compatible(np, "fsl,mpc8548-pcie"))
99 fsl_add_bridge(np, 0);
100 }
101
102#endif
103
104 mpc85xx_smp_init(); 96 mpc85xx_smp_init();
105 97
98 fsl_pci_assign_primary();
99
106#ifdef CONFIG_QUICC_ENGINE 100#ifdef CONFIG_QUICC_ENGINE
107 np = of_find_compatible_node(NULL, NULL, "fsl,qe"); 101 np = of_find_compatible_node(NULL, NULL, "fsl,qe");
108 if (!np) { 102 if (!np) {
@@ -161,15 +155,15 @@ qe_fail:
161 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n"); 155 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
162} 156}
163 157
164machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices); 158machine_arch_initcall(p2020_rdb, mpc85xx_common_publish_devices);
165machine_device_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices); 159machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
166machine_device_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices); 160machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
167machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices); 161machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices);
168machine_device_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices); 162machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
169machine_device_initcall(p1020_utm_pc, mpc85xx_common_publish_devices); 163machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
170machine_device_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices); 164machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
171machine_device_initcall(p1025_rdb, mpc85xx_common_publish_devices); 165machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices);
172machine_device_initcall(p1024_rdb, mpc85xx_common_publish_devices); 166machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices);
173 167
174/* 168/*
175 * Called very early, device-tree isn't unflattened 169 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index dbaf44354f0d..0252961392d5 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -46,25 +46,15 @@ void __init p1010_rdb_pic_init(void)
46 */ 46 */
47static void __init p1010_rdb_setup_arch(void) 47static void __init p1010_rdb_setup_arch(void)
48{ 48{
49#ifdef CONFIG_PCI
50 struct device_node *np;
51#endif
52
53 if (ppc_md.progress) 49 if (ppc_md.progress)
54 ppc_md.progress("p1010_rdb_setup_arch()", 0); 50 ppc_md.progress("p1010_rdb_setup_arch()", 0);
55 51
56#ifdef CONFIG_PCI 52 fsl_pci_assign_primary();
57 for_each_node_by_type(np, "pci") {
58 if (of_device_is_compatible(np, "fsl,p1010-pcie"))
59 fsl_add_bridge(np, 0);
60 }
61
62#endif
63 53
64 printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n"); 54 printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n");
65} 55}
66 56
67machine_device_initcall(p1010_rdb, mpc85xx_common_publish_devices); 57machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices);
68machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier); 58machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
69 59
70/* 60/*
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 3c732acf331d..848a3e98e1c1 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -18,7 +18,6 @@
18 18
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/of_platform.h> 20#include <linux/of_platform.h>
21#include <linux/memblock.h>
22#include <asm/div64.h> 21#include <asm/div64.h>
23#include <asm/mpic.h> 22#include <asm/mpic.h>
24#include <asm/swiotlb.h> 23#include <asm/swiotlb.h>
@@ -507,32 +506,9 @@ early_param("video", early_video_setup);
507 */ 506 */
508static void __init p1022_ds_setup_arch(void) 507static void __init p1022_ds_setup_arch(void)
509{ 508{
510#ifdef CONFIG_PCI
511 struct device_node *np;
512#endif
513 dma_addr_t max = 0xffffffff;
514
515 if (ppc_md.progress) 509 if (ppc_md.progress)
516 ppc_md.progress("p1022_ds_setup_arch()", 0); 510 ppc_md.progress("p1022_ds_setup_arch()", 0);
517 511
518#ifdef CONFIG_PCI
519 for_each_compatible_node(np, "pci", "fsl,p1022-pcie") {
520 struct resource rsrc;
521 struct pci_controller *hose;
522
523 of_address_to_resource(np, 0, &rsrc);
524
525 if ((rsrc.start & 0xfffff) == 0x8000)
526 fsl_add_bridge(np, 1);
527 else
528 fsl_add_bridge(np, 0);
529
530 hose = pci_find_hose_for_OF_device(np);
531 max = min(max, hose->dma_window_base_cur +
532 hose->dma_window_size);
533 }
534#endif
535
536#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 512#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
537 diu_ops.get_pixel_format = p1022ds_get_pixel_format; 513 diu_ops.get_pixel_format = p1022ds_get_pixel_format;
538 diu_ops.set_gamma_table = p1022ds_set_gamma_table; 514 diu_ops.set_gamma_table = p1022ds_set_gamma_table;
@@ -601,18 +577,14 @@ static void __init p1022_ds_setup_arch(void)
601 577
602 mpc85xx_smp_init(); 578 mpc85xx_smp_init();
603 579
604#ifdef CONFIG_SWIOTLB 580 fsl_pci_assign_primary();
605 if ((memblock_end_of_DRAM() - 1) > max) { 581
606 ppc_swiotlb_enable = 1; 582 swiotlb_detect_4g();
607 set_pci_dma_ops(&swiotlb_dma_ops);
608 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
609 }
610#endif
611 583
612 pr_info("Freescale P1022 DS reference board\n"); 584 pr_info("Freescale P1022 DS reference board\n");
613} 585}
614 586
615machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices); 587machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices);
616 588
617machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier); 589machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
618 590
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
new file mode 100644
index 000000000000..55ffa1cc380c
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -0,0 +1,167 @@
1/*
2 * P1022 RDK board specific routines
3 *
4 * Copyright 2012 Freescale Semiconductor, Inc.
5 *
6 * Author: Timur Tabi <timur@freescale.com>
7 *
8 * Based on p1022_ds.c
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/pci.h>
16#include <linux/of_platform.h>
17#include <asm/div64.h>
18#include <asm/mpic.h>
19#include <asm/swiotlb.h>
20
21#include <sysdev/fsl_soc.h>
22#include <sysdev/fsl_pci.h>
23#include <asm/udbg.h>
24#include <asm/fsl_guts.h>
25#include "smp.h"
26
27#include "mpc85xx.h"
28
29#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
30
31/* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */
32#define CLKDVDR_PXCKEN 0x80000000
33#define CLKDVDR_PXCKINV 0x10000000
34#define CLKDVDR_PXCKDLY 0x06000000
35#define CLKDVDR_PXCLK_MASK 0x00FF0000
36
37/**
38 * p1022rdk_set_monitor_port: switch the output to a different monitor port
39 */
40static void p1022rdk_set_monitor_port(enum fsl_diu_monitor_port port)
41{
42 if (port != FSL_DIU_PORT_DVI) {
43 pr_err("p1022rdk: unsupported monitor port %i\n", port);
44 return;
45 }
46}
47
48/**
49 * p1022rdk_set_pixel_clock: program the DIU's clock
50 *
51 * @pixclock: the period, in picoseconds, of the pixel clock
52 */
53void p1022rdk_set_pixel_clock(unsigned int pixclock)
54{
55 struct device_node *guts_np = NULL;
56 struct ccsr_guts __iomem *guts;
57 unsigned long freq;
58 u64 temp;
59 u32 pxclk;
60
61 /* Map the global utilities registers. */
62 guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
63 if (!guts_np) {
64 pr_err("p1022rdk: missing global utilities device node\n");
65 return;
66 }
67
68 guts = of_iomap(guts_np, 0);
69 of_node_put(guts_np);
70 if (!guts) {
71 pr_err("p1022rdk: could not map global utilities device\n");
72 return;
73 }
74
75 /* Convert pixclock from a period to a frequency */
76 temp = 1000000000000ULL;
77 do_div(temp, pixclock);
78 freq = temp;
79
80 /*
81 * 'pxclk' is the ratio of the platform clock to the pixel clock.
82 * This number is programmed into the CLKDVDR register, and the valid
83 * range of values is 2-255.
84 */
85 pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
86 pxclk = clamp_t(u32, pxclk, 2, 255);
87
88 /* Disable the pixel clock, and set it to non-inverted and no delay */
89 clrbits32(&guts->clkdvdr,
90 CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK);
91
92 /* Enable the clock and set the pxclk */
93 setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));
94
95 iounmap(guts);
96}
97
98/**
99 * p1022rdk_valid_monitor_port: set the monitor port for sysfs
100 */
101enum fsl_diu_monitor_port
102p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
103{
104 return FSL_DIU_PORT_DVI;
105}
106
107#endif
108
109void __init p1022_rdk_pic_init(void)
110{
111 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
112 MPIC_SINGLE_DEST_CPU,
113 0, 256, " OpenPIC ");
114 BUG_ON(mpic == NULL);
115 mpic_init(mpic);
116}
117
118/*
119 * Setup the architecture
120 */
121static void __init p1022_rdk_setup_arch(void)
122{
123 if (ppc_md.progress)
124 ppc_md.progress("p1022_rdk_setup_arch()", 0);
125
126#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
127 diu_ops.set_monitor_port = p1022rdk_set_monitor_port;
128 diu_ops.set_pixel_clock = p1022rdk_set_pixel_clock;
129 diu_ops.valid_monitor_port = p1022rdk_valid_monitor_port;
130#endif
131
132 mpc85xx_smp_init();
133
134 fsl_pci_assign_primary();
135
136 swiotlb_detect_4g();
137
138 pr_info("Freescale / iVeia P1022 RDK reference board\n");
139}
140
141machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices);
142
143machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier);
144
145/*
146 * Called very early, device-tree isn't unflattened
147 */
148static int __init p1022_rdk_probe(void)
149{
150 unsigned long root = of_get_flat_dt_root();
151
152 return of_flat_dt_is_compatible(root, "fsl,p1022rdk");
153}
154
155define_machine(p1022_rdk) {
156 .name = "P1022 RDK",
157 .probe = p1022_rdk_probe,
158 .setup_arch = p1022_rdk_setup_arch,
159 .init_IRQ = p1022_rdk_pic_init,
160#ifdef CONFIG_PCI
161 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
162#endif
163 .get_irq = mpic_get_irq,
164 .restart = fsl_rstcr_restart,
165 .calibrate_decr = generic_calibrate_decr,
166 .progress = udbg_progress,
167};
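
To make the divider arithmetic in p1022rdk_set_pixel_clock() concrete, assume (purely for illustration) a 533 MHz platform clock and a requested pixclock of 13468 ps:

	/* illustration only -- the numbers are assumptions, not board data */
	freq  = 1000000000000ULL / 13468;		/* ~74.25 MHz */
	pxclk = DIV_ROUND_CLOSEST(533000000, 74250000);	/* = 7        */
	pxclk = clamp_t(u32, pxclk, 2, 255);		/* still 7    */
	setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16));

With a divider of 7 the DIU pixel clock ends up at 533 MHz / 7 ≈ 76 MHz, the closest the hardware can get to the requested rate.
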
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 2990e8b13dc9..9cc60a738834 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -80,15 +80,12 @@ static void __init mpc85xx_rds_setup_arch(void)
80 } 80 }
81 } 81 }
82 82
83#ifdef CONFIG_PCI
84 for_each_compatible_node(np, "pci", "fsl,p1023-pcie")
85 fsl_add_bridge(np, 0);
86#endif
87
88 mpc85xx_smp_init(); 83 mpc85xx_smp_init();
84
85 fsl_pci_assign_primary();
89} 86}
90 87
91machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices); 88machine_arch_initcall(p1023_rds, mpc85xx_common_publish_devices);
92 89
93static void __init mpc85xx_rds_pic_init(void) 90static void __init mpc85xx_rds_pic_init(void)
94{ 91{
diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index 6541fa2630c0..000c0892fc40 100644
--- a/arch/powerpc/platforms/85xx/p2041_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -80,7 +80,7 @@ define_machine(p2041_rdb) {
80 .power_save = e500_idle, 80 .power_save = e500_idle,
81}; 81};
82 82
83machine_device_initcall(p2041_rdb, corenet_ds_publish_devices); 83machine_arch_initcall(p2041_rdb, corenet_ds_publish_devices);
84 84
85#ifdef CONFIG_SWIOTLB 85#ifdef CONFIG_SWIOTLB
86machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier); 86machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index f238efa75891..b3edc205daa9 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -82,7 +82,7 @@ define_machine(p3041_ds) {
82 .power_save = e500_idle, 82 .power_save = e500_idle,
83}; 83};
84 84
85machine_device_initcall(p3041_ds, corenet_ds_publish_devices); 85machine_arch_initcall(p3041_ds, corenet_ds_publish_devices);
86 86
87#ifdef CONFIG_SWIOTLB 87#ifdef CONFIG_SWIOTLB
88machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier); 88machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index c92417dc6574..54df10632aea 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -81,7 +81,7 @@ define_machine(p4080_ds) {
81 .power_save = e500_idle, 81 .power_save = e500_idle,
82}; 82};
83 83
84machine_device_initcall(p4080_ds, corenet_ds_publish_devices); 84machine_arch_initcall(p4080_ds, corenet_ds_publish_devices);
85#ifdef CONFIG_SWIOTLB 85#ifdef CONFIG_SWIOTLB
86machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier); 86machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
87#endif 87#endif
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index 17bef15a85ed..753a42c29d4d 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -91,7 +91,7 @@ define_machine(p5020_ds) {
91#endif 91#endif
92}; 92};
93 93
94machine_device_initcall(p5020_ds, corenet_ds_publish_devices); 94machine_arch_initcall(p5020_ds, corenet_ds_publish_devices);
95 95
96#ifdef CONFIG_SWIOTLB 96#ifdef CONFIG_SWIOTLB
97machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier); 97machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c
new file mode 100644
index 000000000000..11381851828e
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p5040_ds.c
@@ -0,0 +1,89 @@
1/*
2 * P5040 DS Setup
3 *
4 * Copyright 2009-2010 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14
15#include <asm/machdep.h>
16#include <asm/udbg.h>
17#include <asm/mpic.h>
18
19#include <linux/of_fdt.h>
20
21#include <sysdev/fsl_soc.h>
22#include <sysdev/fsl_pci.h>
23#include <asm/ehv_pic.h>
24
25#include "corenet_ds.h"
26
27/*
28 * Called very early, device-tree isn't unflattened
29 */
30static int __init p5040_ds_probe(void)
31{
32 unsigned long root = of_get_flat_dt_root();
33#ifdef CONFIG_SMP
34 extern struct smp_ops_t smp_85xx_ops;
35#endif
36
37 if (of_flat_dt_is_compatible(root, "fsl,P5040DS"))
38 return 1;
39
40 /* Check if we're running under the Freescale hypervisor */
41 if (of_flat_dt_is_compatible(root, "fsl,P5040DS-hv")) {
42 ppc_md.init_IRQ = ehv_pic_init;
43 ppc_md.get_irq = ehv_pic_get_irq;
44 ppc_md.restart = fsl_hv_restart;
45 ppc_md.power_off = fsl_hv_halt;
46 ppc_md.halt = fsl_hv_halt;
47#ifdef CONFIG_SMP
48 /*
49 * Disable the timebase sync operations because we can't write
50 * to the timebase registers under the hypervisor.
51 */
52 smp_85xx_ops.give_timebase = NULL;
53 smp_85xx_ops.take_timebase = NULL;
54#endif
55 return 1;
56 }
57
58 return 0;
59}
60
61define_machine(p5040_ds) {
62 .name = "P5040 DS",
63 .probe = p5040_ds_probe,
64 .setup_arch = corenet_ds_setup_arch,
65 .init_IRQ = corenet_ds_pic_init,
66#ifdef CONFIG_PCI
67 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
68#endif
69/* coreint doesn't play nice with lazy EE, use legacy mpic for now */
70#ifdef CONFIG_PPC64
71 .get_irq = mpic_get_irq,
72#else
73 .get_irq = mpic_get_coreint_irq,
74#endif
75 .restart = fsl_rstcr_restart,
76 .calibrate_decr = generic_calibrate_decr,
77 .progress = udbg_progress,
78#ifdef CONFIG_PPC64
79 .power_save = book3e_idle,
80#else
81 .power_save = e500_idle,
82#endif
83};
84
85machine_arch_initcall(p5040_ds, corenet_ds_publish_devices);
86
87#ifdef CONFIG_SWIOTLB
88machine_arch_initcall(p5040_ds, swiotlb_setup_bus_notifier);
89#endif
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 95a2e53af71b..f6ea5618c733 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -41,7 +41,8 @@ static void __init qemu_e500_setup_arch(void)
41{ 41{
42 ppc_md.progress("qemu_e500_setup_arch()", 0); 42 ppc_md.progress("qemu_e500_setup_arch()", 0);
43 43
44 fsl_pci_init(); 44 fsl_pci_assign_primary();
45 swiotlb_detect_4g();
45 mpc85xx_smp_init(); 46 mpc85xx_smp_init();
46} 47}
47 48
@@ -55,7 +56,7 @@ static int __init qemu_e500_probe(void)
55 return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500"); 56 return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500");
56} 57}
57 58
58machine_device_initcall(qemu_e500, mpc85xx_common_publish_devices); 59machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices);
59 60
60define_machine(qemu_e500) { 61define_machine(qemu_e500) {
61 .name = "QEMU e500", 62 .name = "QEMU e500",
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index cd3a66bdb54b..f62121825914 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -88,26 +88,11 @@ static int __init sbc8548_hw_rev(void)
88 */ 88 */
89static void __init sbc8548_setup_arch(void) 89static void __init sbc8548_setup_arch(void)
90{ 90{
91#ifdef CONFIG_PCI
92 struct device_node *np;
93#endif
94
95 if (ppc_md.progress) 91 if (ppc_md.progress)
96 ppc_md.progress("sbc8548_setup_arch()", 0); 92 ppc_md.progress("sbc8548_setup_arch()", 0);
97 93
98#ifdef CONFIG_PCI 94 fsl_pci_assign_primary();
99 for_each_node_by_type(np, "pci") { 95
100 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
101 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
102 struct resource rsrc;
103 of_address_to_resource(np, 0, &rsrc);
104 if ((rsrc.start & 0xfffff) == 0x8000)
105 fsl_add_bridge(np, 1);
106 else
107 fsl_add_bridge(np, 0);
108 }
109 }
110#endif
111 sbc_rev = sbc8548_hw_rev(); 96 sbc_rev = sbc8548_hw_rev();
112} 97}
113 98
@@ -128,7 +113,7 @@ static void sbc8548_show_cpuinfo(struct seq_file *m)
128 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 113 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
129} 114}
130 115
131machine_device_initcall(sbc8548, mpc85xx_common_publish_devices); 116machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices);
132 117
133/* 118/*
134 * Called very early, device-tree isn't unflattened 119 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index ff4249044a3c..6fcfa12e5c56 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
2 * Author: Andy Fleming <afleming@freescale.com> 2 * Author: Andy Fleming <afleming@freescale.com>
3 * Kumar Gala <galak@kernel.crashing.org> 3 * Kumar Gala <galak@kernel.crashing.org>
4 * 4 *
5 * Copyright 2006-2008, 2011 Freescale Semiconductor Inc. 5 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 8 * under the terms of the GNU General Public License as published by the
@@ -17,6 +17,7 @@
17#include <linux/of.h> 17#include <linux/of.h>
18#include <linux/kexec.h> 18#include <linux/kexec.h>
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/cpu.h>
20 21
21#include <asm/machdep.h> 22#include <asm/machdep.h>
22#include <asm/pgtable.h> 23#include <asm/pgtable.h>
@@ -24,33 +25,118 @@
24#include <asm/mpic.h> 25#include <asm/mpic.h>
25#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
26#include <asm/dbell.h> 27#include <asm/dbell.h>
28#include <asm/fsl_guts.h>
27 29
28#include <sysdev/fsl_soc.h> 30#include <sysdev/fsl_soc.h>
29#include <sysdev/mpic.h> 31#include <sysdev/mpic.h>
30#include "smp.h" 32#include "smp.h"
31 33
32extern void __early_start(void); 34struct epapr_spin_table {
33 35 u32 addr_h;
34#define BOOT_ENTRY_ADDR_UPPER 0 36 u32 addr_l;
35#define BOOT_ENTRY_ADDR_LOWER 1 37 u32 r3_h;
36#define BOOT_ENTRY_R3_UPPER 2 38 u32 r3_l;
37#define BOOT_ENTRY_R3_LOWER 3 39 u32 reserved;
38#define BOOT_ENTRY_RESV 4 40 u32 pir;
39#define BOOT_ENTRY_PIR 5 41};
40#define BOOT_ENTRY_R6_UPPER 6 42
41#define BOOT_ENTRY_R6_LOWER 7 43static struct ccsr_guts __iomem *guts;
42#define NUM_BOOT_ENTRY 8 44static u64 timebase;
43#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) 45static int tb_req;
44 46static int tb_valid;
45static int __init 47
46smp_85xx_kick_cpu(int nr) 48static void mpc85xx_timebase_freeze(int freeze)
49{
50 uint32_t mask;
51
52 mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
53 if (freeze)
54 setbits32(&guts->devdisr, mask);
55 else
56 clrbits32(&guts->devdisr, mask);
57
58 in_be32(&guts->devdisr);
59}
60
61static void mpc85xx_give_timebase(void)
62{
63 unsigned long flags;
64
65 local_irq_save(flags);
66
67 while (!tb_req)
68 barrier();
69 tb_req = 0;
70
71 mpc85xx_timebase_freeze(1);
72 timebase = get_tb();
73 mb();
74 tb_valid = 1;
75
76 while (tb_valid)
77 barrier();
78
79 mpc85xx_timebase_freeze(0);
80
81 local_irq_restore(flags);
82}
83
84static void mpc85xx_take_timebase(void)
85{
86 unsigned long flags;
87
88 local_irq_save(flags);
89
90 tb_req = 1;
91 while (!tb_valid)
92 barrier();
93
94 set_tb(timebase >> 32, timebase & 0xffffffff);
95 isync();
96 tb_valid = 0;
97
98 local_irq_restore(flags);
99}
100
101#ifdef CONFIG_HOTPLUG_CPU
102static void __cpuinit smp_85xx_mach_cpu_die(void)
103{
104 unsigned int cpu = smp_processor_id();
105 u32 tmp;
106
107 local_irq_disable();
108 idle_task_exit();
109 generic_set_cpu_dead(cpu);
110 mb();
111
112 mtspr(SPRN_TCR, 0);
113
114 __flush_disable_L1();
115 tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
116 mtspr(SPRN_HID0, tmp);
117 isync();
118
119 /* Enter NAP mode. */
120 tmp = mfmsr();
121 tmp |= MSR_WE;
122 mb();
123 mtmsr(tmp);
124 isync();
125
126 while (1)
127 ;
128}
129#endif
130
131static int __cpuinit smp_85xx_kick_cpu(int nr)
47{ 132{
48 unsigned long flags; 133 unsigned long flags;
49 const u64 *cpu_rel_addr; 134 const u64 *cpu_rel_addr;
50 __iomem u32 *bptr_vaddr; 135 __iomem struct epapr_spin_table *spin_table;
51 struct device_node *np; 136 struct device_node *np;
52 int n = 0, hw_cpu = get_hard_smp_processor_id(nr); 137 int hw_cpu = get_hard_smp_processor_id(nr);
53 int ioremappable; 138 int ioremappable;
139 int ret = 0;
54 140
55 WARN_ON(nr < 0 || nr >= NR_CPUS); 141 WARN_ON(nr < 0 || nr >= NR_CPUS);
56 WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS); 142 WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -75,46 +161,81 @@ smp_85xx_kick_cpu(int nr)
75 161
76 /* Map the spin table */ 162 /* Map the spin table */
77 if (ioremappable) 163 if (ioremappable)
78 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); 164 spin_table = ioremap(*cpu_rel_addr,
165 sizeof(struct epapr_spin_table));
79 else 166 else
80 bptr_vaddr = phys_to_virt(*cpu_rel_addr); 167 spin_table = phys_to_virt(*cpu_rel_addr);
81 168
82 local_irq_save(flags); 169 local_irq_save(flags);
83
84 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, hw_cpu);
85#ifdef CONFIG_PPC32 170#ifdef CONFIG_PPC32
86 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); 171#ifdef CONFIG_HOTPLUG_CPU
172 /* Corresponding to generic_set_cpu_dead() */
173 generic_set_cpu_up(nr);
174
175 if (system_state == SYSTEM_RUNNING) {
176 out_be32(&spin_table->addr_l, 0);
177
178 /*
179 * We don't set the BPTR register here since it already points
180 * to the boot page properly.
181 */
182 mpic_reset_core(hw_cpu);
183
184 /* wait until core is ready... */
185 if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
186 10000, 100)) {
187 pr_err("%s: timeout waiting for core %d to reset\n",
188 __func__, hw_cpu);
189 ret = -ENOENT;
190 goto out;
191 }
192
193 /* clear the acknowledge status */
194 __secondary_hold_acknowledge = -1;
195 }
196#endif
197 out_be32(&spin_table->pir, hw_cpu);
198 out_be32(&spin_table->addr_l, __pa(__early_start));
87 199
88 if (!ioremappable) 200 if (!ioremappable)
89 flush_dcache_range((ulong)bptr_vaddr, 201 flush_dcache_range((ulong)spin_table,
90 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); 202 (ulong)spin_table + sizeof(struct epapr_spin_table));
91 203
92 /* Wait a bit for the CPU to ack. */ 204 /* Wait a bit for the CPU to ack. */
93 while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000)) 205 if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
94 mdelay(1); 206 10000, 100)) {
207 pr_err("%s: timeout waiting for core %d to ack\n",
208 __func__, hw_cpu);
209 ret = -ENOENT;
210 goto out;
211 }
212out:
95#else 213#else
96 smp_generic_kick_cpu(nr); 214 smp_generic_kick_cpu(nr);
97 215
98 out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER), 216 out_be32(&spin_table->pir, hw_cpu);
99 __pa((u64)*((unsigned long long *) generic_secondary_smp_init))); 217 out_be64((u64 *)(&spin_table->addr_h),
218 __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
100 219
101 if (!ioremappable) 220 if (!ioremappable)
102 flush_dcache_range((ulong)bptr_vaddr, 221 flush_dcache_range((ulong)spin_table,
103 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); 222 (ulong)spin_table + sizeof(struct epapr_spin_table));
104#endif 223#endif
105 224
106 local_irq_restore(flags); 225 local_irq_restore(flags);
107 226
108 if (ioremappable) 227 if (ioremappable)
109 iounmap(bptr_vaddr); 228 iounmap(spin_table);
110
111 pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
112 229
113 return 0; 230 return ret;
114} 231}
115 232
116struct smp_ops_t smp_85xx_ops = { 233struct smp_ops_t smp_85xx_ops = {
117 .kick_cpu = smp_85xx_kick_cpu, 234 .kick_cpu = smp_85xx_kick_cpu,
235#ifdef CONFIG_HOTPLUG_CPU
236 .cpu_disable = generic_cpu_disable,
237 .cpu_die = generic_cpu_die,
238#endif
118#ifdef CONFIG_KEXEC 239#ifdef CONFIG_KEXEC
119 .give_timebase = smp_generic_give_timebase, 240 .give_timebase = smp_generic_give_timebase,
120 .take_timebase = smp_generic_take_timebase, 241 .take_timebase = smp_generic_take_timebase,
@@ -218,8 +339,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
218} 339}
219#endif /* CONFIG_KEXEC */ 340#endif /* CONFIG_KEXEC */
220 341
221static void __init 342static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
222smp_85xx_setup_cpu(int cpu_nr)
223{ 343{
224 if (smp_85xx_ops.probe == smp_mpic_probe) 344 if (smp_85xx_ops.probe == smp_mpic_probe)
225 mpic_setup_this_cpu(); 345 mpic_setup_this_cpu();
@@ -228,6 +348,16 @@ smp_85xx_setup_cpu(int cpu_nr)
228 doorbell_setup_this_cpu(); 348 doorbell_setup_this_cpu();
229} 349}
230 350
351static const struct of_device_id mpc85xx_smp_guts_ids[] = {
352 { .compatible = "fsl,mpc8572-guts", },
353 { .compatible = "fsl,p1020-guts", },
354 { .compatible = "fsl,p1021-guts", },
355 { .compatible = "fsl,p1022-guts", },
356 { .compatible = "fsl,p1023-guts", },
357 { .compatible = "fsl,p2020-guts", },
358 {},
359};
360
231void __init mpc85xx_smp_init(void) 361void __init mpc85xx_smp_init(void)
232{ 362{
233 struct device_node *np; 363 struct device_node *np;
@@ -249,6 +379,22 @@ void __init mpc85xx_smp_init(void)
249 smp_85xx_ops.cause_ipi = doorbell_cause_ipi; 379 smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
250 } 380 }
251 381
382 np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
383 if (np) {
384 guts = of_iomap(np, 0);
385 of_node_put(np);
386 if (!guts) {
387 pr_err("%s: Could not map guts node address\n",
388 __func__);
389 return;
390 }
391 smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
392 smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
393#ifdef CONFIG_HOTPLUG_CPU
394 ppc_md.cpu_die = smp_85xx_mach_cpu_die;
395#endif
396 }
397
252 smp_ops = &smp_85xx_ops; 398 smp_ops = &smp_85xx_ops;
253 399
254#ifdef CONFIG_KEXEC 400#ifdef CONFIG_KEXEC
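The smp.c hunks above replace open-coded mdelay() polling with spin_event_timeout(), the powerpc helper from asm/delay.h that re-evaluates its condition until it becomes true or until the given timeout (microseconds at these call sites) elapses, waiting the given delay between polls; it evaluates to the final value of the condition, so a zero result means the wait timed out. A minimal sketch of the same polling pattern follows; the wait_for_flag() helper and its 'flag' argument are hypothetical illustrations, not code from this patch, and the real macro tracks the CPU timebase rather than counting iterations.

#include <linux/delay.h>
#include <linux/types.h>

/*
 * Illustrative only: roughly what spin_event_timeout(cond, 10000, 100)
 * means at the call sites above -- re-check the condition for up to
 * ~10000us, waiting 100us between polls, and report whether it became
 * true before the timeout.
 */
static bool wait_for_flag(const volatile u32 *flag,
			  unsigned int timeout_us, unsigned int delay_us)
{
	unsigned int waited = 0;

	while (!*flag && waited < timeout_us) {
		udelay(delay_us);	/* busy-wait between polls */
		waited += delay_us;
	}
	return *flag != 0;		/* false => timed out */
}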
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index b9c6daa07b66..ae368e0e1076 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -66,20 +66,13 @@ static void __init socrates_pic_init(void)
66 */ 66 */
67static void __init socrates_setup_arch(void) 67static void __init socrates_setup_arch(void)
68{ 68{
69#ifdef CONFIG_PCI
70 struct device_node *np;
71#endif
72
73 if (ppc_md.progress) 69 if (ppc_md.progress)
74 ppc_md.progress("socrates_setup_arch()", 0); 70 ppc_md.progress("socrates_setup_arch()", 0);
75 71
76#ifdef CONFIG_PCI 72 fsl_pci_assign_primary();
77 for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
78 fsl_add_bridge(np, 1);
79#endif
80} 73}
81 74
82machine_device_initcall(socrates, mpc85xx_common_publish_devices); 75machine_arch_initcall(socrates, mpc85xx_common_publish_devices);
83 76
84/* 77/*
85 * Called very early, device-tree isn't unflattened 78 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index e0508002b086..6f4939b6309e 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -60,21 +60,14 @@ static void __init stx_gp3_pic_init(void)
60 */ 60 */
61static void __init stx_gp3_setup_arch(void) 61static void __init stx_gp3_setup_arch(void)
62{ 62{
63#ifdef CONFIG_PCI
64 struct device_node *np;
65#endif
66
67 if (ppc_md.progress) 63 if (ppc_md.progress)
68 ppc_md.progress("stx_gp3_setup_arch()", 0); 64 ppc_md.progress("stx_gp3_setup_arch()", 0);
69 65
66 fsl_pci_assign_primary();
67
70#ifdef CONFIG_CPM2 68#ifdef CONFIG_CPM2
71 cpm2_reset(); 69 cpm2_reset();
72#endif 70#endif
73
74#ifdef CONFIG_PCI
75 for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
76 fsl_add_bridge(np, 1);
77#endif
78} 71}
79 72
80static void stx_gp3_show_cpuinfo(struct seq_file *m) 73static void stx_gp3_show_cpuinfo(struct seq_file *m)
@@ -93,7 +86,7 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m)
93 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 86 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
94} 87}
95 88
96machine_device_initcall(stx_gp3, mpc85xx_common_publish_devices); 89machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices);
97 90
98/* 91/*
99 * Called very early, device-tree isn't unflattened 92 * Called very early, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index b62fa87521a3..b4e58cdc09a5 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -59,10 +59,6 @@ static void __init tqm85xx_pic_init(void)
59 */ 59 */
60static void __init tqm85xx_setup_arch(void) 60static void __init tqm85xx_setup_arch(void)
61{ 61{
62#ifdef CONFIG_PCI
63 struct device_node *np;
64#endif
65
66 if (ppc_md.progress) 62 if (ppc_md.progress)
67 ppc_md.progress("tqm85xx_setup_arch()", 0); 63 ppc_md.progress("tqm85xx_setup_arch()", 0);
68 64
@@ -70,20 +66,7 @@ static void __init tqm85xx_setup_arch(void)
70 cpm2_reset(); 66 cpm2_reset();
71#endif 67#endif
72 68
73#ifdef CONFIG_PCI 69 fsl_pci_assign_primary();
74 for_each_node_by_type(np, "pci") {
75 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
76 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
77 struct resource rsrc;
78 if (!of_address_to_resource(np, 0, &rsrc)) {
79 if ((rsrc.start & 0xfffff) == 0x8000)
80 fsl_add_bridge(np, 1);
81 else
82 fsl_add_bridge(np, 0);
83 }
84 }
85 }
86#endif
87} 70}
88 71
89static void tqm85xx_show_cpuinfo(struct seq_file *m) 72static void tqm85xx_show_cpuinfo(struct seq_file *m)
@@ -123,7 +106,7 @@ static void __devinit tqm85xx_ti1520_fixup(struct pci_dev *pdev)
123DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520, 106DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
124 tqm85xx_ti1520_fixup); 107 tqm85xx_ti1520_fixup);
125 108
126machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices); 109machine_arch_initcall(tqm85xx, mpc85xx_common_publish_devices);
127 110
128static const char * const board[] __initconst = { 111static const char * const board[] __initconst = {
129 "tqc,tqm8540", 112 "tqc,tqm8540",
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 41c687550ea7..dcbf7e42dce7 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -111,18 +111,11 @@ static void xes_mpc85xx_fixups(void)
111 } 111 }
112} 112}
113 113
114#ifdef CONFIG_PCI
115static int primary_phb_addr;
116#endif
117
118/* 114/*
119 * Setup the architecture 115 * Setup the architecture
120 */ 116 */
121static void __init xes_mpc85xx_setup_arch(void) 117static void __init xes_mpc85xx_setup_arch(void)
122{ 118{
123#ifdef CONFIG_PCI
124 struct device_node *np;
125#endif
126 struct device_node *root; 119 struct device_node *root;
127 const char *model = "Unknown"; 120 const char *model = "Unknown";
128 121
@@ -137,26 +130,14 @@ static void __init xes_mpc85xx_setup_arch(void)
137 130
138 xes_mpc85xx_fixups(); 131 xes_mpc85xx_fixups();
139 132
140#ifdef CONFIG_PCI
141 for_each_node_by_type(np, "pci") {
142 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
143 of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
144 struct resource rsrc;
145 of_address_to_resource(np, 0, &rsrc);
146 if ((rsrc.start & 0xfffff) == primary_phb_addr)
147 fsl_add_bridge(np, 1);
148 else
149 fsl_add_bridge(np, 0);
150 }
151 }
152#endif
153
154 mpc85xx_smp_init(); 133 mpc85xx_smp_init();
134
135 fsl_pci_assign_primary();
155} 136}
156 137
157machine_device_initcall(xes_mpc8572, mpc85xx_common_publish_devices); 138machine_arch_initcall(xes_mpc8572, mpc85xx_common_publish_devices);
158machine_device_initcall(xes_mpc8548, mpc85xx_common_publish_devices); 139machine_arch_initcall(xes_mpc8548, mpc85xx_common_publish_devices);
159machine_device_initcall(xes_mpc8540, mpc85xx_common_publish_devices); 140machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
160 141
161/* 142/*
162 * Called very early, device-tree isn't unflattened 143 * Called very early, device-tree isn't unflattened
@@ -165,42 +146,21 @@ static int __init xes_mpc8572_probe(void)
165{ 146{
166 unsigned long root = of_get_flat_dt_root(); 147 unsigned long root = of_get_flat_dt_root();
167 148
168 if (of_flat_dt_is_compatible(root, "xes,MPC8572")) { 149 return of_flat_dt_is_compatible(root, "xes,MPC8572");
169#ifdef CONFIG_PCI
170 primary_phb_addr = 0x8000;
171#endif
172 return 1;
173 } else {
174 return 0;
175 }
176} 150}
177 151
178static int __init xes_mpc8548_probe(void) 152static int __init xes_mpc8548_probe(void)
179{ 153{
180 unsigned long root = of_get_flat_dt_root(); 154 unsigned long root = of_get_flat_dt_root();
181 155
182 if (of_flat_dt_is_compatible(root, "xes,MPC8548")) { 156 return of_flat_dt_is_compatible(root, "xes,MPC8548");
183#ifdef CONFIG_PCI
184 primary_phb_addr = 0xb000;
185#endif
186 return 1;
187 } else {
188 return 0;
189 }
190} 157}
191 158
192static int __init xes_mpc8540_probe(void) 159static int __init xes_mpc8540_probe(void)
193{ 160{
194 unsigned long root = of_get_flat_dt_root(); 161 unsigned long root = of_get_flat_dt_root();
195 162
196 if (of_flat_dt_is_compatible(root, "xes,MPC8540")) { 163 return of_flat_dt_is_compatible(root, "xes,MPC8540");
197#ifdef CONFIG_PCI
198 primary_phb_addr = 0xb000;
199#endif
200 return 1;
201 } else {
202 return 0;
203 }
204} 164}
205 165
206define_machine(xes_mpc8572) { 166define_machine(xes_mpc8572) {
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 563aafa8629c..bf5338754c5a 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -73,13 +73,6 @@ static void __init gef_ppc9a_init_irq(void)
73static void __init gef_ppc9a_setup_arch(void) 73static void __init gef_ppc9a_setup_arch(void)
74{ 74{
75 struct device_node *regs; 75 struct device_node *regs;
76#ifdef CONFIG_PCI
77 struct device_node *np;
78
79 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
80 fsl_add_bridge(np, 1);
81 }
82#endif
83 76
84 printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n"); 77 printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n");
85 78
@@ -87,6 +80,8 @@ static void __init gef_ppc9a_setup_arch(void)
87 mpc86xx_smp_init(); 80 mpc86xx_smp_init();
88#endif 81#endif
89 82
83 fsl_pci_assign_primary();
84
90 /* Remap basic board registers */ 85 /* Remap basic board registers */
91 regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs"); 86 regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs");
92 if (regs) { 87 if (regs) {
@@ -221,6 +216,7 @@ static long __init mpc86xx_time_init(void)
221static __initdata struct of_device_id of_bus_ids[] = { 216static __initdata struct of_device_id of_bus_ids[] = {
222 { .compatible = "simple-bus", }, 217 { .compatible = "simple-bus", },
223 { .compatible = "gianfar", }, 218 { .compatible = "gianfar", },
219 { .compatible = "fsl,mpc8641-pcie", },
224 {}, 220 {},
225}; 221};
226 222
@@ -231,7 +227,7 @@ static int __init declare_of_platform_devices(void)
231 227
232 return 0; 228 return 0;
233} 229}
234machine_device_initcall(gef_ppc9a, declare_of_platform_devices); 230machine_arch_initcall(gef_ppc9a, declare_of_platform_devices);
235 231
236define_machine(gef_ppc9a) { 232define_machine(gef_ppc9a) {
237 .name = "GE PPC9A", 233 .name = "GE PPC9A",
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index cc6a91ae0889..0b7851330a07 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -73,20 +73,14 @@ static void __init gef_sbc310_init_irq(void)
73static void __init gef_sbc310_setup_arch(void) 73static void __init gef_sbc310_setup_arch(void)
74{ 74{
75 struct device_node *regs; 75 struct device_node *regs;
76#ifdef CONFIG_PCI
77 struct device_node *np;
78
79 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
80 fsl_add_bridge(np, 1);
81 }
82#endif
83
84 printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n"); 76 printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n");
85 77
86#ifdef CONFIG_SMP 78#ifdef CONFIG_SMP
87 mpc86xx_smp_init(); 79 mpc86xx_smp_init();
88#endif 80#endif
89 81
82 fsl_pci_assign_primary();
83
90 /* Remap basic board registers */ 84 /* Remap basic board registers */
91 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs"); 85 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
92 if (regs) { 86 if (regs) {
@@ -209,6 +203,7 @@ static long __init mpc86xx_time_init(void)
209static __initdata struct of_device_id of_bus_ids[] = { 203static __initdata struct of_device_id of_bus_ids[] = {
210 { .compatible = "simple-bus", }, 204 { .compatible = "simple-bus", },
211 { .compatible = "gianfar", }, 205 { .compatible = "gianfar", },
206 { .compatible = "fsl,mpc8641-pcie", },
212 {}, 207 {},
213}; 208};
214 209
@@ -219,7 +214,7 @@ static int __init declare_of_platform_devices(void)
219 214
220 return 0; 215 return 0;
221} 216}
222machine_device_initcall(gef_sbc310, declare_of_platform_devices); 217machine_arch_initcall(gef_sbc310, declare_of_platform_devices);
223 218
224define_machine(gef_sbc310) { 219define_machine(gef_sbc310) {
225 .name = "GE SBC310", 220 .name = "GE SBC310",
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index aead6b337f4a..b9eb174897b1 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -73,13 +73,6 @@ static void __init gef_sbc610_init_irq(void)
73static void __init gef_sbc610_setup_arch(void) 73static void __init gef_sbc610_setup_arch(void)
74{ 74{
75 struct device_node *regs; 75 struct device_node *regs;
76#ifdef CONFIG_PCI
77 struct device_node *np;
78
79 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
80 fsl_add_bridge(np, 1);
81 }
82#endif
83 76
84 printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n"); 77 printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n");
85 78
@@ -87,6 +80,8 @@ static void __init gef_sbc610_setup_arch(void)
87 mpc86xx_smp_init(); 80 mpc86xx_smp_init();
88#endif 81#endif
89 82
83 fsl_pci_assign_primary();
84
90 /* Remap basic board registers */ 85 /* Remap basic board registers */
91 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs"); 86 regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
92 if (regs) { 87 if (regs) {
@@ -198,6 +193,7 @@ static long __init mpc86xx_time_init(void)
198static __initdata struct of_device_id of_bus_ids[] = { 193static __initdata struct of_device_id of_bus_ids[] = {
199 { .compatible = "simple-bus", }, 194 { .compatible = "simple-bus", },
200 { .compatible = "gianfar", }, 195 { .compatible = "gianfar", },
196 { .compatible = "fsl,mpc8641-pcie", },
201 {}, 197 {},
202}; 198};
203 199
@@ -208,7 +204,7 @@ static int __init declare_of_platform_devices(void)
208 204
209 return 0; 205 return 0;
210} 206}
211machine_device_initcall(gef_sbc610, declare_of_platform_devices); 207machine_arch_initcall(gef_sbc610, declare_of_platform_devices);
212 208
213define_machine(gef_sbc610) { 209define_machine(gef_sbc610) {
214 .name = "GE SBC610", 210 .name = "GE SBC610",
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 62cd3c555bfb..a817398a56da 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -91,6 +91,9 @@ static struct of_device_id __initdata mpc8610_ids[] = {
91 { .compatible = "simple-bus", }, 91 { .compatible = "simple-bus", },
92 /* So that the DMA channel nodes can be probed individually: */ 92 /* So that the DMA channel nodes can be probed individually: */
93 { .compatible = "fsl,eloplus-dma", }, 93 { .compatible = "fsl,eloplus-dma", },
94 /* PCI controllers */
95 { .compatible = "fsl,mpc8610-pci", },
96 { .compatible = "fsl,mpc8641-pcie", },
94 {} 97 {}
95}; 98};
96 99
@@ -107,7 +110,7 @@ static int __init mpc8610_declare_of_platform_devices(void)
107 110
108 return 0; 111 return 0;
109} 112}
110machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices); 113machine_arch_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
111 114
112#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 115#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
113 116
@@ -278,25 +281,13 @@ mpc8610hpcd_valid_monitor_port(enum fsl_diu_monitor_port port)
278static void __init mpc86xx_hpcd_setup_arch(void) 281static void __init mpc86xx_hpcd_setup_arch(void)
279{ 282{
280 struct resource r; 283 struct resource r;
281 struct device_node *np;
282 unsigned char *pixis; 284 unsigned char *pixis;
283 285
284 if (ppc_md.progress) 286 if (ppc_md.progress)
285 ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0); 287 ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);
286 288
287#ifdef CONFIG_PCI 289 fsl_pci_assign_primary();
288 for_each_node_by_type(np, "pci") { 290
289 if (of_device_is_compatible(np, "fsl,mpc8610-pci")
290 || of_device_is_compatible(np, "fsl,mpc8641-pcie")) {
291 struct resource rsrc;
292 of_address_to_resource(np, 0, &rsrc);
293 if ((rsrc.start & 0xfffff) == 0xa000)
294 fsl_add_bridge(np, 1);
295 else
296 fsl_add_bridge(np, 0);
297 }
298 }
299#endif
300#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 291#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
301 diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format; 292 diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format;
302 diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table; 293 diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 817245bc0219..e8bf3fae5606 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -19,7 +19,6 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/of_platform.h> 21#include <linux/of_platform.h>
22#include <linux/memblock.h>
23 22
24#include <asm/time.h> 23#include <asm/time.h>
25#include <asm/machdep.h> 24#include <asm/machdep.h>
@@ -51,15 +50,8 @@ extern int uli_exclude_device(struct pci_controller *hose,
51static int mpc86xx_exclude_device(struct pci_controller *hose, 50static int mpc86xx_exclude_device(struct pci_controller *hose,
52 u_char bus, u_char devfn) 51 u_char bus, u_char devfn)
53{ 52{
54 struct device_node* node; 53 if (hose->dn == fsl_pci_primary)
55 struct resource rsrc;
56
57 node = hose->dn;
58 of_address_to_resource(node, 0, &rsrc);
59
60 if ((rsrc.start & 0xfffff) == 0x8000) {
61 return uli_exclude_device(hose, bus, devfn); 54 return uli_exclude_device(hose, bus, devfn);
62 }
63 55
64 return PCIBIOS_SUCCESSFUL; 56 return PCIBIOS_SUCCESSFUL;
65} 57}
@@ -69,30 +61,11 @@ static int mpc86xx_exclude_device(struct pci_controller *hose,
69static void __init 61static void __init
70mpc86xx_hpcn_setup_arch(void) 62mpc86xx_hpcn_setup_arch(void)
71{ 63{
72#ifdef CONFIG_PCI
73 struct device_node *np;
74 struct pci_controller *hose;
75#endif
76 dma_addr_t max = 0xffffffff;
77
78 if (ppc_md.progress) 64 if (ppc_md.progress)
79 ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0); 65 ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0);
80 66
81#ifdef CONFIG_PCI 67#ifdef CONFIG_PCI
82 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
83 struct resource rsrc;
84 of_address_to_resource(np, 0, &rsrc);
85 if ((rsrc.start & 0xfffff) == 0x8000)
86 fsl_add_bridge(np, 1);
87 else
88 fsl_add_bridge(np, 0);
89 hose = pci_find_hose_for_OF_device(np);
90 max = min(max, hose->dma_window_base_cur +
91 hose->dma_window_size);
92 }
93
94 ppc_md.pci_exclude_device = mpc86xx_exclude_device; 68 ppc_md.pci_exclude_device = mpc86xx_exclude_device;
95
96#endif 69#endif
97 70
98 printk("MPC86xx HPCN board from Freescale Semiconductor\n"); 71 printk("MPC86xx HPCN board from Freescale Semiconductor\n");
@@ -101,13 +74,9 @@ mpc86xx_hpcn_setup_arch(void)
101 mpc86xx_smp_init(); 74 mpc86xx_smp_init();
102#endif 75#endif
103 76
104#ifdef CONFIG_SWIOTLB 77 fsl_pci_assign_primary();
105 if ((memblock_end_of_DRAM() - 1) > max) { 78
106 ppc_swiotlb_enable = 1; 79 swiotlb_detect_4g();
107 set_pci_dma_ops(&swiotlb_dma_ops);
108 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
109 }
110#endif
111} 80}
112 81
113 82
@@ -162,6 +131,7 @@ static __initdata struct of_device_id of_bus_ids[] = {
162 { .compatible = "simple-bus", }, 131 { .compatible = "simple-bus", },
163 { .compatible = "fsl,srio", }, 132 { .compatible = "fsl,srio", },
164 { .compatible = "gianfar", }, 133 { .compatible = "gianfar", },
134 { .compatible = "fsl,mpc8641-pcie", },
165 {}, 135 {},
166}; 136};
167 137
@@ -171,7 +141,7 @@ static int __init declare_of_platform_devices(void)
171 141
172 return 0; 142 return 0;
173} 143}
174machine_device_initcall(mpc86xx_hpcn, declare_of_platform_devices); 144machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices);
175machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier); 145machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier);
176 146
177define_machine(mpc86xx_hpcn) { 147define_machine(mpc86xx_hpcn) {
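The swiotlb_detect_4g() call introduced above stands in for the per-board check that was removed, which enabled the software IOTLB once RAM extended past what the PCI DMA window could address. A rough sketch of the generic 4 GiB check is given below; it is written only from the identifiers visible in the removed lines (memblock_end_of_DRAM(), ppc_swiotlb_enable), and the actual helper added by this series may do more, such as installing the swiotlb DMA ops.

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/swiotlb.h>

/*
 * Illustrative only: enable swiotlb bounce buffering when any RAM lies
 * above the 32-bit (4 GiB) DMA boundary. This mirrors the condition the
 * boards used to open-code against their PCI DMA window.
 */
static void __init detect_dram_above_4g(void)
{
	if ((memblock_end_of_DRAM() - 1) > 0xffffffffULL)
		ppc_swiotlb_enable = 1;
}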
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index e7007d0d949e..b47a8fd0f3d3 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -38,23 +38,16 @@
38static void __init 38static void __init
39sbc8641_setup_arch(void) 39sbc8641_setup_arch(void)
40{ 40{
41#ifdef CONFIG_PCI
42 struct device_node *np;
43#endif
44
45 if (ppc_md.progress) 41 if (ppc_md.progress)
46 ppc_md.progress("sbc8641_setup_arch()", 0); 42 ppc_md.progress("sbc8641_setup_arch()", 0);
47 43
48#ifdef CONFIG_PCI
49 for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie")
50 fsl_add_bridge(np, 0);
51#endif
52
53 printk("SBC8641 board from Wind River\n"); 44 printk("SBC8641 board from Wind River\n");
54 45
55#ifdef CONFIG_SMP 46#ifdef CONFIG_SMP
56 mpc86xx_smp_init(); 47 mpc86xx_smp_init();
57#endif 48#endif
49
50 fsl_pci_assign_primary();
58} 51}
59 52
60 53
@@ -102,6 +95,7 @@ mpc86xx_time_init(void)
102static __initdata struct of_device_id of_bus_ids[] = { 95static __initdata struct of_device_id of_bus_ids[] = {
103 { .compatible = "simple-bus", }, 96 { .compatible = "simple-bus", },
104 { .compatible = "gianfar", }, 97 { .compatible = "gianfar", },
98 { .compatible = "fsl,mpc8641-pcie", },
105 {}, 99 {},
106}; 100};
107 101
@@ -111,7 +105,7 @@ static int __init declare_of_platform_devices(void)
111 105
112 return 0; 106 return 0;
113} 107}
114machine_device_initcall(sbc8641, declare_of_platform_devices); 108machine_arch_initcall(sbc8641, declare_of_platform_devices);
115 109
116define_machine(sbc8641) { 110define_machine(sbc8641) {
117 .name = "SBC8641D", 111 .name = "SBC8641D",
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
index 852592b2b712..affcf566d460 100644
--- a/arch/powerpc/platforms/cell/beat.c
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -136,9 +136,9 @@ ssize_t beat_nvram_get_size(void)
136 return BEAT_NVRAM_SIZE; 136 return BEAT_NVRAM_SIZE;
137} 137}
138 138
139int beat_set_xdabr(unsigned long dabr) 139int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
140{ 140{
141 if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER)) 141 if (beat_set_dabr(dabr, dabrx))
142 return -1; 142 return -1;
143 return 0; 143 return 0;
144} 144}
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
index 32c8efcedc80..bfcb8e351ae5 100644
--- a/arch/powerpc/platforms/cell/beat.h
+++ b/arch/powerpc/platforms/cell/beat.h
@@ -32,7 +32,7 @@ void beat_get_rtc_time(struct rtc_time *);
32ssize_t beat_nvram_get_size(void); 32ssize_t beat_nvram_get_size(void);
33ssize_t beat_nvram_read(char *, size_t, loff_t *); 33ssize_t beat_nvram_read(char *, size_t, loff_t *);
34ssize_t beat_nvram_write(char *, size_t, loff_t *); 34ssize_t beat_nvram_write(char *, size_t, loff_t *);
35int beat_set_xdabr(unsigned long); 35int beat_set_xdabr(unsigned long, unsigned long);
36void beat_power_save(void); 36void beat_power_save(void);
37void beat_kexec_cpu_down(int, int); 37void beat_kexec_cpu_down(int, int);
38 38
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 943c9d39aa16..0f6f83988b3d 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -88,7 +88,7 @@ static inline unsigned int beat_read_mask(unsigned hpte_group)
88} 88}
89 89
90static long beat_lpar_hpte_insert(unsigned long hpte_group, 90static long beat_lpar_hpte_insert(unsigned long hpte_group,
91 unsigned long va, unsigned long pa, 91 unsigned long vpn, unsigned long pa,
92 unsigned long rflags, unsigned long vflags, 92 unsigned long rflags, unsigned long vflags,
93 int psize, int ssize) 93 int psize, int ssize)
94{ 94{
@@ -103,7 +103,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
103 "rflags=%lx, vflags=%lx, psize=%d)\n", 103 "rflags=%lx, vflags=%lx, psize=%d)\n",
104 hpte_group, va, pa, rflags, vflags, psize); 104 hpte_group, va, pa, rflags, vflags, psize);
105 105
106 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 106 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
107 vflags | HPTE_V_VALID; 107 vflags | HPTE_V_VALID;
108 hpte_r = hpte_encode_r(pa, psize) | rflags; 108 hpte_r = hpte_encode_r(pa, psize) | rflags;
109 109
@@ -184,14 +184,14 @@ static void beat_lpar_hptab_clear(void)
184 */ 184 */
185static long beat_lpar_hpte_updatepp(unsigned long slot, 185static long beat_lpar_hpte_updatepp(unsigned long slot,
186 unsigned long newpp, 186 unsigned long newpp,
187 unsigned long va, 187 unsigned long vpn,
188 int psize, int ssize, int local) 188 int psize, int ssize, int local)
189{ 189{
190 unsigned long lpar_rc; 190 unsigned long lpar_rc;
191 u64 dummy0, dummy1; 191 u64 dummy0, dummy1;
192 unsigned long want_v; 192 unsigned long want_v;
193 193
194 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 194 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
195 195
196 DBG_LOW(" update: " 196 DBG_LOW(" update: "
197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", 197 "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -220,15 +220,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
220 return 0; 220 return 0;
221} 221}
222 222
223static long beat_lpar_hpte_find(unsigned long va, int psize) 223static long beat_lpar_hpte_find(unsigned long vpn, int psize)
224{ 224{
225 unsigned long hash; 225 unsigned long hash;
226 unsigned long i, j; 226 unsigned long i, j;
227 long slot; 227 long slot;
228 unsigned long want_v, hpte_v; 228 unsigned long want_v, hpte_v;
229 229
230 hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); 230 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
231 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 231 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
232 232
233 for (j = 0; j < 2; j++) { 233 for (j = 0; j < 2; j++) {
234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 234 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -255,14 +255,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
255 unsigned long ea, 255 unsigned long ea,
256 int psize, int ssize) 256 int psize, int ssize)
257{ 257{
258 unsigned long lpar_rc, slot, vsid, va; 258 unsigned long vpn;
259 unsigned long lpar_rc, slot, vsid;
259 u64 dummy0, dummy1; 260 u64 dummy0, dummy1;
260 261
261 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); 262 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
262 va = (vsid << 28) | (ea & 0x0fffffff); 263 vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
263 264
264 raw_spin_lock(&beat_htab_lock); 265 raw_spin_lock(&beat_htab_lock);
265 slot = beat_lpar_hpte_find(va, psize); 266 slot = beat_lpar_hpte_find(vpn, psize);
266 BUG_ON(slot == -1); 267 BUG_ON(slot == -1);
267 268
268 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, 269 lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@@ -272,7 +273,7 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
272 BUG_ON(lpar_rc != 0); 273 BUG_ON(lpar_rc != 0);
273} 274}
274 275
275static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 276static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
276 int psize, int ssize, int local) 277 int psize, int ssize, int local)
277{ 278{
278 unsigned long want_v; 279 unsigned long want_v;
@@ -282,7 +283,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
282 283
283 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 284 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
284 slot, va, psize, local); 285 slot, va, psize, local);
285 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 286 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
286 287
287 raw_spin_lock_irqsave(&beat_htab_lock, flags); 288 raw_spin_lock_irqsave(&beat_htab_lock, flags);
288 dummy1 = beat_lpar_hpte_getword0(slot); 289 dummy1 = beat_lpar_hpte_getword0(slot);
@@ -311,7 +312,7 @@ void __init hpte_init_beat(void)
311} 312}
312 313
313static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, 314static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
314 unsigned long va, unsigned long pa, 315 unsigned long vpn, unsigned long pa,
315 unsigned long rflags, unsigned long vflags, 316 unsigned long rflags, unsigned long vflags,
316 int psize, int ssize) 317 int psize, int ssize)
317{ 318{
@@ -322,11 +323,11 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
322 return -1; 323 return -1;
323 324
324 if (!(vflags & HPTE_V_BOLTED)) 325 if (!(vflags & HPTE_V_BOLTED))
325 DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 326 DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
326 "rflags=%lx, vflags=%lx, psize=%d)\n", 327 "rflags=%lx, vflags=%lx, psize=%d)\n",
327 hpte_group, va, pa, rflags, vflags, psize); 328 hpte_group, vpn, pa, rflags, vflags, psize);
328 329
329 hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | 330 hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
330 vflags | HPTE_V_VALID; 331 vflags | HPTE_V_VALID;
331 hpte_r = hpte_encode_r(pa, psize) | rflags; 332 hpte_r = hpte_encode_r(pa, psize) | rflags;
332 333
@@ -364,14 +365,14 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
364 */ 365 */
365static long beat_lpar_hpte_updatepp_v3(unsigned long slot, 366static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
366 unsigned long newpp, 367 unsigned long newpp,
367 unsigned long va, 368 unsigned long vpn,
368 int psize, int ssize, int local) 369 int psize, int ssize, int local)
369{ 370{
370 unsigned long lpar_rc; 371 unsigned long lpar_rc;
371 unsigned long want_v; 372 unsigned long want_v;
372 unsigned long pss; 373 unsigned long pss;
373 374
374 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 375 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
375 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 376 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
376 377
377 DBG_LOW(" update: " 378 DBG_LOW(" update: "
@@ -392,16 +393,16 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
392 return 0; 393 return 0;
393} 394}
394 395
395static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, 396static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
396 int psize, int ssize, int local) 397 int psize, int ssize, int local)
397{ 398{
398 unsigned long want_v; 399 unsigned long want_v;
399 unsigned long lpar_rc; 400 unsigned long lpar_rc;
400 unsigned long pss; 401 unsigned long pss;
401 402
402 DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 403 DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
403 slot, va, psize, local); 404 slot, vpn, psize, local);
404 want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); 405 want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
405 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; 406 pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
406 407
407 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); 408 lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 14943ef01918..7d2d036754b5 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -19,12 +19,12 @@
19 19
20#undef DEBUG 20#undef DEBUG
21 21
22#include <linux/memblock.h>
22#include <linux/types.h> 23#include <linux/types.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <asm/iommu.h> 26#include <asm/iommu.h>
26#include <asm/machdep.h> 27#include <asm/machdep.h>
27#include <asm/abs_addr.h>
28#include <asm/firmware.h> 28#include <asm/firmware.h>
29 29
30#define IOBMAP_PAGE_SHIFT 12 30#define IOBMAP_PAGE_SHIFT 12
@@ -99,7 +99,7 @@ static int iobmap_build(struct iommu_table *tbl, long index,
99 ip = ((u32 *)tbl->it_base) + index; 99 ip = ((u32 *)tbl->it_base) + index;
100 100
101 while (npages--) { 101 while (npages--) {
102 rpn = virt_to_abs(uaddr) >> IOBMAP_PAGE_SHIFT; 102 rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;
103 103
104 *(ip++) = IOBMAP_L2E_V | rpn; 104 *(ip++) = IOBMAP_L2E_V | rpn;
105 /* invalidate tlb, can be optimized more */ 105 /* invalidate tlb, can be optimized more */
@@ -258,7 +258,7 @@ void __init alloc_iobmap_l2(void)
258 return; 258 return;
259#endif 259#endif
260 /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ 260 /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
261 iob_l2_base = (u32 *)abs_to_virt(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); 261 iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
262 262
263 printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base); 263 printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
264} 264}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 0e7eccc0f88d..471aa3ccd9fd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -30,19 +30,10 @@
30#include <asm/opal.h> 30#include <asm/opal.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34 33
35#include "powernv.h" 34#include "powernv.h"
36#include "pci.h" 35#include "pci.h"
37 36
38struct resource_wrap {
39 struct list_head link;
40 resource_size_t size;
41 resource_size_t align;
42 struct pci_dev *dev; /* Set if it's a device */
43 struct pci_bus *bus; /* Set if it's a bridge */
44};
45
46static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe, 37static int __pe_printk(const char *level, const struct pnv_ioda_pe *pe,
47 struct va_format *vaf) 38 struct va_format *vaf)
48{ 39{
@@ -78,273 +69,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
78define_pe_printk_level(pe_warn, KERN_WARNING); 69define_pe_printk_level(pe_warn, KERN_WARNING);
79define_pe_printk_level(pe_info, KERN_INFO); 70define_pe_printk_level(pe_info, KERN_INFO);
80 71
81
82/* Calculate resource usage & alignment requirement of a single
83 * device. This will also assign all resources within the device
84 * for a given type starting at 0 for the biggest one and then
85 * assigning in decreasing order of size.
86 */
87static void __devinit pnv_ioda_calc_dev(struct pci_dev *dev, unsigned int flags,
88 resource_size_t *size,
89 resource_size_t *align)
90{
91 resource_size_t start;
92 struct resource *r;
93 int i;
94
95 pr_devel(" -> CDR %s\n", pci_name(dev));
96
97 *size = *align = 0;
98
99 /* Clear the resources out and mark them all unset */
100 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
101 r = &dev->resource[i];
102 if (!(r->flags & flags))
103 continue;
104 if (r->start) {
105 r->end -= r->start;
106 r->start = 0;
107 }
108 r->flags |= IORESOURCE_UNSET;
109 }
110
111 /* We currently keep all memory resources together, we
112 * will handle prefetch & 64-bit separately in the future
113 * but for now we stick everybody in M32
114 */
115 start = 0;
116 for (;;) {
117 resource_size_t max_size = 0;
118 int max_no = -1;
119
120 /* Find next biggest resource */
121 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
122 r = &dev->resource[i];
123 if (!(r->flags & IORESOURCE_UNSET) ||
124 !(r->flags & flags))
125 continue;
126 if (resource_size(r) > max_size) {
127 max_size = resource_size(r);
128 max_no = i;
129 }
130 }
131 if (max_no < 0)
132 break;
133 r = &dev->resource[max_no];
134 if (max_size > *align)
135 *align = max_size;
136 *size += max_size;
137 r->start = start;
138 start += max_size;
139 r->end = r->start + max_size - 1;
140 r->flags &= ~IORESOURCE_UNSET;
141 pr_devel(" -> R%d %016llx..%016llx\n",
142 max_no, r->start, r->end);
143 }
144 pr_devel(" <- CDR %s size=%llx align=%llx\n",
145 pci_name(dev), *size, *align);
146}
147
148/* Allocate a resource "wrap" for a given device or bridge and
149 * insert it at the right position in the sorted list
150 */
151static void __devinit pnv_ioda_add_wrap(struct list_head *list,
152 struct pci_bus *bus,
153 struct pci_dev *dev,
154 resource_size_t size,
155 resource_size_t align)
156{
157 struct resource_wrap *w1, *w = kzalloc(sizeof(*w), GFP_KERNEL);
158
159 w->size = size;
160 w->align = align;
161 w->dev = dev;
162 w->bus = bus;
163
164 list_for_each_entry(w1, list, link) {
165 if (w1->align < align) {
166 list_add_tail(&w->link, &w1->link);
167 return;
168 }
169 }
170 list_add_tail(&w->link, list);
171}
172
173/* Offset device resources of a given type */
174static void __devinit pnv_ioda_offset_dev(struct pci_dev *dev,
175 unsigned int flags,
176 resource_size_t offset)
177{
178 struct resource *r;
179 int i;
180
181 pr_devel(" -> ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
182
183 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
184 r = &dev->resource[i];
185 if (r->flags & flags) {
186 dev->resource[i].start += offset;
187 dev->resource[i].end += offset;
188 }
189 }
190
191 pr_devel(" <- ODR %s [%x] +%016llx\n", pci_name(dev), flags, offset);
192}
193
194/* Offset bus resources (& all children) of a given type */
195static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
196 unsigned int flags,
197 resource_size_t offset)
198{
199 struct resource *r;
200 struct pci_dev *dev;
201 struct pci_bus *cbus;
202 int i;
203
204 pr_devel(" -> OBR %s [%x] +%016llx\n",
205 bus->self ? pci_name(bus->self) : "root", flags, offset);
206
207 pci_bus_for_each_resource(bus, r, i) {
208 if (r && (r->flags & flags)) {
209 r->start += offset;
210 r->end += offset;
211 }
212 }
213 list_for_each_entry(dev, &bus->devices, bus_list)
214 pnv_ioda_offset_dev(dev, flags, offset);
215 list_for_each_entry(cbus, &bus->children, node)
216 pnv_ioda_offset_bus(cbus, flags, offset);
217
218 pr_devel(" <- OBR %s [%x]\n",
219 bus->self ? pci_name(bus->self) : "root", flags);
220}
221
222/* This is the guts of our IODA resource allocation. This is called
223 * recursively for each bus in the system. It calculates all the
224 * necessary size and requirements for children and assign them
225 * resources such that:
226 *
227 * - Each function fits in it's own contiguous set of IO/M32
228 * segment
229 *
230 * - All segments behind a P2P bridge are contiguous and obey
231 * alignment constraints of those bridges
232 */
233static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
234 resource_size_t *size,
235 resource_size_t *align)
236{
237 struct pci_controller *hose = pci_bus_to_host(bus);
238 struct pnv_phb *phb = hose->private_data;
239 resource_size_t dev_size, dev_align, start;
240 resource_size_t min_align, min_balign;
241 struct pci_dev *cdev;
242 struct pci_bus *cbus;
243 struct list_head head;
244 struct resource_wrap *w;
245 unsigned int bres;
246
247 *size = *align = 0;
248
249 pr_devel("-> CBR %s [%x]\n",
250 bus->self ? pci_name(bus->self) : "root", flags);
251
252 /* Calculate alignment requirements based on the type
253 * of resource we are working on
254 */
255 if (flags & IORESOURCE_IO) {
256 bres = 0;
257 min_align = phb->ioda.io_segsize;
258 min_balign = 0x1000;
259 } else {
260 bres = 1;
261 min_align = phb->ioda.m32_segsize;
262 min_balign = 0x100000;
263 }
264
265 /* Gather all our children resources ordered by alignment */
266 INIT_LIST_HEAD(&head);
267
268 /* - Busses */
269 list_for_each_entry(cbus, &bus->children, node) {
270 pnv_ioda_calc_bus(cbus, flags, &dev_size, &dev_align);
271 pnv_ioda_add_wrap(&head, cbus, NULL, dev_size, dev_align);
272 }
273
274 /* - Devices */
275 list_for_each_entry(cdev, &bus->devices, bus_list) {
276 pnv_ioda_calc_dev(cdev, flags, &dev_size, &dev_align);
277 /* Align them to segment size */
278 if (dev_align < min_align)
279 dev_align = min_align;
280 pnv_ioda_add_wrap(&head, NULL, cdev, dev_size, dev_align);
281 }
282 if (list_empty(&head))
283 goto empty;
284
285 /* Now we can do two things: assign offsets to them within that
286 * level and get our total alignment & size requirements. The
287 * assignment algorithm is going to be uber-trivial for now, we
288 * can try to be smarter later at filling out holes.
289 */
290 if (bus->self) {
291 /* No offset for downstream bridges */
292 start = 0;
293 } else {
294 /* Offset from the root */
295 if (flags & IORESOURCE_IO)
296 /* Don't hand out IO 0 */
297 start = hose->io_resource.start + 0x1000;
298 else
299 start = hose->mem_resources[0].start;
300 }
301 while(!list_empty(&head)) {
302 w = list_first_entry(&head, struct resource_wrap, link);
303 list_del(&w->link);
304 if (w->size) {
305 if (start) {
306 start = ALIGN(start, w->align);
307 if (w->dev)
308 pnv_ioda_offset_dev(w->dev,flags,start);
309 else if (w->bus)
310 pnv_ioda_offset_bus(w->bus,flags,start);
311 }
312 if (w->align > *align)
313 *align = w->align;
314 }
315 start += w->size;
316 kfree(w);
317 }
318 *size = start;
319
320 /* Align and setup bridge resources */
321 *align = max_t(resource_size_t, *align,
322 max_t(resource_size_t, min_align, min_balign));
323 *size = ALIGN(*size,
324 max_t(resource_size_t, min_align, min_balign));
325 empty:
326 /* Only setup P2P's, not the PHB itself */
327 if (bus->self) {
328 struct resource *res = bus->resource[bres];
329
330 if (WARN_ON(res == NULL))
331 return;
332
333 /*
334 * FIXME: We should probably export and call
335 * pci_bridge_check_ranges() to properly re-initialize
336 * the PCI portion of the flags here, and to detect
337 * what the bridge actually supports.
338 */
339 res->start = 0;
340 res->flags = (*size) ? flags : 0;
341 res->end = (*size) ? (*size - 1) : 0;
342 }
343
344 pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
345 bus->self ? pci_name(bus->self) : "root", flags,*size,*align);
346}
347
348static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) 72static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
349{ 73{
350 struct device_node *np; 74 struct device_node *np;
@@ -355,172 +79,6 @@ static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
355 return PCI_DN(np); 79 return PCI_DN(np);
356} 80}
357 81
358static void __devinit pnv_ioda_setup_pe_segments(struct pci_dev *dev)
359{
360 struct pci_controller *hose = pci_bus_to_host(dev->bus);
361 struct pnv_phb *phb = hose->private_data;
362 struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
363 unsigned int pe, i;
364 resource_size_t pos;
365 struct resource io_res;
366 struct resource m32_res;
367 struct pci_bus_region region;
368 int rc;
369
370 /* Anything not referenced in the device-tree gets PE#0 */
371 pe = pdn ? pdn->pe_number : 0;
372
373 /* Calculate the device min/max */
374 io_res.start = m32_res.start = (resource_size_t)-1;
375 io_res.end = m32_res.end = 0;
376 io_res.flags = IORESOURCE_IO;
377 m32_res.flags = IORESOURCE_MEM;
378
379 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
380 struct resource *r = NULL;
381 if (dev->resource[i].flags & IORESOURCE_IO)
382 r = &io_res;
383 if (dev->resource[i].flags & IORESOURCE_MEM)
384 r = &m32_res;
385 if (!r)
386 continue;
387 if (dev->resource[i].start < r->start)
388 r->start = dev->resource[i].start;
389 if (dev->resource[i].end > r->end)
390 r->end = dev->resource[i].end;
391 }
392
393 /* Setup IO segments */
394 if (io_res.start < io_res.end) {
395 pcibios_resource_to_bus(dev, &region, &io_res);
396 pos = region.start;
397 i = pos / phb->ioda.io_segsize;
398 while(i < phb->ioda.total_pe && pos <= region.end) {
399 if (phb->ioda.io_segmap[i]) {
400 pr_err("%s: Trying to use IO seg #%d which is"
401 " already used by PE# %d\n",
402 pci_name(dev), i,
403 phb->ioda.io_segmap[i]);
404 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
405 break;
406 }
407 phb->ioda.io_segmap[i] = pe;
408 rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
409 OPAL_IO_WINDOW_TYPE,
410 0, i);
411 if (rc != OPAL_SUCCESS) {
412 pr_err("%s: OPAL error %d setting up mapping"
413 " for IO seg# %d\n",
414 pci_name(dev), rc, i);
415 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
416 break;
417 }
418 pos += phb->ioda.io_segsize;
419 i++;
420 };
421 }
422
423 /* Setup M32 segments */
424 if (m32_res.start < m32_res.end) {
425 pcibios_resource_to_bus(dev, &region, &m32_res);
426 pos = region.start;
427 i = pos / phb->ioda.m32_segsize;
428 while(i < phb->ioda.total_pe && pos <= region.end) {
429 if (phb->ioda.m32_segmap[i]) {
430 pr_err("%s: Trying to use M32 seg #%d which is"
431 " already used by PE# %d\n",
432 pci_name(dev), i,
433 phb->ioda.m32_segmap[i]);
434 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
435 break;
436 }
437 phb->ioda.m32_segmap[i] = pe;
438 rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe,
439 OPAL_M32_WINDOW_TYPE,
440 0, i);
441 if (rc != OPAL_SUCCESS) {
442 pr_err("%s: OPAL error %d setting up mapping"
443 " for M32 seg# %d\n",
444 pci_name(dev), rc, i);
445 /* XXX DO SOMETHING TO DISABLE DEVICE ? */
446 break;
447 }
448 pos += phb->ioda.m32_segsize;
449 i++;
450 }
451 }
452}
453
454/* Check if a resource still fits in the total IO or M32 range
455 * for a given PHB
456 */
457static int __devinit pnv_ioda_resource_fit(struct pci_controller *hose,
458 struct resource *r)
459{
460 struct resource *bounds;
461
462 if (r->flags & IORESOURCE_IO)
463 bounds = &hose->io_resource;
464 else if (r->flags & IORESOURCE_MEM)
465 bounds = &hose->mem_resources[0];
466 else
467 return 1;
468
469 if (r->start >= bounds->start && r->end <= bounds->end)
470 return 1;
471 r->flags = 0;
472 return 0;
473}
474
475static void __devinit pnv_ioda_update_resources(struct pci_bus *bus)
476{
477 struct pci_controller *hose = pci_bus_to_host(bus);
478 struct pci_bus *cbus;
479 struct pci_dev *cdev;
480 unsigned int i;
481
482 /* We used to clear all device enables here. However it looks like
483 * clearing MEM enable causes Obsidian (IPR SCS) to go bonkers,
484 * and shoot fatal errors to the PHB which in turns fences itself
485 * and we can't recover from that ... yet. So for now, let's leave
486 * the enables as-is and hope for the best.
487 */
488
489 /* Check if bus resources fit in our IO or M32 range */
490 for (i = 0; bus->self && (i < 2); i++) {
491 struct resource *r = bus->resource[i];
492 if (r && !pnv_ioda_resource_fit(hose, r))
493 pr_err("%s: Bus %d resource %d disabled, no room\n",
494 pci_name(bus->self), bus->number, i);
495 }
496
497 /* Update self if it's not a PHB */
498 if (bus->self)
499 pci_setup_bridge(bus);
500
501 /* Update child devices */
502 list_for_each_entry(cdev, &bus->devices, bus_list) {
503 /* Check if resource fits, if not, disabled it */
504 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
505 struct resource *r = &cdev->resource[i];
506 if (!pnv_ioda_resource_fit(hose, r))
507 pr_err("%s: Resource %d disabled, no room\n",
508 pci_name(cdev), i);
509 }
510
511 /* Assign segments */
512 pnv_ioda_setup_pe_segments(cdev);
513
514 /* Update HW BARs */
515 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
516 pci_update_resource(cdev, i);
517 }
518
519 /* Update child busses */
520 list_for_each_entry(cbus, &bus->children, node)
521 pnv_ioda_update_resources(cbus);
522}
523
524static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb) 82static int __devinit pnv_ioda_alloc_pe(struct pnv_phb *phb)
525{ 83{
526 unsigned long pe; 84 unsigned long pe;
@@ -548,7 +106,7 @@ static void __devinit pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
548 * but in the meantime, we need to protect them to avoid warnings 106 * but in the meantime, we need to protect them to avoid warnings
549 */ 107 */
550#ifdef CONFIG_PCI_MSI 108#ifdef CONFIG_PCI_MSI
551static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev) 109static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
552{ 110{
553 struct pci_controller *hose = pci_bus_to_host(dev->bus); 111 struct pci_controller *hose = pci_bus_to_host(dev->bus);
554 struct pnv_phb *phb = hose->private_data; 112 struct pnv_phb *phb = hose->private_data;
@@ -560,19 +118,6 @@ static struct pnv_ioda_pe * __devinit __pnv_ioda_get_one_pe(struct pci_dev *dev)
560 return NULL; 118 return NULL;
561 return &phb->ioda.pe_array[pdn->pe_number]; 119 return &phb->ioda.pe_array[pdn->pe_number];
562} 120}
563
564static struct pnv_ioda_pe * __devinit pnv_ioda_get_pe(struct pci_dev *dev)
565{
566 struct pnv_ioda_pe *pe = __pnv_ioda_get_one_pe(dev);
567
568 while (!pe && dev->bus->self) {
569 dev = dev->bus->self;
570 pe = __pnv_ioda_get_one_pe(dev);
571 if (pe)
572 pe = pe->bus_pe;
573 }
574 return pe;
575}
576#endif /* CONFIG_PCI_MSI */ 121#endif /* CONFIG_PCI_MSI */
577 122
578static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb, 123static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
@@ -589,7 +134,11 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
589 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; 134 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
590 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; 135 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
591 parent = pe->pbus->self; 136 parent = pe->pbus->self;
592 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; 137 if (pe->flags & PNV_IODA_PE_BUS_ALL)
138 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
139 else
140 count = 1;
141
593 switch(count) { 142 switch(count) {
594 case 1: bcomp = OpalPciBusAll; break; 143 case 1: bcomp = OpalPciBusAll; break;
595 case 2: bcomp = OpalPciBus7Bits; break; 144 case 2: bcomp = OpalPciBus7Bits; break;
@@ -666,13 +215,13 @@ static void __devinit pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
666{ 215{
667 struct pnv_ioda_pe *lpe; 216 struct pnv_ioda_pe *lpe;
668 217
669 list_for_each_entry(lpe, &phb->ioda.pe_list, link) { 218 list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
670 if (lpe->dma_weight < pe->dma_weight) { 219 if (lpe->dma_weight < pe->dma_weight) {
671 list_add_tail(&pe->link, &lpe->link); 220 list_add_tail(&pe->dma_link, &lpe->dma_link);
672 return; 221 return;
673 } 222 }
674 } 223 }
675 list_add_tail(&pe->link, &phb->ioda.pe_list); 224 list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
676} 225}
677 226
678static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev) 227static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
@@ -699,6 +248,7 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
699 return 10; 248 return 10;
700} 249}
701 250
251#if 0
702static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev) 252static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
703{ 253{
704 struct pci_controller *hose = pci_bus_to_host(dev->bus); 254 struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -767,6 +317,7 @@ static struct pnv_ioda_pe * __devinit pnv_ioda_setup_dev_PE(struct pci_dev *dev)
767 317
768 return pe; 318 return pe;
769} 319}
320#endif /* Useful for SRIOV case */
770 321
771static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) 322static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
772{ 323{
@@ -784,34 +335,33 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
784 pdn->pcidev = dev; 335 pdn->pcidev = dev;
785 pdn->pe_number = pe->pe_number; 336 pdn->pe_number = pe->pe_number;
786 pe->dma_weight += pnv_ioda_dma_weight(dev); 337 pe->dma_weight += pnv_ioda_dma_weight(dev);
787 if (dev->subordinate) 338 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
788 pnv_ioda_setup_same_PE(dev->subordinate, pe); 339 pnv_ioda_setup_same_PE(dev->subordinate, pe);
789 } 340 }
790} 341}
791 342
792static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev, 343/*
 793 struct pnv_ioda_pe *ppe) 344 * There are 2 types of PCI bus sensitive PEs: one that is comprised of a
 345 * single PCI bus, and another that contains the primary PCI bus and its
 346 * subordinate PCI devices and buses. The second type of PE is normally
 347 * originated by a PCIe-to-PCI bridge or a PLX switch downstream port.
348 */
349static void __devinit pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
794{ 350{
795 struct pci_controller *hose = pci_bus_to_host(dev->bus); 351 struct pci_controller *hose = pci_bus_to_host(bus);
796 struct pnv_phb *phb = hose->private_data; 352 struct pnv_phb *phb = hose->private_data;
797 struct pci_bus *bus = dev->subordinate;
798 struct pnv_ioda_pe *pe; 353 struct pnv_ioda_pe *pe;
799 int pe_num; 354 int pe_num;
800 355
801 if (!bus) {
802 pr_warning("%s: Bridge without a subordinate bus !\n",
803 pci_name(dev));
804 return;
805 }
806 pe_num = pnv_ioda_alloc_pe(phb); 356 pe_num = pnv_ioda_alloc_pe(phb);
807 if (pe_num == IODA_INVALID_PE) { 357 if (pe_num == IODA_INVALID_PE) {
808 pr_warning("%s: Not enough PE# available, disabling bus\n", 358 pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
809 pci_name(dev)); 359 __func__, pci_domain_nr(bus), bus->number);
810 return; 360 return;
811 } 361 }
812 362
813 pe = &phb->ioda.pe_array[pe_num]; 363 pe = &phb->ioda.pe_array[pe_num];
814 ppe->bus_pe = pe; 364 pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
815 pe->pbus = bus; 365 pe->pbus = bus;
816 pe->pdev = NULL; 366 pe->pdev = NULL;
817 pe->tce32_seg = -1; 367 pe->tce32_seg = -1;
@@ -819,8 +369,12 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
819 pe->rid = bus->busn_res.start << 8; 369 pe->rid = bus->busn_res.start << 8;
820 pe->dma_weight = 0; 370 pe->dma_weight = 0;
821 371
822 pe_info(pe, "Secondary busses %pR associated with PE\n", 372 if (all)
823 &bus->busn_res); 373 pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
374 bus->busn_res.start, bus->busn_res.end, pe_num);
375 else
376 pe_info(pe, "Secondary bus %d associated with PE#%d\n",
377 bus->busn_res.start, pe_num);
824 378
825 if (pnv_ioda_configure_pe(phb, pe)) { 379 if (pnv_ioda_configure_pe(phb, pe)) {
826 /* XXX What do we do here ? */ 380 /* XXX What do we do here ? */
@@ -833,6 +387,9 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
833 /* Associate it with all child devices */ 387 /* Associate it with all child devices */
834 pnv_ioda_setup_same_PE(bus, pe); 388 pnv_ioda_setup_same_PE(bus, pe);
835 389
390 /* Put PE to the list */
391 list_add_tail(&pe->list, &phb->ioda.pe_list);
392
836 /* Account for one DMA PE if at least one DMA capable device exist 393 /* Account for one DMA PE if at least one DMA capable device exist
837 * below the bridge 394 * below the bridge
838 */ 395 */
@@ -848,17 +405,33 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
848static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus) 405static void __devinit pnv_ioda_setup_PEs(struct pci_bus *bus)
849{ 406{
850 struct pci_dev *dev; 407 struct pci_dev *dev;
851 struct pnv_ioda_pe *pe; 408
409 pnv_ioda_setup_bus_PE(bus, 0);
852 410
853 list_for_each_entry(dev, &bus->devices, bus_list) { 411 list_for_each_entry(dev, &bus->devices, bus_list) {
854 pe = pnv_ioda_setup_dev_PE(dev); 412 if (dev->subordinate) {
855 if (pe == NULL) 413 if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
856 continue; 414 pnv_ioda_setup_bus_PE(dev->subordinate, 1);
857 /* Leaving the PCIe domain ... single PE# */ 415 else
858 if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) 416 pnv_ioda_setup_PEs(dev->subordinate);
859 pnv_ioda_setup_bus_PE(dev, pe); 417 }
860 else if (dev->subordinate) 418 }
861 pnv_ioda_setup_PEs(dev->subordinate); 419}
420
421/*
422 * Configure PEs so that the downstream PCI buses and devices
 423 * could have their associated PE#. Unfortunately, we haven't
 424 * figured out a way to identify PLX bridges yet, so for now we
 425 * simply put the PCI bus and the subordinates behind the root
 426 * port into one PE#. This is expected to change as soon as we
 427 * can detect PLX bridges correctly.
428 */
429static void __devinit pnv_pci_ioda_setup_PEs(void)
430{
431 struct pci_controller *hose, *tmp;
432
433 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
434 pnv_ioda_setup_PEs(hose->bus);
862 } 435 }
863} 436}
864 437
@@ -1000,7 +573,7 @@ static void __devinit pnv_ioda_setup_dma(struct pnv_phb *phb)
1000 remaining = phb->ioda.tce32_count; 573 remaining = phb->ioda.tce32_count;
1001 tw = phb->ioda.dma_weight; 574 tw = phb->ioda.dma_weight;
1002 base = 0; 575 base = 0;
1003 list_for_each_entry(pe, &phb->ioda.pe_list, link) { 576 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
1004 if (!pe->dma_weight) 577 if (!pe->dma_weight)
1005 continue; 578 continue;
1006 if (!remaining) { 579 if (!remaining) {
@@ -1109,36 +682,115 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
1109static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } 682static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
1110#endif /* CONFIG_PCI_MSI */ 683#endif /* CONFIG_PCI_MSI */
1111 684
1112/* This is the starting point of our IODA specific resource 685/*
 1113 * allocation process 686 * This function is supposed to be called on a per-PE basis, from
 687 * top to bottom, so the I/O or MMIO segment assigned to a
 688 * parent PE can be overridden by its child PEs if necessary.
1114 */ 689 */
1115static void __devinit pnv_pci_ioda_fixup_phb(struct pci_controller *hose) 690static void __devinit pnv_ioda_setup_pe_seg(struct pci_controller *hose,
691 struct pnv_ioda_pe *pe)
1116{ 692{
1117 resource_size_t size, align; 693 struct pnv_phb *phb = hose->private_data;
1118 struct pci_bus *child; 694 struct pci_bus_region region;
695 struct resource *res;
696 int i, index;
697 int rc;
1119 698
1120 /* Associate PEs per functions */ 699 /*
 1121 pnv_ioda_setup_PEs(hose->bus); 700 * NOTE: We only care about PCI-bus-based PEs for now. PCI-
 701 * device-based PEs, for example SR-IOV VFs, will be
 702 * figured out later.
703 */
704 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
1122 705
1123 /* Calculate all resources */ 706 pci_bus_for_each_resource(pe->pbus, res, i) {
1124 pnv_ioda_calc_bus(hose->bus, IORESOURCE_IO, &size, &align); 707 if (!res || !res->flags ||
1125 pnv_ioda_calc_bus(hose->bus, IORESOURCE_MEM, &size, &align); 708 res->start > res->end)
709 continue;
1126 710
1127 /* Apply then to HW */ 711 if (res->flags & IORESOURCE_IO) {
1128 pnv_ioda_update_resources(hose->bus); 712 region.start = res->start - phb->ioda.io_pci_base;
713 region.end = res->end - phb->ioda.io_pci_base;
714 index = region.start / phb->ioda.io_segsize;
715
716 while (index < phb->ioda.total_pe &&
717 region.start <= region.end) {
718 phb->ioda.io_segmap[index] = pe->pe_number;
719 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
720 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
721 if (rc != OPAL_SUCCESS) {
722 pr_err("%s: OPAL error %d when mapping IO "
723 "segment #%d to PE#%d\n",
724 __func__, rc, index, pe->pe_number);
725 break;
726 }
727
728 region.start += phb->ioda.io_segsize;
729 index++;
730 }
731 } else if (res->flags & IORESOURCE_MEM) {
732 region.start = res->start -
733 hose->pci_mem_offset -
734 phb->ioda.m32_pci_base;
735 region.end = res->end -
736 hose->pci_mem_offset -
737 phb->ioda.m32_pci_base;
738 index = region.start / phb->ioda.m32_segsize;
739
740 while (index < phb->ioda.total_pe &&
741 region.start <= region.end) {
742 phb->ioda.m32_segmap[index] = pe->pe_number;
743 rc = opal_pci_map_pe_mmio_window(phb->opal_id,
744 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
745 if (rc != OPAL_SUCCESS) {
746 pr_err("%s: OPAL error %d when mapping M32 "
747 "segment#%d to PE#%d",
748 __func__, rc, index, pe->pe_number);
749 break;
750 }
751
752 region.start += phb->ioda.m32_segsize;
753 index++;
754 }
755 }
756 }
757}
1129 758
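The segment-mapping loop above is easier to follow with concrete numbers. A self-contained sketch (hypothetical window sizes, plain user-space C, not kernel code) of how a rebased bus resource is translated into a run of M32 segment indexes:

	#include <stdio.h>

	#define SEGSIZE  0x100000UL	/* assume a 1 MiB M32 segment size */
	#define TOTAL_PE 128		/* assume 128 PEs on the PHB       */

	int main(void)
	{
		/* Resource already rebased against m32_pci_base, as above. */
		unsigned long start = 0x3200000, end = 0x35fffff;
		unsigned long index = start / SEGSIZE;

		/* Every segment overlapped by [start, end] belongs to the PE. */
		while (index < TOTAL_PE && start <= end) {
			printf("map M32 segment #%lu to this PE\n", index);
			start += SEGSIZE;
			index++;
		}
		return 0;	/* prints segments 50..53 for this range */
	}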
1130 /* Setup DMA */ 759static void __devinit pnv_pci_ioda_setup_seg(void)
1131 pnv_ioda_setup_dma(hose->private_data); 760{
761 struct pci_controller *tmp, *hose;
762 struct pnv_phb *phb;
763 struct pnv_ioda_pe *pe;
1132 764
1133 /* Configure PCI Express settings */ 765 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1134 list_for_each_entry(child, &hose->bus->children, node) { 766 phb = hose->private_data;
1135 struct pci_dev *self = child->self; 767 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
1136 if (!self) 768 pnv_ioda_setup_pe_seg(hose, pe);
1137 continue; 769 }
1138 pcie_bus_configure_settings(child, self->pcie_mpss); 770 }
771}
772
773static void __devinit pnv_pci_ioda_setup_DMA(void)
774{
775 struct pci_controller *hose, *tmp;
776 struct pnv_phb *phb;
777
778 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
779 pnv_ioda_setup_dma(hose->private_data);
780
781 /* Mark the PHB initialization done */
782 phb = hose->private_data;
783 phb->initialized = 1;
1139 } 784 }
1140} 785}
1141 786
787static void __devinit pnv_pci_ioda_fixup(void)
788{
789 pnv_pci_ioda_setup_PEs();
790 pnv_pci_ioda_setup_seg();
791 pnv_pci_ioda_setup_DMA();
792}
793
1142/* 794/*
1143 * Returns the alignment for I/O or memory windows for P2P 795 * Returns the alignment for I/O or memory windows for P2P
1144 * bridges. That actually depends on how PEs are segmented. 796 * bridges. That actually depends on how PEs are segmented.
@@ -1182,10 +834,22 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
1182 */ 834 */
1183static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev) 835static int __devinit pnv_pci_enable_device_hook(struct pci_dev *dev)
1184{ 836{
1185 struct pci_dn *pdn = pnv_ioda_get_pdn(dev); 837 struct pci_controller *hose = pci_bus_to_host(dev->bus);
838 struct pnv_phb *phb = hose->private_data;
839 struct pci_dn *pdn;
1186 840
 841 /* This function is probably called while the PEs have
 842 * not been created yet, for example during resource
 843 * reassignment in the PCI probe period. Just skip the
 844 * check if the PEs aren't ready.
845 */
846 if (!phb->initialized)
847 return 0;
848
849 pdn = pnv_ioda_get_pdn(dev);
1187 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 850 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1188 return -EINVAL; 851 return -EINVAL;
852
1189 return 0; 853 return 0;
1190} 854}
1191 855
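The window-alignment hook registered later (pnv_pci_window_alignment) is not shown in this hunk, but the comment above it hints at its job: P2P bridge windows must be aligned to the PE segment granularity so that each bus lands on its own segments. A hedged sketch of that idea, using the segsize fields introduced later in this patch (this is not the actual function body):

	/* Sketch only: align P2P bridge windows to the PE segment size. */
	static resource_size_t window_alignment_sketch(struct pnv_phb *phb,
						       unsigned long type)
	{
		if (type & IORESOURCE_MEM)
			return phb->ioda.m32_segsize;
		if (type & IORESOURCE_IO)
			return phb->ioda.io_segsize;
		return 0;	/* let the PCI core use its default */
	}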
@@ -1276,9 +940,9 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1276 /* Allocate aux data & arrays */ 940 /* Allocate aux data & arrays */
1277 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); 941 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
1278 m32map_off = size; 942 m32map_off = size;
1279 size += phb->ioda.total_pe; 943 size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
1280 iomap_off = size; 944 iomap_off = size;
1281 size += phb->ioda.total_pe; 945 size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
1282 pemap_off = size; 946 pemap_off = size;
1283 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe); 947 size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
1284 aux = alloc_bootmem(size); 948 aux = alloc_bootmem(size);
@@ -1289,6 +953,7 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1289 phb->ioda.pe_array = aux + pemap_off; 953 phb->ioda.pe_array = aux + pemap_off;
1290 set_bit(0, phb->ioda.pe_alloc); 954 set_bit(0, phb->ioda.pe_alloc);
1291 955
956 INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
1292 INIT_LIST_HEAD(&phb->ioda.pe_list); 957 INIT_LIST_HEAD(&phb->ioda.pe_list);
1293 958
1294 /* Calculate how many 32-bit TCE segments we have */ 959 /* Calculate how many 32-bit TCE segments we have */
@@ -1337,15 +1002,17 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1337 /* Setup MSI support */ 1002 /* Setup MSI support */
1338 pnv_pci_init_ioda_msis(phb); 1003 pnv_pci_init_ioda_msis(phb);
1339 1004
1340 /* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an 1005 /*
1341 * odd combination which essentially means that we skip all resource 1006 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
 1342 * fixups and assignments in the generic code, and do it all 1007 * to let the PCI core do resource assignment. The PCI core is
 1343 * ourselves here 1008 * expected to do correct I/O and MMIO alignment for the P2P
 1009 * bridge BARs so that each PCI bus (excluding the child P2P
 1010 * bridges) can form an individual PE.
1344 */ 1011 */
1345 ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; 1012 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
1346 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; 1013 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
1347 ppc_md.pcibios_window_alignment = pnv_pci_window_alignment; 1014 ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
1348 pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC); 1015 pci_add_flags(PCI_REASSIGN_ALL_RSRC);
1349 1016
1350 /* Reset IODA tables to a clean state */ 1017 /* Reset IODA tables to a clean state */
1351 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); 1018 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 264967770c3a..6b4bef4e9d82 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -30,7 +30,6 @@
30#include <asm/opal.h> 30#include <asm/opal.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34 33
35#include "powernv.h" 34#include "powernv.h"
36#include "pci.h" 35#include "pci.h"
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index be3cfc5ceabb..c01688a1a741 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -30,7 +30,6 @@
30#include <asm/opal.h> 30#include <asm/opal.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h>
34#include <asm/firmware.h> 33#include <asm/firmware.h>
35 34
36#include "powernv.h" 35#include "powernv.h"
@@ -447,6 +446,11 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
447 pnv_tce_invalidate(tbl, tces, tcep - 1); 446 pnv_tce_invalidate(tbl, tces, tcep - 1);
448} 447}
449 448
449static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
450{
451 return ((u64 *)tbl->it_base)[index - tbl->it_offset];
452}
453
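The new tce_get hook simply reads back the 64-bit entry that tce_build stored, using the same (index - it_offset) addressing. A tiny self-contained model of that convention (plain C, toy structure, not the kernel iommu_table API):

	#include <stdint.h>
	#include <stdio.h>

	struct toy_tce_table {
		uint64_t *it_base;	/* virtual address of the TCE array */
		long	  it_offset;	/* first IOMMU page index it covers */
	};

	static uint64_t toy_tce_get(struct toy_tce_table *tbl, long index)
	{
		return tbl->it_base[index - tbl->it_offset];
	}

	int main(void)
	{
		uint64_t entries[4] = { 0 };
		struct toy_tce_table tbl = { .it_base = entries, .it_offset = 16 };

		entries[2] = 0x2000ULL | 0x3;	/* pretend tce_build stored RPN|flags */
		printf("TCE at index 18 = 0x%llx\n",
		       (unsigned long long)toy_tce_get(&tbl, 18));
		return 0;
	}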
450void pnv_pci_setup_iommu_table(struct iommu_table *tbl, 454void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
451 void *tce_mem, u64 tce_size, 455 void *tce_mem, u64 tce_size,
452 u64 dma_offset) 456 u64 dma_offset)
@@ -597,6 +601,7 @@ void __init pnv_pci_init(void)
597 ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; 601 ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
598 ppc_md.tce_build = pnv_tce_build; 602 ppc_md.tce_build = pnv_tce_build;
599 ppc_md.tce_free = pnv_tce_free; 603 ppc_md.tce_free = pnv_tce_free;
604 ppc_md.tce_get = pnv_tce_get;
600 ppc_md.pci_probe_mode = pnv_pci_probe_mode; 605 ppc_md.pci_probe_mode = pnv_pci_probe_mode;
601 set_pci_dma_ops(&dma_iommu_ops); 606 set_pci_dma_ops(&dma_iommu_ops);
602 607
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8bc479634643..7cfb7c883deb 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -17,9 +17,14 @@ enum pnv_phb_model {
17}; 17};
18 18
19#define PNV_PCI_DIAG_BUF_SIZE 4096 19#define PNV_PCI_DIAG_BUF_SIZE 4096
20#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
21#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
22#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
20 23
21/* Data associated with a PE, including IOMMU tracking etc.. */ 24/* Data associated with a PE, including IOMMU tracking etc.. */
22struct pnv_ioda_pe { 25struct pnv_ioda_pe {
26 unsigned long flags;
27
23 /* A PE can be associated with a single device or an 28 /* A PE can be associated with a single device or an
24 * entire bus (& children). In the former case, pdev 29 * entire bus (& children). In the former case, pdev
25 * is populated, in the later case, pbus is. 30 * is populated, in the later case, pbus is.
@@ -40,11 +45,6 @@ struct pnv_ioda_pe {
40 */ 45 */
41 unsigned int dma_weight; 46 unsigned int dma_weight;
42 47
43 /* This is a PCI-E -> PCI-X bridge, this points to the
44 * corresponding bus PE
45 */
46 struct pnv_ioda_pe *bus_pe;
47
48 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */ 48 /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
49 int tce32_seg; 49 int tce32_seg;
50 int tce32_segcount; 50 int tce32_segcount;
@@ -59,7 +59,8 @@ struct pnv_ioda_pe {
59 int mve_number; 59 int mve_number;
60 60
61 /* Link in list of PE#s */ 61 /* Link in list of PE#s */
62 struct list_head link; 62 struct list_head dma_link;
63 struct list_head list;
63}; 64};
64 65
65struct pnv_phb { 66struct pnv_phb {
@@ -68,6 +69,7 @@ struct pnv_phb {
68 enum pnv_phb_model model; 69 enum pnv_phb_model model;
69 u64 opal_id; 70 u64 opal_id;
70 void __iomem *regs; 71 void __iomem *regs;
72 int initialized;
71 spinlock_t lock; 73 spinlock_t lock;
72 74
73#ifdef CONFIG_PCI_MSI 75#ifdef CONFIG_PCI_MSI
@@ -107,6 +109,11 @@ struct pnv_phb {
107 unsigned int *io_segmap; 109 unsigned int *io_segmap;
108 struct pnv_ioda_pe *pe_array; 110 struct pnv_ioda_pe *pe_array;
109 111
 112 /* Sorted list of used PEs based
113 * on the sequence of creation
114 */
115 struct list_head pe_list;
116
110 /* Reverse map of PEs, will have to extend if 117 /* Reverse map of PEs, will have to extend if
111 * we are to support more than 256 PEs, indexed 118 * we are to support more than 256 PEs, indexed
112 * bus { bus, devfn } 119 * bus { bus, devfn }
@@ -125,7 +132,7 @@ struct pnv_phb {
125 /* Sorted list of used PE's, sorted at 132 /* Sorted list of used PE's, sorted at
126 * boot for resource allocation purposes 133 * boot for resource allocation purposes
127 */ 134 */
128 struct list_head pe_list; 135 struct list_head pe_dma_list;
129 } ioda; 136 } ioda;
130 }; 137 };
131 138
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3124cf791ebb..d00d7b0a3bda 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -43,7 +43,7 @@ enum ps3_lpar_vas_id {
43 43
44static DEFINE_SPINLOCK(ps3_htab_lock); 44static DEFINE_SPINLOCK(ps3_htab_lock);
45 45
46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va, 46static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
47 unsigned long pa, unsigned long rflags, unsigned long vflags, 47 unsigned long pa, unsigned long rflags, unsigned long vflags,
48 int psize, int ssize) 48 int psize, int ssize)
49{ 49{
@@ -61,7 +61,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
61 */ 61 */
62 vflags &= ~HPTE_V_SECONDARY; 62 vflags &= ~HPTE_V_SECONDARY;
63 63
64 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 64 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags; 65 hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
66 66
67 spin_lock_irqsave(&ps3_htab_lock, flags); 67 spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +75,8 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
75 75
76 if (result) { 76 if (result) {
77 /* all entries bolted !*/ 77 /* all entries bolted !*/
78 pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n", 78 pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
79 __func__, result, va, pa, hpte_group, hpte_v, hpte_r); 79 __func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
80 BUG(); 80 BUG();
81 } 81 }
82 82
@@ -107,7 +107,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
107} 107}
108 108
109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp, 109static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
110 unsigned long va, int psize, int ssize, int local) 110 unsigned long vpn, int psize, int ssize, int local)
111{ 111{
112 int result; 112 int result;
113 u64 hpte_v, want_v, hpte_rs; 113 u64 hpte_v, want_v, hpte_rs;
@@ -115,7 +115,7 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
115 unsigned long flags; 115 unsigned long flags;
116 long ret; 116 long ret;
117 117
118 want_v = hpte_encode_v(va, psize, ssize); 118 want_v = hpte_encode_v(vpn, psize, ssize);
119 119
120 spin_lock_irqsave(&ps3_htab_lock, flags); 120 spin_lock_irqsave(&ps3_htab_lock, flags);
121 121
@@ -125,8 +125,8 @@ static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
125 &hpte_rs); 125 &hpte_rs);
126 126
127 if (result) { 127 if (result) {
128 pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n", 128 pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
129 __func__, result, va, slot, psize); 129 __func__, result, vpn, slot, psize);
130 BUG(); 130 BUG();
131 } 131 }
132 132
@@ -159,7 +159,7 @@ static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
159 panic("ps3_hpte_updateboltedpp() not implemented"); 159 panic("ps3_hpte_updateboltedpp() not implemented");
160} 160}
161 161
162static void ps3_hpte_invalidate(unsigned long slot, unsigned long va, 162static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
163 int psize, int ssize, int local) 163 int psize, int ssize, int local)
164{ 164{
165 unsigned long flags; 165 unsigned long flags;
@@ -170,8 +170,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0); 170 result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
171 171
172 if (result) { 172 if (result) {
173 pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n", 173 pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
174 __func__, result, va, slot, psize); 174 __func__, result, vpn, slot, psize);
175 BUG(); 175 BUG();
176 } 176 }
177 177
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 2d664c5a83b0..3f509f86432c 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -184,11 +184,15 @@ early_param("ps3flash", early_parse_ps3flash);
184#define prealloc_ps3flash_bounce_buffer() do { } while (0) 184#define prealloc_ps3flash_bounce_buffer() do { } while (0)
185#endif 185#endif
186 186
187static int ps3_set_dabr(unsigned long dabr) 187static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
188{ 188{
189 enum {DABR_USER = 1, DABR_KERNEL = 2,}; 189 /* Have to set at least one bit in the DABRX */
190 if (dabrx == 0 && dabr == 0)
191 dabrx = DABRX_USER;
192 /* hypervisor only allows us to set BTI, Kernel and user */
193 dabrx &= DABRX_BTI | DABRX_KERNEL | DABRX_USER;
190 194
191 return lv1_set_dabr(dabr, DABR_KERNEL | DABR_USER) ? -1 : 0; 195 return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
192} 196}
193 197
194static void __init ps3_setup_arch(void) 198static void __init ps3_setup_arch(void)
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index c222189f5bb2..890622b87c8f 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,8 +6,9 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
6 firmware.o power.o dlpar.o mobility.o 6 firmware.o power.o dlpar.o mobility.o
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_SCANLOG) += scanlog.o 8obj-$(CONFIG_SCANLOG) += scanlog.o
9obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \ 9obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
10 eeh_event.o eeh_sysfs.o eeh_pseries.o 10 eeh_driver.o eeh_event.o eeh_sysfs.o \
11 eeh_pseries.o
11obj-$(CONFIG_KEXEC) += kexec.o 12obj-$(CONFIG_KEXEC) += kexec.o
12obj-$(CONFIG_PCI) += pci.o pci_dlpar.o 13obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
13obj-$(CONFIG_PSERIES_MSI) += msi.o 14obj-$(CONFIG_PSERIES_MSI) += msi.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ecd394cf34e6..9a04322b1736 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -92,6 +92,20 @@ struct eeh_ops *eeh_ops = NULL;
92int eeh_subsystem_enabled; 92int eeh_subsystem_enabled;
93EXPORT_SYMBOL(eeh_subsystem_enabled); 93EXPORT_SYMBOL(eeh_subsystem_enabled);
94 94
95/*
96 * EEH probe mode support. The intention is to support multiple
 97 * platforms for EEH. Some platforms, like pSeries, do PCI enumeration
 98 * based on the device tree, while other platforms, like powernv, probe
 99 * PCI devices from hardware. The flag is used to distinguish between them.
 100 * In addition, struct eeh_ops::probe is invoked for the particular
 101 * OF node or PCI device so that the corresponding PE is created
 102 * there.
103 */
104int eeh_probe_mode;
105
106/* Global EEH mutex */
107DEFINE_MUTEX(eeh_mutex);
108
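The accessors around eeh_probe_mode live in eeh.h rather than in this file; a hedged sketch of the expected shape (the constant names and eeh_probe_mode_set() are assumptions here, while eeh_probe_mode_devtree() is used later in this patch):

	/* Assumed helpers -- only eeh_probe_mode_devtree() is visible in this diff. */
	#define EEH_PROBE_MODE_DEV	(1 << 0)  /* probe from real PCI devices */
	#define EEH_PROBE_MODE_DEVTREE	(1 << 1)  /* probe from the device tree  */

	static inline void eeh_probe_mode_set(int mode)
	{
		eeh_probe_mode = mode;
	}

	static inline int eeh_probe_mode_devtree(void)
	{
		return eeh_probe_mode == EEH_PROBE_MODE_DEVTREE;
	}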
95/* Lock to avoid races due to multiple reports of an error */ 109/* Lock to avoid races due to multiple reports of an error */
96static DEFINE_RAW_SPINLOCK(confirm_error_lock); 110static DEFINE_RAW_SPINLOCK(confirm_error_lock);
97 111
@@ -204,22 +218,12 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
204 } 218 }
205 } 219 }
206 220
207 /* Gather status on devices under the bridge */
208 if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
209 struct device_node *child;
210
211 for_each_child_of_node(dn, child) {
212 if (of_node_to_eeh_dev(child))
213 n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n);
214 }
215 }
216
217 return n; 221 return n;
218} 222}
219 223
220/** 224/**
221 * eeh_slot_error_detail - Generate combined log including driver log and error log 225 * eeh_slot_error_detail - Generate combined log including driver log and error log
222 * @edev: device to report error log for 226 * @pe: EEH PE
223 * @severity: temporary or permanent error log 227 * @severity: temporary or permanent error log
224 * 228 *
225 * This routine should be called to generate the combined log, which 229 * This routine should be called to generate the combined log, which
@@ -227,17 +231,22 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
227 * out from the config space of the corresponding PCI device, while 231 * out from the config space of the corresponding PCI device, while
228 * the error log is fetched through platform dependent function call. 232 * the error log is fetched through platform dependent function call.
229 */ 233 */
230void eeh_slot_error_detail(struct eeh_dev *edev, int severity) 234void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
231{ 235{
232 size_t loglen = 0; 236 size_t loglen = 0;
233 pci_regs_buf[0] = 0; 237 struct eeh_dev *edev;
234 238
235 eeh_pci_enable(edev, EEH_OPT_THAW_MMIO); 239 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
236 eeh_ops->configure_bridge(eeh_dev_to_of_node(edev)); 240 eeh_ops->configure_bridge(pe);
237 eeh_restore_bars(edev); 241 eeh_pe_restore_bars(pe);
238 loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
239 242
240 eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen); 243 pci_regs_buf[0] = 0;
244 eeh_pe_for_each_dev(pe, edev) {
245 loglen += eeh_gather_pci_data(edev, pci_regs_buf,
246 EEH_PCI_REGS_LOG_LEN);
247 }
248
249 eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
241} 250}
242 251
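eeh_pe_for_each_dev() used above is presumably a thin wrapper over the PE's device list; a hedged sketch of how it would expand (the 'edevs' list head name is an assumption, provided by the new eeh_pe code elsewhere in this series):

	/* Assumed expansion: iterate every eeh_dev attached to the PE. */
	#define eeh_pe_for_each_dev(pe, edev) \
		list_for_each_entry((edev), &(pe)->edevs, list)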
243/** 252/**
@@ -261,126 +270,8 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
261} 270}
262 271
263/** 272/**
264 * eeh_find_device_pe - Retrieve the PE for the given device 273 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
265 * @dn: device node 274 * @edev: eeh device
266 *
267 * Return the PE under which this device lies
268 */
269struct device_node *eeh_find_device_pe(struct device_node *dn)
270{
271 while (dn->parent && of_node_to_eeh_dev(dn->parent) &&
272 (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
273 dn = dn->parent;
274 }
275 return dn;
276}
277
278/**
279 * __eeh_mark_slot - Mark all child devices as failed
280 * @parent: parent device
281 * @mode_flag: failure flag
282 *
283 * Mark all devices that are children of this device as failed.
284 * Mark the device driver too, so that it can see the failure
285 * immediately; this is critical, since some drivers poll
286 * status registers in interrupts ... If a driver is polling,
287 * and the slot is frozen, then the driver can deadlock in
288 * an interrupt context, which is bad.
289 */
290static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
291{
292 struct device_node *dn;
293
294 for_each_child_of_node(parent, dn) {
295 if (of_node_to_eeh_dev(dn)) {
296 /* Mark the pci device driver too */
297 struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
298
299 of_node_to_eeh_dev(dn)->mode |= mode_flag;
300
301 if (dev && dev->driver)
302 dev->error_state = pci_channel_io_frozen;
303
304 __eeh_mark_slot(dn, mode_flag);
305 }
306 }
307}
308
309/**
310 * eeh_mark_slot - Mark the indicated device and its children as failed
311 * @dn: parent device
312 * @mode_flag: failure flag
313 *
314 * Mark the indicated device and its child devices as failed.
315 * The device drivers are marked as failed as well.
316 */
317void eeh_mark_slot(struct device_node *dn, int mode_flag)
318{
319 struct pci_dev *dev;
320 dn = eeh_find_device_pe(dn);
321
322 /* Back up one, since config addrs might be shared */
323 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
324 dn = dn->parent;
325
326 of_node_to_eeh_dev(dn)->mode |= mode_flag;
327
328 /* Mark the pci device too */
329 dev = of_node_to_eeh_dev(dn)->pdev;
330 if (dev)
331 dev->error_state = pci_channel_io_frozen;
332
333 __eeh_mark_slot(dn, mode_flag);
334}
335
336/**
337 * __eeh_clear_slot - Clear failure flag for the child devices
338 * @parent: parent device
339 * @mode_flag: flag to be cleared
340 *
341 * Clear failure flag for the child devices.
342 */
343static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
344{
345 struct device_node *dn;
346
347 for_each_child_of_node(parent, dn) {
348 if (of_node_to_eeh_dev(dn)) {
349 of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
350 of_node_to_eeh_dev(dn)->check_count = 0;
351 __eeh_clear_slot(dn, mode_flag);
352 }
353 }
354}
355
356/**
357 * eeh_clear_slot - Clear failure flag for the indicated device and its children
358 * @dn: parent device
359 * @mode_flag: flag to be cleared
360 *
361 * Clear failure flag for the indicated device and its children.
362 */
363void eeh_clear_slot(struct device_node *dn, int mode_flag)
364{
365 unsigned long flags;
366 raw_spin_lock_irqsave(&confirm_error_lock, flags);
367
368 dn = eeh_find_device_pe(dn);
369
370 /* Back up one, since config addrs might be shared */
371 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
372 dn = dn->parent;
373
374 of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
375 of_node_to_eeh_dev(dn)->check_count = 0;
376 __eeh_clear_slot(dn, mode_flag);
377 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
378}
379
380/**
381 * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
382 * @dn: device node
383 * @dev: pci device, if known
384 * 275 *
385 * Check for an EEH failure for the given device node. Call this 276 * Check for an EEH failure for the given device node. Call this
386 * routine if the result of a read was all 0xff's and you want to 277 * routine if the result of a read was all 0xff's and you want to
@@ -392,11 +283,13 @@ void eeh_clear_slot(struct device_node *dn, int mode_flag)
392 * 283 *
393 * It is safe to call this routine in an interrupt context. 284 * It is safe to call this routine in an interrupt context.
394 */ 285 */
395int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) 286int eeh_dev_check_failure(struct eeh_dev *edev)
396{ 287{
397 int ret; 288 int ret;
398 unsigned long flags; 289 unsigned long flags;
399 struct eeh_dev *edev; 290 struct device_node *dn;
291 struct pci_dev *dev;
292 struct eeh_pe *pe;
400 int rc = 0; 293 int rc = 0;
401 const char *location; 294 const char *location;
402 295
@@ -405,23 +298,23 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
405 if (!eeh_subsystem_enabled) 298 if (!eeh_subsystem_enabled)
406 return 0; 299 return 0;
407 300
408 if (!dn) { 301 if (!edev) {
409 eeh_stats.no_dn++; 302 eeh_stats.no_dn++;
410 return 0; 303 return 0;
411 } 304 }
412 dn = eeh_find_device_pe(dn); 305 dn = eeh_dev_to_of_node(edev);
413 edev = of_node_to_eeh_dev(dn); 306 dev = eeh_dev_to_pci_dev(edev);
307 pe = edev->pe;
414 308
415 /* Access to IO BARs might get this far and still not want checking. */ 309 /* Access to IO BARs might get this far and still not want checking. */
416 if (!(edev->mode & EEH_MODE_SUPPORTED) || 310 if (!pe) {
417 edev->mode & EEH_MODE_NOCHECK) {
418 eeh_stats.ignored_check++; 311 eeh_stats.ignored_check++;
419 pr_debug("EEH: Ignored check (%x) for %s %s\n", 312 pr_debug("EEH: Ignored check for %s %s\n",
420 edev->mode, eeh_pci_name(dev), dn->full_name); 313 eeh_pci_name(dev), dn->full_name);
421 return 0; 314 return 0;
422 } 315 }
423 316
424 if (!edev->config_addr && !edev->pe_config_addr) { 317 if (!pe->addr && !pe->config_addr) {
425 eeh_stats.no_cfg_addr++; 318 eeh_stats.no_cfg_addr++;
426 return 0; 319 return 0;
427 } 320 }
@@ -434,13 +327,13 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
434 */ 327 */
435 raw_spin_lock_irqsave(&confirm_error_lock, flags); 328 raw_spin_lock_irqsave(&confirm_error_lock, flags);
436 rc = 1; 329 rc = 1;
437 if (edev->mode & EEH_MODE_ISOLATED) { 330 if (pe->state & EEH_PE_ISOLATED) {
438 edev->check_count++; 331 pe->check_count++;
439 if (edev->check_count % EEH_MAX_FAILS == 0) { 332 if (pe->check_count % EEH_MAX_FAILS == 0) {
440 location = of_get_property(dn, "ibm,loc-code", NULL); 333 location = of_get_property(dn, "ibm,loc-code", NULL);
441 printk(KERN_ERR "EEH: %d reads ignored for recovering device at " 334 printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
442 "location=%s driver=%s pci addr=%s\n", 335 "location=%s driver=%s pci addr=%s\n",
443 edev->check_count, location, 336 pe->check_count, location,
444 eeh_driver_name(dev), eeh_pci_name(dev)); 337 eeh_driver_name(dev), eeh_pci_name(dev));
445 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", 338 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
446 eeh_driver_name(dev)); 339 eeh_driver_name(dev));
@@ -456,7 +349,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
456 * function zero of a multi-function device. 349 * function zero of a multi-function device.
457 * In any case they must share a common PHB. 350 * In any case they must share a common PHB.
458 */ 351 */
459 ret = eeh_ops->get_state(dn, NULL); 352 ret = eeh_ops->get_state(pe, NULL);
460 353
461 /* Note that config-io to empty slots may fail; 354 /* Note that config-io to empty slots may fail;
462 * they are empty when they don't have children. 355 * they are empty when they don't have children.
@@ -469,7 +362,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
469 (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == 362 (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
470 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { 363 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
471 eeh_stats.false_positives++; 364 eeh_stats.false_positives++;
472 edev->false_positives ++; 365 pe->false_positives++;
473 rc = 0; 366 rc = 0;
474 goto dn_unlock; 367 goto dn_unlock;
475 } 368 }
@@ -480,10 +373,10 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
480 * with other functions on this device, and functions under 373 * with other functions on this device, and functions under
481 * bridges. 374 * bridges.
482 */ 375 */
483 eeh_mark_slot(dn, EEH_MODE_ISOLATED); 376 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
484 raw_spin_unlock_irqrestore(&confirm_error_lock, flags); 377 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
485 378
486 eeh_send_failure_event(edev); 379 eeh_send_failure_event(pe);
487 380
488 /* Most EEH events are due to device driver bugs. Having 381 /* Most EEH events are due to device driver bugs. Having
489 * a stack trace will help the device-driver authors figure 382 * a stack trace will help the device-driver authors figure
@@ -497,7 +390,7 @@ dn_unlock:
497 return rc; 390 return rc;
498} 391}
499 392
500EXPORT_SYMBOL_GPL(eeh_dn_check_failure); 393EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
501 394
502/** 395/**
503 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze 396 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
@@ -514,21 +407,19 @@ EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
514unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) 407unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
515{ 408{
516 unsigned long addr; 409 unsigned long addr;
517 struct pci_dev *dev; 410 struct eeh_dev *edev;
518 struct device_node *dn;
519 411
520 /* Finding the phys addr + pci device; this is pretty quick. */ 412 /* Finding the phys addr + pci device; this is pretty quick. */
521 addr = eeh_token_to_phys((unsigned long __force) token); 413 addr = eeh_token_to_phys((unsigned long __force) token);
522 dev = pci_addr_cache_get_device(addr); 414 edev = eeh_addr_cache_get_dev(addr);
523 if (!dev) { 415 if (!edev) {
524 eeh_stats.no_device++; 416 eeh_stats.no_device++;
525 return val; 417 return val;
526 } 418 }
527 419
528 dn = pci_device_to_OF_node(dev); 420 eeh_dev_check_failure(edev);
529 eeh_dn_check_failure(dn, dev);
530 421
531 pci_dev_put(dev); 422 pci_dev_put(eeh_dev_to_pci_dev(edev));
532 return val; 423 return val;
533} 424}
534 425
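A typical caller of eeh_check_failure() is an MMIO read accessor that folds in the slot-freeze check on every raw read. A hedged usage sketch along the lines of the eeh_readl()-style wrappers in eeh.h (the wrapper name below is illustrative):

	/* Illustrative accessor: read, then let EEH decide whether all-ones
	 * data means a frozen slot rather than real register contents. */
	static inline u32 sketch_eeh_readl(const volatile void __iomem *addr)
	{
		u32 val = in_le32(addr);

		return eeh_check_failure(addr, val);
	}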
@@ -537,23 +428,22 @@ EXPORT_SYMBOL(eeh_check_failure);
537 428
538/** 429/**
539 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot 430 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
540 * @edev: pci device node 431 * @pe: EEH PE
541 * 432 *
542 * This routine should be called to reenable frozen MMIO or DMA 433 * This routine should be called to reenable frozen MMIO or DMA
543 * so that it would work correctly again. It's useful while doing 434 * so that it would work correctly again. It's useful while doing
544 * recovery or log collection on the indicated device. 435 * recovery or log collection on the indicated device.
545 */ 436 */
546int eeh_pci_enable(struct eeh_dev *edev, int function) 437int eeh_pci_enable(struct eeh_pe *pe, int function)
547{ 438{
548 int rc; 439 int rc;
549 struct device_node *dn = eeh_dev_to_of_node(edev);
550 440
551 rc = eeh_ops->set_option(dn, function); 441 rc = eeh_ops->set_option(pe, function);
552 if (rc) 442 if (rc)
553 printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n", 443 pr_warning("%s: Unexpected state change %d on PHB#%d-PE#%x, err=%d\n",
554 function, rc, dn->full_name); 444 __func__, function, pe->phb->global_number, pe->addr, rc);
555 445
556 rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); 446 rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
557 if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) && 447 if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
558 (function == EEH_OPT_THAW_MMIO)) 448 (function == EEH_OPT_THAW_MMIO))
559 return 0; 449 return 0;
@@ -571,17 +461,24 @@ int eeh_pci_enable(struct eeh_dev *edev, int function)
571 */ 461 */
572int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) 462int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
573{ 463{
574 struct device_node *dn = pci_device_to_OF_node(dev); 464 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
465 struct eeh_pe *pe = edev->pe;
466
467 if (!pe) {
468 pr_err("%s: No PE found on PCI device %s\n",
469 __func__, pci_name(dev));
470 return -EINVAL;
471 }
575 472
576 switch (state) { 473 switch (state) {
577 case pcie_deassert_reset: 474 case pcie_deassert_reset:
578 eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); 475 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
579 break; 476 break;
580 case pcie_hot_reset: 477 case pcie_hot_reset:
581 eeh_ops->reset(dn, EEH_RESET_HOT); 478 eeh_ops->reset(pe, EEH_RESET_HOT);
582 break; 479 break;
583 case pcie_warm_reset: 480 case pcie_warm_reset:
584 eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); 481 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
585 break; 482 break;
586 default: 483 default:
587 return -EINVAL; 484 return -EINVAL;
@@ -591,66 +488,37 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
591} 488}
592 489
593/** 490/**
 594 * __eeh_set_pe_freset - Check the required reset for child devices 491 * eeh_set_dev_freset - Check the required reset for the indicated device
595 * @parent: parent device 492 * @data: EEH device
596 * @freset: return value 493 * @flag: return value
597 *
598 * Each device might have its preferred reset type: fundamental or
599 * hot reset. The routine is used to collect the information from
600 * the child devices so that they could be reset accordingly.
601 */
602void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
603{
604 struct device_node *dn;
605
606 for_each_child_of_node(parent, dn) {
607 if (of_node_to_eeh_dev(dn)) {
608 struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
609
610 if (dev && dev->driver)
611 *freset |= dev->needs_freset;
612
613 __eeh_set_pe_freset(dn, freset);
614 }
615 }
616}
617
618/**
619 * eeh_set_pe_freset - Check the required reset for the indicated device and its children
620 * @dn: parent device
621 * @freset: return value
622 * 494 *
623 * Each device might have its preferred reset type: fundamental or 495 * Each device might have its preferred reset type: fundamental or
 624 * hot reset. The routine is used to collect the information for 496 * hot reset. The routine is used to collect the information for
 625 * the indicated device and its children so that the bunch of 497 * the indicated device and its children so that the bunch of
626 * devices could be reset properly. 498 * devices could be reset properly.
627 */ 499 */
628void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) 500static void *eeh_set_dev_freset(void *data, void *flag)
629{ 501{
630 struct pci_dev *dev; 502 struct pci_dev *dev;
631 dn = eeh_find_device_pe(dn); 503 unsigned int *freset = (unsigned int *)flag;
632 504 struct eeh_dev *edev = (struct eeh_dev *)data;
633 /* Back up one, since config addrs might be shared */
634 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
635 dn = dn->parent;
636 505
637 dev = of_node_to_eeh_dev(dn)->pdev; 506 dev = eeh_dev_to_pci_dev(edev);
638 if (dev) 507 if (dev)
639 *freset |= dev->needs_freset; 508 *freset |= dev->needs_freset;
640 509
641 __eeh_set_pe_freset(dn, freset); 510 return NULL;
642} 511}
643 512
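eeh_set_dev_freset() above is written as a callback for eeh_pe_dev_traverse(), which walks every eeh_dev in the PE until the callback returns non-NULL. A hedged sketch of the traversal helper, reusing the eeh_pe_for_each_dev() expansion sketched earlier (single level only; the real helper also descends into child PEs):

	static void *pe_dev_traverse_sketch(struct eeh_pe *pe,
					    void *(*fn)(void *data, void *flag),
					    void *flag)
	{
		struct eeh_dev *edev;
		void *ret;

		eeh_pe_for_each_dev(pe, edev) {
			ret = fn(edev, flag);
			if (ret)	/* callback asked to stop early */
				return ret;
		}
		return NULL;
	}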
644/** 513/**
645 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second 514 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
646 * @edev: pci device node to be reset. 515 * @pe: EEH PE
647 * 516 *
648 * Assert the PCI #RST line for 1/4 second. 517 * Assert the PCI #RST line for 1/4 second.
649 */ 518 */
650static void eeh_reset_pe_once(struct eeh_dev *edev) 519static void eeh_reset_pe_once(struct eeh_pe *pe)
651{ 520{
652 unsigned int freset = 0; 521 unsigned int freset = 0;
653 struct device_node *dn = eeh_dev_to_of_node(edev);
654 522
655 /* Determine type of EEH reset required for 523 /* Determine type of EEH reset required for
656 * Partitionable Endpoint, a hot-reset (1) 524 * Partitionable Endpoint, a hot-reset (1)
@@ -658,12 +526,12 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
658 * A fundamental reset required by any device under 526 * A fundamental reset required by any device under
659 * Partitionable Endpoint trumps hot-reset. 527 * Partitionable Endpoint trumps hot-reset.
660 */ 528 */
661 eeh_set_pe_freset(dn, &freset); 529 eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
662 530
663 if (freset) 531 if (freset)
664 eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL); 532 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
665 else 533 else
666 eeh_ops->reset(dn, EEH_RESET_HOT); 534 eeh_ops->reset(pe, EEH_RESET_HOT);
667 535
668 /* The PCI bus requires that the reset be held high for at least 536 /* The PCI bus requires that the reset be held high for at least
669 * a 100 milliseconds. We wait a bit longer 'just in case'. 537 * a 100 milliseconds. We wait a bit longer 'just in case'.
@@ -675,9 +543,9 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
675 * pci slot reset line is dropped. Make sure we don't miss 543 * pci slot reset line is dropped. Make sure we don't miss
676 * these, and clear the flag now. 544 * these, and clear the flag now.
677 */ 545 */
678 eeh_clear_slot(dn, EEH_MODE_ISOLATED); 546 eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
679 547
680 eeh_ops->reset(dn, EEH_RESET_DEACTIVATE); 548 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
681 549
682 /* After a PCI slot has been reset, the PCI Express spec requires 550 /* After a PCI slot has been reset, the PCI Express spec requires
683 * a 1.5 second idle time for the bus to stabilize, before starting 551 * a 1.5 second idle time for the bus to stabilize, before starting
@@ -689,116 +557,36 @@ static void eeh_reset_pe_once(struct eeh_dev *edev)
689 557
690/** 558/**
691 * eeh_reset_pe - Reset the indicated PE 559 * eeh_reset_pe - Reset the indicated PE
692 * @edev: PCI device associated EEH device 560 * @pe: EEH PE
693 * 561 *
694 * This routine should be called to reset indicated device, including 562 * This routine should be called to reset indicated device, including
695 * PE. A PE might include multiple PCI devices and sometimes PCI bridges 563 * PE. A PE might include multiple PCI devices and sometimes PCI bridges
696 * might be involved as well. 564 * might be involved as well.
697 */ 565 */
698int eeh_reset_pe(struct eeh_dev *edev) 566int eeh_reset_pe(struct eeh_pe *pe)
699{ 567{
700 int i, rc; 568 int i, rc;
701 struct device_node *dn = eeh_dev_to_of_node(edev);
702 569
703 /* Take three shots at resetting the bus */ 570 /* Take three shots at resetting the bus */
704 for (i=0; i<3; i++) { 571 for (i=0; i<3; i++) {
705 eeh_reset_pe_once(edev); 572 eeh_reset_pe_once(pe);
706 573
707 rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC); 574 rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
708 if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) 575 if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
709 return 0; 576 return 0;
710 577
711 if (rc < 0) { 578 if (rc < 0) {
712 printk(KERN_ERR "EEH: unrecoverable slot failure %s\n", 579 pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
713 dn->full_name); 580 __func__, pe->phb->global_number, pe->addr);
714 return -1; 581 return -1;
715 } 582 }
716 printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n", 583 pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
717 i+1, dn->full_name, rc); 584 i+1, pe->phb->global_number, pe->addr, rc);
718 } 585 }
719 586
720 return -1; 587 return -1;
721} 588}
722 589
723/** Save and restore of PCI BARs
724 *
725 * Although firmware will set up BARs during boot, it doesn't
726 * set up device BAR's after a device reset, although it will,
727 * if requested, set up bridge configuration. Thus, we need to
728 * configure the PCI devices ourselves.
729 */
730
731/**
732 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
733 * @edev: PCI device associated EEH device
734 *
735 * Loads the PCI configuration space base address registers,
736 * the expansion ROM base address, the latency timer, and etc.
737 * from the saved values in the device node.
738 */
739static inline void eeh_restore_one_device_bars(struct eeh_dev *edev)
740{
741 int i;
742 u32 cmd;
743 struct device_node *dn = eeh_dev_to_of_node(edev);
744
745 if (!edev->phb)
746 return;
747
748 for (i=4; i<10; i++) {
749 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
750 }
751
752 /* 12 == Expansion ROM Address */
753 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
754
755#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
756#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
757
758 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
759 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
760
761 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
762 SAVED_BYTE(PCI_LATENCY_TIMER));
763
764 /* max latency, min grant, interrupt pin and line */
765 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
766
767 /* Restore PERR & SERR bits, some devices require it,
768 * don't touch the other command bits
769 */
770 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
771 if (edev->config_space[1] & PCI_COMMAND_PARITY)
772 cmd |= PCI_COMMAND_PARITY;
773 else
774 cmd &= ~PCI_COMMAND_PARITY;
775 if (edev->config_space[1] & PCI_COMMAND_SERR)
776 cmd |= PCI_COMMAND_SERR;
777 else
778 cmd &= ~PCI_COMMAND_SERR;
779 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
780}
781
782/**
783 * eeh_restore_bars - Restore the PCI config space info
784 * @edev: EEH device
785 *
786 * This routine performs a recursive walk to the children
787 * of this device as well.
788 */
789void eeh_restore_bars(struct eeh_dev *edev)
790{
791 struct device_node *dn;
792 if (!edev)
793 return;
794
795 if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code))
796 eeh_restore_one_device_bars(edev);
797
798 for_each_child_of_node(eeh_dev_to_of_node(edev), dn)
799 eeh_restore_bars(of_node_to_eeh_dev(dn));
800}
801
802/** 590/**
803 * eeh_save_bars - Save device bars 591 * eeh_save_bars - Save device bars
804 * @edev: PCI device associated EEH device 592 * @edev: PCI device associated EEH device
@@ -808,7 +596,7 @@ void eeh_restore_bars(struct eeh_dev *edev)
808 * PCI devices are added individually; but, for the restore, 596 * PCI devices are added individually; but, for the restore,
809 * an entire slot is reset at a time. 597 * an entire slot is reset at a time.
810 */ 598 */
811static void eeh_save_bars(struct eeh_dev *edev) 599void eeh_save_bars(struct eeh_dev *edev)
812{ 600{
813 int i; 601 int i;
814 struct device_node *dn; 602 struct device_node *dn;
@@ -822,102 +610,6 @@ static void eeh_save_bars(struct eeh_dev *edev)
822} 610}
823 611
824/** 612/**
825 * eeh_early_enable - Early enable EEH on the indicated device
826 * @dn: device node
827 * @data: BUID
828 *
829 * Enable EEH functionality on the specified PCI device. The function
830 * is expected to be called before real PCI probing is done. However,
831 * the PHBs have been initialized at this point.
832 */
833static void *eeh_early_enable(struct device_node *dn, void *data)
834{
835 int ret;
836 const u32 *class_code = of_get_property(dn, "class-code", NULL);
837 const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
838 const u32 *device_id = of_get_property(dn, "device-id", NULL);
839 const u32 *regs;
840 int enable;
841 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
842
843 edev->class_code = 0;
844 edev->mode = 0;
845 edev->check_count = 0;
846 edev->freeze_count = 0;
847 edev->false_positives = 0;
848
849 if (!of_device_is_available(dn))
850 return NULL;
851
852 /* Ignore bad nodes. */
853 if (!class_code || !vendor_id || !device_id)
854 return NULL;
855
856 /* There is nothing to check on PCI to ISA bridges */
857 if (dn->type && !strcmp(dn->type, "isa")) {
858 edev->mode |= EEH_MODE_NOCHECK;
859 return NULL;
860 }
861 edev->class_code = *class_code;
862
863 /* Ok... see if this device supports EEH. Some do, some don't,
864 * and the only way to find out is to check each and every one.
865 */
866 regs = of_get_property(dn, "reg", NULL);
867 if (regs) {
868 /* First register entry is addr (00BBSS00) */
869 /* Try to enable eeh */
870 ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);
871
872 enable = 0;
873 if (ret == 0) {
874 edev->config_addr = regs[0];
875
876 /* If the newer, better, ibm,get-config-addr-info is supported,
877 * then use that instead.
878 */
879 edev->pe_config_addr = eeh_ops->get_pe_addr(dn);
880
881 /* Some older systems (Power4) allow the
882 * ibm,set-eeh-option call to succeed even on nodes
883 * where EEH is not supported. Verify support
884 * explicitly.
885 */
886 ret = eeh_ops->get_state(dn, NULL);
887 if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
888 enable = 1;
889 }
890
891 if (enable) {
892 eeh_subsystem_enabled = 1;
893 edev->mode |= EEH_MODE_SUPPORTED;
894
895 pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
896 dn->full_name, edev->config_addr,
897 edev->pe_config_addr);
898 } else {
899
900 /* This device doesn't support EEH, but it may have an
901 * EEH parent, in which case we mark it as supported.
902 */
903 if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
904 (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
905 /* Parent supports EEH. */
906 edev->mode |= EEH_MODE_SUPPORTED;
907 edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
908 return NULL;
909 }
910 }
911 } else {
912 printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
913 dn->full_name);
914 }
915
916 eeh_save_bars(edev);
917 return NULL;
918}
919
920/**
921 * eeh_ops_register - Register platform dependent EEH operations 613 * eeh_ops_register - Register platform dependent EEH operations
922 * @ops: platform dependent EEH operations 614 * @ops: platform dependent EEH operations
923 * 615 *
@@ -982,7 +674,7 @@ int __exit eeh_ops_unregister(const char *name)
982 * Even if force-off is set, the EEH hardware is still enabled, so that 674 * Even if force-off is set, the EEH hardware is still enabled, so that
983 * newer systems can boot. 675 * newer systems can boot.
984 */ 676 */
985void __init eeh_init(void) 677static int __init eeh_init(void)
986{ 678{
987 struct pci_controller *hose, *tmp; 679 struct pci_controller *hose, *tmp;
988 struct device_node *phb; 680 struct device_node *phb;
@@ -992,27 +684,34 @@ void __init eeh_init(void)
992 if (!eeh_ops) { 684 if (!eeh_ops) {
993 pr_warning("%s: Platform EEH operation not found\n", 685 pr_warning("%s: Platform EEH operation not found\n",
994 __func__); 686 __func__);
995 return; 687 return -EEXIST;
996 } else if ((ret = eeh_ops->init())) { 688 } else if ((ret = eeh_ops->init())) {
997 pr_warning("%s: Failed to call platform init function (%d)\n", 689 pr_warning("%s: Failed to call platform init function (%d)\n",
998 __func__, ret); 690 __func__, ret);
999 return; 691 return ret;
1000 } 692 }
1001 693
1002 raw_spin_lock_init(&confirm_error_lock); 694 raw_spin_lock_init(&confirm_error_lock);
1003 695
1004 /* Enable EEH for all adapters */ 696 /* Enable EEH for all adapters */
1005 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 697 if (eeh_probe_mode_devtree()) {
1006 phb = hose->dn; 698 list_for_each_entry_safe(hose, tmp,
1007 traverse_pci_devices(phb, eeh_early_enable, NULL); 699 &hose_list, list_node) {
700 phb = hose->dn;
701 traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
702 }
1008 } 703 }
1009 704
1010 if (eeh_subsystem_enabled) 705 if (eeh_subsystem_enabled)
1011 printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n"); 706 pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
1012 else 707 else
1013 printk(KERN_WARNING "EEH: No capable adapters found\n"); 708 pr_warning("EEH: No capable adapters found\n");
709
710 return ret;
1014} 711}
1015 712
713core_initcall_sync(eeh_init);
714
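With eeh_init() now a core_initcall_sync, a platform backend has to hand its eeh_ops to the core before that initcall runs. A hedged sketch of such a registration, using only callbacks that appear in this patch (the pseries_* function names are illustrative placeholders):

	static struct eeh_ops sketch_eeh_ops = {
		.name             = "pseries",
		.init             = pseries_eeh_init,
		.of_probe         = pseries_eeh_of_probe,
		.set_option       = pseries_eeh_set_option,
		.get_state        = pseries_eeh_get_state,
		.wait_state       = pseries_eeh_wait_state,
		.reset            = pseries_eeh_reset,
		.configure_bridge = pseries_eeh_configure_bridge,
		.get_log          = pseries_eeh_get_log,
	};

	static int __init sketch_eeh_backend_init(void)
	{
		/* Must happen before eeh_init() runs as a core_initcall_sync. */
		return eeh_ops_register(&sketch_eeh_ops);
	}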
1016/** 715/**
1017 * eeh_add_device_early - Enable EEH for the indicated device_node 716 * eeh_add_device_early - Enable EEH for the indicated device_node
1018 * @dn: device node for which to set up EEH 717 * @dn: device node for which to set up EEH
@@ -1029,7 +728,7 @@ static void eeh_add_device_early(struct device_node *dn)
1029{ 728{
1030 struct pci_controller *phb; 729 struct pci_controller *phb;
1031 730
1032 if (!dn || !of_node_to_eeh_dev(dn)) 731 if (!of_node_to_eeh_dev(dn))
1033 return; 732 return;
1034 phb = of_node_to_eeh_dev(dn)->phb; 733 phb = of_node_to_eeh_dev(dn)->phb;
1035 734
@@ -1037,7 +736,8 @@ static void eeh_add_device_early(struct device_node *dn)
1037 if (NULL == phb || 0 == phb->buid) 736 if (NULL == phb || 0 == phb->buid)
1038 return; 737 return;
1039 738
1040 eeh_early_enable(dn, NULL); 739 /* FIXME: hotplug support on POWERNV */
740 eeh_ops->of_probe(dn, NULL);
1041} 741}
1042 742
1043/** 743/**
@@ -1087,7 +787,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
1087 edev->pdev = dev; 787 edev->pdev = dev;
1088 dev->dev.archdata.edev = edev; 788 dev->dev.archdata.edev = edev;
1089 789
1090 pci_addr_cache_insert_device(dev); 790 eeh_addr_cache_insert_dev(dev);
1091 eeh_sysfs_add_device(dev); 791 eeh_sysfs_add_device(dev);
1092} 792}
1093 793
@@ -1117,6 +817,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1117/** 817/**
1118 * eeh_remove_device - Undo EEH setup for the indicated pci device 818 * eeh_remove_device - Undo EEH setup for the indicated pci device
1119 * @dev: pci device to be removed 819 * @dev: pci device to be removed
820 * @purge_pe: remove the PE or not
1120 * 821 *
1121 * This routine should be called when a device is removed from 822 * This routine should be called when a device is removed from
1122 * a running system (e.g. by hotplug or dlpar). It unregisters 823 * a running system (e.g. by hotplug or dlpar). It unregisters
@@ -1124,7 +825,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1124 * this device will no longer be detected after this call; thus, 825 * this device will no longer be detected after this call; thus,
1125 * i/o errors affecting this slot may leave this device unusable. 826 * i/o errors affecting this slot may leave this device unusable.
1126 */ 827 */
1127static void eeh_remove_device(struct pci_dev *dev) 828static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
1128{ 829{
1129 struct eeh_dev *edev; 830 struct eeh_dev *edev;
1130 831
@@ -1143,28 +844,30 @@ static void eeh_remove_device(struct pci_dev *dev)
1143 dev->dev.archdata.edev = NULL; 844 dev->dev.archdata.edev = NULL;
1144 pci_dev_put(dev); 845 pci_dev_put(dev);
1145 846
1146 pci_addr_cache_remove_device(dev); 847 eeh_rmv_from_parent_pe(edev, purge_pe);
848 eeh_addr_cache_rmv_dev(dev);
1147 eeh_sysfs_remove_device(dev); 849 eeh_sysfs_remove_device(dev);
1148} 850}
1149 851
1150/** 852/**
1151 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device 853 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
1152 * @dev: PCI device 854 * @dev: PCI device
855 * @purge_pe: remove the corresponding PE or not
1153 * 856 *
1154 * This routine must be called when a device is removed from the 857 * This routine must be called when a device is removed from the
1155 * running system through hotplug or dlpar. The corresponding 858 * running system through hotplug or dlpar. The corresponding
1156 * PCI address cache will be removed. 859 * PCI address cache will be removed.
1157 */ 860 */
1158void eeh_remove_bus_device(struct pci_dev *dev) 861void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
1159{ 862{
1160 struct pci_bus *bus = dev->subordinate; 863 struct pci_bus *bus = dev->subordinate;
1161 struct pci_dev *child, *tmp; 864 struct pci_dev *child, *tmp;
1162 865
1163 eeh_remove_device(dev); 866 eeh_remove_device(dev, purge_pe);
1164 867
1165 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 868 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1166 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) 869 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
1167 eeh_remove_bus_device(child); 870 eeh_remove_bus_device(child, purge_pe);
1168 } 871 }
1169} 872}
1170EXPORT_SYMBOL_GPL(eeh_remove_bus_device); 873EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index e5ae1c687c66..5a4c87903057 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -50,6 +50,7 @@ struct pci_io_addr_range {
50 struct rb_node rb_node; 50 struct rb_node rb_node;
51 unsigned long addr_lo; 51 unsigned long addr_lo;
52 unsigned long addr_hi; 52 unsigned long addr_hi;
53 struct eeh_dev *edev;
53 struct pci_dev *pcidev; 54 struct pci_dev *pcidev;
54 unsigned int flags; 55 unsigned int flags;
55}; 56};
@@ -59,7 +60,7 @@ static struct pci_io_addr_cache {
59 spinlock_t piar_lock; 60 spinlock_t piar_lock;
60} pci_io_addr_cache_root; 61} pci_io_addr_cache_root;
61 62
62static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr) 63static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
63{ 64{
64 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; 65 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
65 66
@@ -74,7 +75,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
74 n = n->rb_right; 75 n = n->rb_right;
75 } else { 76 } else {
76 pci_dev_get(piar->pcidev); 77 pci_dev_get(piar->pcidev);
77 return piar->pcidev; 78 return piar->edev;
78 } 79 }
79 } 80 }
80 } 81 }
@@ -83,7 +84,7 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
83} 84}
84 85
85/** 86/**
86 * pci_addr_cache_get_device - Get device, given only address 87 * eeh_addr_cache_get_dev - Get device, given only address
87 * @addr: mmio (PIO) phys address or i/o port number 88 * @addr: mmio (PIO) phys address or i/o port number
88 * 89 *
89 * Given an mmio phys address, or a port number, find a pci device 90 * Given an mmio phys address, or a port number, find a pci device
@@ -92,15 +93,15 @@ static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
92 * from zero (that is, they do *not* have pci_io_addr added in). 93 * from zero (that is, they do *not* have pci_io_addr added in).
93 * It is safe to call this function within an interrupt. 94 * It is safe to call this function within an interrupt.
94 */ 95 */
95struct pci_dev *pci_addr_cache_get_device(unsigned long addr) 96struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
96{ 97{
97 struct pci_dev *dev; 98 struct eeh_dev *edev;
98 unsigned long flags; 99 unsigned long flags;
99 100
100 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 101 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
101 dev = __pci_addr_cache_get_device(addr); 102 edev = __eeh_addr_cache_get_device(addr);
102 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 103 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
103 return dev; 104 return edev;
104} 105}
105 106
106#ifdef DEBUG 107#ifdef DEBUG
@@ -108,7 +109,7 @@ struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
108 * Handy-dandy debug print routine, does nothing more 109 * Handy-dandy debug print routine, does nothing more
109 * than print out the contents of our addr cache. 110 * than print out the contents of our addr cache.
110 */ 111 */
111static void pci_addr_cache_print(struct pci_io_addr_cache *cache) 112static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
112{ 113{
113 struct rb_node *n; 114 struct rb_node *n;
114 int cnt = 0; 115 int cnt = 0;
@@ -117,7 +118,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
117 while (n) { 118 while (n) {
118 struct pci_io_addr_range *piar; 119 struct pci_io_addr_range *piar;
119 piar = rb_entry(n, struct pci_io_addr_range, rb_node); 120 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
120 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n", 121 pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
121 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, 122 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
122 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev)); 123 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
123 cnt++; 124 cnt++;
@@ -128,7 +129,7 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
128 129
129/* Insert address range into the rb tree. */ 130/* Insert address range into the rb tree. */
130static struct pci_io_addr_range * 131static struct pci_io_addr_range *
131pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, 132eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
132 unsigned long ahi, unsigned int flags) 133 unsigned long ahi, unsigned int flags)
133{ 134{
134 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node; 135 struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
@@ -146,23 +147,24 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
146 } else { 147 } else {
147 if (dev != piar->pcidev || 148 if (dev != piar->pcidev ||
148 alo != piar->addr_lo || ahi != piar->addr_hi) { 149 alo != piar->addr_lo || ahi != piar->addr_hi) {
149 printk(KERN_WARNING "PIAR: overlapping address range\n"); 150 pr_warning("PIAR: overlapping address range\n");
150 } 151 }
151 return piar; 152 return piar;
152 } 153 }
153 } 154 }
154 piar = kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); 155 piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
155 if (!piar) 156 if (!piar)
156 return NULL; 157 return NULL;
157 158
158 pci_dev_get(dev); 159 pci_dev_get(dev);
159 piar->addr_lo = alo; 160 piar->addr_lo = alo;
160 piar->addr_hi = ahi; 161 piar->addr_hi = ahi;
162 piar->edev = pci_dev_to_eeh_dev(dev);
161 piar->pcidev = dev; 163 piar->pcidev = dev;
162 piar->flags = flags; 164 piar->flags = flags;
163 165
164#ifdef DEBUG 166#ifdef DEBUG
165 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n", 167 pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
166 alo, ahi, pci_name(dev)); 168 alo, ahi, pci_name(dev));
167#endif 169#endif
168 170
@@ -172,7 +174,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
172 return piar; 174 return piar;
173} 175}
174 176
175static void __pci_addr_cache_insert_device(struct pci_dev *dev) 177static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
176{ 178{
177 struct device_node *dn; 179 struct device_node *dn;
178 struct eeh_dev *edev; 180 struct eeh_dev *edev;
@@ -180,7 +182,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
180 182
181 dn = pci_device_to_OF_node(dev); 183 dn = pci_device_to_OF_node(dev);
182 if (!dn) { 184 if (!dn) {
183 printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev)); 185 pr_warning("PCI: no pci dn found for dev=%s\n", pci_name(dev));
184 return; 186 return;
185 } 187 }
186 188
@@ -192,8 +194,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
192 } 194 }
193 195
194 /* Skip any devices for which EEH is not enabled. */ 196 /* Skip any devices for which EEH is not enabled. */
195 if (!(edev->mode & EEH_MODE_SUPPORTED) || 197 if (!edev->pe) {
196 edev->mode & EEH_MODE_NOCHECK) {
197#ifdef DEBUG 198#ifdef DEBUG
198 pr_info("PCI: skip building address cache for=%s - %s\n", 199 pr_info("PCI: skip building address cache for=%s - %s\n",
199 pci_name(dev), dn->full_name); 200 pci_name(dev), dn->full_name);
@@ -212,19 +213,19 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
212 continue; 213 continue;
213 if (start == 0 || ~start == 0 || end == 0 || ~end == 0) 214 if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
214 continue; 215 continue;
215 pci_addr_cache_insert(dev, start, end, flags); 216 eeh_addr_cache_insert(dev, start, end, flags);
216 } 217 }
217} 218}
218 219
219/** 220/**
220 * pci_addr_cache_insert_device - Add a device to the address cache 221 * eeh_addr_cache_insert_dev - Add a device to the address cache
221 * @dev: PCI device whose I/O addresses we are interested in. 222 * @dev: PCI device whose I/O addresses we are interested in.
222 * 223 *
223 * In order to support the fast lookup of devices based on addresses, 224 * In order to support the fast lookup of devices based on addresses,
224 * we maintain a cache of devices that can be quickly searched. 225 * we maintain a cache of devices that can be quickly searched.
225 * This routine adds a device to that cache. 226 * This routine adds a device to that cache.
226 */ 227 */
227void pci_addr_cache_insert_device(struct pci_dev *dev) 228void eeh_addr_cache_insert_dev(struct pci_dev *dev)
228{ 229{
229 unsigned long flags; 230 unsigned long flags;
230 231
@@ -233,11 +234,11 @@ void pci_addr_cache_insert_device(struct pci_dev *dev)
233 return; 234 return;
234 235
235 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 236 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
236 __pci_addr_cache_insert_device(dev); 237 __eeh_addr_cache_insert_dev(dev);
237 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 238 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
238} 239}
239 240
240static inline void __pci_addr_cache_remove_device(struct pci_dev *dev) 241static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
241{ 242{
242 struct rb_node *n; 243 struct rb_node *n;
243 244
@@ -258,7 +259,7 @@ restart:
258} 259}
259 260
260/** 261/**
261 * pci_addr_cache_remove_device - remove pci device from addr cache 262 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
262 * @dev: device to remove 263 * @dev: device to remove
263 * 264 *
264 * Remove a device from the addr-cache tree. 265 * Remove a device from the addr-cache tree.
@@ -266,17 +267,17 @@ restart:
266 * the tree multiple times (once per resource). 267 * the tree multiple times (once per resource).
267 * But so what; device removal doesn't need to be that fast. 268 * But so what; device removal doesn't need to be that fast.
268 */ 269 */
269void pci_addr_cache_remove_device(struct pci_dev *dev) 270void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
270{ 271{
271 unsigned long flags; 272 unsigned long flags;
272 273
273 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 274 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
274 __pci_addr_cache_remove_device(dev); 275 __eeh_addr_cache_rmv_dev(dev);
275 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 276 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
276} 277}
277 278
278/** 279/**
279 * pci_addr_cache_build - Build a cache of I/O addresses 280 * eeh_addr_cache_build - Build a cache of I/O addresses
280 * 281 *
281 * Build a cache of pci i/o addresses. This cache will be used to 282 * Build a cache of pci i/o addresses. This cache will be used to
282 * find the pci device that corresponds to a given address. 283 * find the pci device that corresponds to a given address.
@@ -284,7 +285,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
284 * Must be run late in boot process, after the pci controllers 285 * Must be run late in boot process, after the pci controllers
285 * have been scanned for devices (after all device resources are known). 286 * have been scanned for devices (after all device resources are known).
286 */ 287 */
287void __init pci_addr_cache_build(void) 288void __init eeh_addr_cache_build(void)
288{ 289{
289 struct device_node *dn; 290 struct device_node *dn;
290 struct eeh_dev *edev; 291 struct eeh_dev *edev;
@@ -293,7 +294,7 @@ void __init pci_addr_cache_build(void)
293 spin_lock_init(&pci_io_addr_cache_root.piar_lock); 294 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
294 295
295 for_each_pci_dev(dev) { 296 for_each_pci_dev(dev) {
296 pci_addr_cache_insert_device(dev); 297 eeh_addr_cache_insert_dev(dev);
297 298
298 dn = pci_device_to_OF_node(dev); 299 dn = pci_device_to_OF_node(dev);
299 if (!dn) 300 if (!dn)
@@ -312,7 +313,7 @@ void __init pci_addr_cache_build(void)
312 313
313#ifdef DEBUG 314#ifdef DEBUG
314 /* Verify tree built up above, echo back the list of addrs. */ 315 /* Verify tree built up above, echo back the list of addrs. */
315 pci_addr_cache_print(&pci_io_addr_cache_root); 316 eeh_addr_cache_print(&pci_io_addr_cache_root);
316#endif 317#endif
317} 318}
318 319
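
A short sketch of the lookup path this file now provides, with the address cache keyed to eeh_dev instead of pci_dev; the helper name is illustrative and the surrounding kernel context is assumed.

static struct eeh_pe *example_pe_for_addr(unsigned long addr)
{
        struct eeh_dev *edev = eeh_addr_cache_get_dev(addr);

        if (!edev)
                return NULL;

        /* The lookup took a reference on the underlying pci_dev */
        pci_dev_put(eeh_dev_to_pci_dev(edev));

        /* Devices without an associated PE are never inserted above,
         * so the PE pointer is expected to be valid here.
         */
        return edev->pe;
}
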
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c
index c4507d095900..66442341d3a6 100644
--- a/arch/powerpc/platforms/pseries/eeh_dev.c
+++ b/arch/powerpc/platforms/pseries/eeh_dev.c
@@ -55,7 +55,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data)
55 struct eeh_dev *edev; 55 struct eeh_dev *edev;
56 56
57 /* Allocate EEH device */ 57 /* Allocate EEH device */
58 edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL); 58 edev = kzalloc(sizeof(*edev), GFP_KERNEL);
59 if (!edev) { 59 if (!edev) {
60 pr_warning("%s: out of memory\n", __func__); 60 pr_warning("%s: out of memory\n", __func__);
61 return NULL; 61 return NULL;
@@ -65,6 +65,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data)
65 PCI_DN(dn)->edev = edev; 65 PCI_DN(dn)->edev = edev;
66 edev->dn = dn; 66 edev->dn = dn;
67 edev->phb = phb; 67 edev->phb = phb;
68 INIT_LIST_HEAD(&edev->list);
68 69
69 return NULL; 70 return NULL;
70} 71}
@@ -80,6 +81,9 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
80{ 81{
81 struct device_node *dn = phb->dn; 82 struct device_node *dn = phb->dn;
82 83
84 /* EEH PE for PHB */
85 eeh_phb_pe_create(phb);
86
83 /* EEH device for PHB */ 87 /* EEH device for PHB */
84 eeh_dev_init(dn, phb); 88 eeh_dev_init(dn, phb);
85 89
@@ -93,10 +97,16 @@ void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
93 * Scan all the existing PHBs and create EEH devices for their OF 97 * Scan all the existing PHBs and create EEH devices for their OF
94 * nodes and their children OF nodes 98 * nodes and their children OF nodes
95 */ 99 */
96void __init eeh_dev_phb_init(void) 100static int __init eeh_dev_phb_init(void)
97{ 101{
98 struct pci_controller *phb, *tmp; 102 struct pci_controller *phb, *tmp;
99 103
100 list_for_each_entry_safe(phb, tmp, &hose_list, list_node) 104 list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
101 eeh_dev_phb_init_dynamic(phb); 105 eeh_dev_phb_init_dynamic(phb);
106
107 pr_info("EEH: devices created\n");
108
109 return 0;
102} 110}
111
112core_initcall(eeh_dev_phb_init);
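
The hunk above converts eeh_dev_phb_init() from an explicitly-called setup routine into a core_initcall. A minimal ordering sketch under that assumption (the example function is illustrative and does nothing): anything that depends on the per-PHB EEH devices can simply use a later initcall level, since subsys_initcall() runs after core_initcall().

static int __init example_after_eeh_devs(void)
{
        /* Safe to assume the PHB PEs and EEH devices exist by now */
        return 0;
}
subsys_initcall(example_after_eeh_devs);
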
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index baf92cd9dfab..a3fefb61097c 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -25,6 +25,7 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/module.h>
28#include <linux/pci.h> 29#include <linux/pci.h>
29#include <asm/eeh.h> 30#include <asm/eeh.h>
30#include <asm/eeh_event.h> 31#include <asm/eeh_event.h>
@@ -47,6 +48,41 @@ static inline const char *eeh_pcid_name(struct pci_dev *pdev)
47 return ""; 48 return "";
48} 49}
49 50
51/**
52 * eeh_pcid_get - Get the PCI device driver
53 * @pdev: PCI device
54 *
55 * The function is used to retrieve the PCI device driver for
56 * the indicated PCI device. Besides, we will increase the reference
 57 * count of the PCI device driver to prevent it from being unloaded
 58 * on the fly. Otherwise, a kernel crash could be seen.
59 */
60static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
61{
62 if (!pdev || !pdev->driver)
63 return NULL;
64
65 if (!try_module_get(pdev->driver->driver.owner))
66 return NULL;
67
68 return pdev->driver;
69}
70
71/**
72 * eeh_pcid_put - Dereference on the PCI device driver
73 * @pdev: PCI device
74 *
 75 * The function is called to drop the reference on the PCI device
76 * driver of the indicated PCI device.
77 */
78static inline void eeh_pcid_put(struct pci_dev *pdev)
79{
80 if (!pdev || !pdev->driver)
81 return;
82
83 module_put(pdev->driver->driver.owner);
84}
85
50#if 0 86#if 0
51static void print_device_node_tree(struct pci_dn *pdn, int dent) 87static void print_device_node_tree(struct pci_dn *pdn, int dent)
52{ 88{
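
The two helpers added above establish a get/put bracket around every driver callback so the driver module cannot be unloaded mid-notification. A minimal sketch of that pattern, mirroring eeh_report_error() further down; the function name here is illustrative.

static void example_notify_frozen(struct pci_dev *pdev)
{
        struct pci_driver *driver = eeh_pcid_get(pdev); /* pins the module */

        if (!driver)
                return;

        if (driver->err_handler && driver->err_handler->error_detected)
                driver->err_handler->error_detected(pdev,
                                                    pci_channel_io_frozen);

        eeh_pcid_put(pdev); /* drops the module reference */
}
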
@@ -93,7 +129,7 @@ static void eeh_disable_irq(struct pci_dev *dev)
93 if (!irq_has_action(dev->irq)) 129 if (!irq_has_action(dev->irq))
94 return; 130 return;
95 131
96 edev->mode |= EEH_MODE_IRQ_DISABLED; 132 edev->mode |= EEH_DEV_IRQ_DISABLED;
97 disable_irq_nosync(dev->irq); 133 disable_irq_nosync(dev->irq);
98} 134}
99 135
@@ -108,36 +144,44 @@ static void eeh_enable_irq(struct pci_dev *dev)
108{ 144{
109 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 145 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
110 146
111 if ((edev->mode) & EEH_MODE_IRQ_DISABLED) { 147 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
112 edev->mode &= ~EEH_MODE_IRQ_DISABLED; 148 edev->mode &= ~EEH_DEV_IRQ_DISABLED;
113 enable_irq(dev->irq); 149 enable_irq(dev->irq);
114 } 150 }
115} 151}
116 152
117/** 153/**
118 * eeh_report_error - Report pci error to each device driver 154 * eeh_report_error - Report pci error to each device driver
119 * @dev: PCI device 155 * @data: eeh device
120 * @userdata: return value 156 * @userdata: return value
121 * 157 *
122 * Report an EEH error to each device driver, collect up and 158 * Report an EEH error to each device driver, collect up and
123 * merge the device driver responses. Cumulative response 159 * merge the device driver responses. Cumulative response
124 * passed back in "userdata". 160 * passed back in "userdata".
125 */ 161 */
126static int eeh_report_error(struct pci_dev *dev, void *userdata) 162static void *eeh_report_error(void *data, void *userdata)
127{ 163{
164 struct eeh_dev *edev = (struct eeh_dev *)data;
165 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
128 enum pci_ers_result rc, *res = userdata; 166 enum pci_ers_result rc, *res = userdata;
129 struct pci_driver *driver = dev->driver; 167 struct pci_driver *driver;
130 168
169 /* We might not have the associated PCI device,
 170 * in which case we simply continue with the next one.
171 */
172 if (!dev) return NULL;
131 dev->error_state = pci_channel_io_frozen; 173 dev->error_state = pci_channel_io_frozen;
132 174
133 if (!driver) 175 driver = eeh_pcid_get(dev);
134 return 0; 176 if (!driver) return NULL;
135 177
136 eeh_disable_irq(dev); 178 eeh_disable_irq(dev);
137 179
138 if (!driver->err_handler || 180 if (!driver->err_handler ||
139 !driver->err_handler->error_detected) 181 !driver->err_handler->error_detected) {
140 return 0; 182 eeh_pcid_put(dev);
183 return NULL;
184 }
141 185
142 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); 186 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
143 187
@@ -145,27 +189,34 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
145 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 189 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
146 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 190 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
147 191
148 return 0; 192 eeh_pcid_put(dev);
193 return NULL;
149} 194}
150 195
151/** 196/**
152 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled 197 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
153 * @dev: PCI device 198 * @data: eeh device
154 * @userdata: return value 199 * @userdata: return value
155 * 200 *
156 * Tells each device driver that IO ports, MMIO and config space I/O 201 * Tells each device driver that IO ports, MMIO and config space I/O
157 * are now enabled. Collects up and merges the device driver responses. 202 * are now enabled. Collects up and merges the device driver responses.
158 * Cumulative response passed back in "userdata". 203 * Cumulative response passed back in "userdata".
159 */ 204 */
160static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) 205static void *eeh_report_mmio_enabled(void *data, void *userdata)
161{ 206{
207 struct eeh_dev *edev = (struct eeh_dev *)data;
208 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
162 enum pci_ers_result rc, *res = userdata; 209 enum pci_ers_result rc, *res = userdata;
163 struct pci_driver *driver = dev->driver; 210 struct pci_driver *driver;
164 211
165 if (!driver || 212 driver = eeh_pcid_get(dev);
166 !driver->err_handler || 213 if (!driver) return NULL;
167 !driver->err_handler->mmio_enabled) 214
168 return 0; 215 if (!driver->err_handler ||
216 !driver->err_handler->mmio_enabled) {
217 eeh_pcid_put(dev);
218 return NULL;
219 }
169 220
170 rc = driver->err_handler->mmio_enabled(dev); 221 rc = driver->err_handler->mmio_enabled(dev);
171 222
@@ -173,12 +224,13 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
173 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 224 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
174 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 225 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
175 226
176 return 0; 227 eeh_pcid_put(dev);
228 return NULL;
177} 229}
178 230
179/** 231/**
180 * eeh_report_reset - Tell device that slot has been reset 232 * eeh_report_reset - Tell device that slot has been reset
181 * @dev: PCI device 233 * @data: eeh device
182 * @userdata: return value 234 * @userdata: return value
183 * 235 *
184 * This routine must be called while EEH tries to reset particular 236 * This routine must be called while EEH tries to reset particular
@@ -186,21 +238,26 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
186 * some actions, usually to save data the driver needs so that the 238 * some actions, usually to save data the driver needs so that the
187 * driver can work again while the device is recovered. 239 * driver can work again while the device is recovered.
188 */ 240 */
189static int eeh_report_reset(struct pci_dev *dev, void *userdata) 241static void *eeh_report_reset(void *data, void *userdata)
190{ 242{
243 struct eeh_dev *edev = (struct eeh_dev *)data;
244 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
191 enum pci_ers_result rc, *res = userdata; 245 enum pci_ers_result rc, *res = userdata;
192 struct pci_driver *driver = dev->driver; 246 struct pci_driver *driver;
193
194 if (!driver)
195 return 0;
196 247
248 if (!dev) return NULL;
197 dev->error_state = pci_channel_io_normal; 249 dev->error_state = pci_channel_io_normal;
198 250
251 driver = eeh_pcid_get(dev);
252 if (!driver) return NULL;
253
199 eeh_enable_irq(dev); 254 eeh_enable_irq(dev);
200 255
201 if (!driver->err_handler || 256 if (!driver->err_handler ||
202 !driver->err_handler->slot_reset) 257 !driver->err_handler->slot_reset) {
203 return 0; 258 eeh_pcid_put(dev);
259 return NULL;
260 }
204 261
205 rc = driver->err_handler->slot_reset(dev); 262 rc = driver->err_handler->slot_reset(dev);
206 if ((*res == PCI_ERS_RESULT_NONE) || 263 if ((*res == PCI_ERS_RESULT_NONE) ||
@@ -208,109 +265,115 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
208 if (*res == PCI_ERS_RESULT_DISCONNECT && 265 if (*res == PCI_ERS_RESULT_DISCONNECT &&
209 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 266 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
210 267
211 return 0; 268 eeh_pcid_put(dev);
269 return NULL;
212} 270}
213 271
214/** 272/**
215 * eeh_report_resume - Tell device to resume normal operations 273 * eeh_report_resume - Tell device to resume normal operations
216 * @dev: PCI device 274 * @data: eeh device
217 * @userdata: return value 275 * @userdata: return value
218 * 276 *
219 * This routine must be called to notify the device driver that it 277 * This routine must be called to notify the device driver that it
220 * could resume so that the device driver can do some initialization 278 * could resume so that the device driver can do some initialization
221 * to make the recovered device work again. 279 * to make the recovered device work again.
222 */ 280 */
223static int eeh_report_resume(struct pci_dev *dev, void *userdata) 281static void *eeh_report_resume(void *data, void *userdata)
224{ 282{
225 struct pci_driver *driver = dev->driver; 283 struct eeh_dev *edev = (struct eeh_dev *)data;
284 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
285 struct pci_driver *driver;
226 286
287 if (!dev) return NULL;
227 dev->error_state = pci_channel_io_normal; 288 dev->error_state = pci_channel_io_normal;
228 289
229 if (!driver) 290 driver = eeh_pcid_get(dev);
230 return 0; 291 if (!driver) return NULL;
231 292
232 eeh_enable_irq(dev); 293 eeh_enable_irq(dev);
233 294
234 if (!driver->err_handler || 295 if (!driver->err_handler ||
235 !driver->err_handler->resume) 296 !driver->err_handler->resume) {
236 return 0; 297 eeh_pcid_put(dev);
298 return NULL;
299 }
237 300
238 driver->err_handler->resume(dev); 301 driver->err_handler->resume(dev);
239 302
240 return 0; 303 eeh_pcid_put(dev);
304 return NULL;
241} 305}
242 306
243/** 307/**
244 * eeh_report_failure - Tell device driver that device is dead. 308 * eeh_report_failure - Tell device driver that device is dead.
245 * @dev: PCI device 309 * @data: eeh device
246 * @userdata: return value 310 * @userdata: return value
247 * 311 *
248 * This informs the device driver that the device is permanently 312 * This informs the device driver that the device is permanently
249 * dead, and that no further recovery attempts will be made on it. 313 * dead, and that no further recovery attempts will be made on it.
250 */ 314 */
251static int eeh_report_failure(struct pci_dev *dev, void *userdata) 315static void *eeh_report_failure(void *data, void *userdata)
252{ 316{
253 struct pci_driver *driver = dev->driver; 317 struct eeh_dev *edev = (struct eeh_dev *)data;
318 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
319 struct pci_driver *driver;
254 320
321 if (!dev) return NULL;
255 dev->error_state = pci_channel_io_perm_failure; 322 dev->error_state = pci_channel_io_perm_failure;
256 323
257 if (!driver) 324 driver = eeh_pcid_get(dev);
258 return 0; 325 if (!driver) return NULL;
259 326
260 eeh_disable_irq(dev); 327 eeh_disable_irq(dev);
261 328
262 if (!driver->err_handler || 329 if (!driver->err_handler ||
263 !driver->err_handler->error_detected) 330 !driver->err_handler->error_detected) {
264 return 0; 331 eeh_pcid_put(dev);
332 return NULL;
333 }
265 334
266 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); 335 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
267 336
268 return 0; 337 eeh_pcid_put(dev);
338 return NULL;
269} 339}
270 340
271/** 341/**
272 * eeh_reset_device - Perform actual reset of a pci slot 342 * eeh_reset_device - Perform actual reset of a pci slot
273 * @edev: PE associated EEH device 343 * @pe: EEH PE
 274 * @bus: PCI bus corresponding to the isolated slot 344 * @bus: PCI bus corresponding to the isolated slot
275 * 345 *
276 * This routine must be called to do reset on the indicated PE. 346 * This routine must be called to do reset on the indicated PE.
277 * During the reset, udev might be invoked because those affected 347 * During the reset, udev might be invoked because those affected
278 * PCI devices will be removed and then added. 348 * PCI devices will be removed and then added.
279 */ 349 */
280static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus) 350static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
281{ 351{
282 struct device_node *dn;
283 int cnt, rc; 352 int cnt, rc;
284 353
285 /* pcibios will clear the counter; save the value */ 354 /* pcibios will clear the counter; save the value */
286 cnt = edev->freeze_count; 355 cnt = pe->freeze_count;
287 356
357 /*
358 * We don't remove the corresponding PE instances because
 359 * we need the information afterwards. The attached EEH
 360 * devices are expected to be re-attached when we later call
361 * into pcibios_add_pci_devices().
362 */
288 if (bus) 363 if (bus)
289 pcibios_remove_pci_devices(bus); 364 __pcibios_remove_pci_devices(bus, 0);
290 365
291 /* Reset the pci controller. (Asserts RST#; resets config space). 366 /* Reset the pci controller. (Asserts RST#; resets config space).
292 * Reconfigure bridges and devices. Don't try to bring the system 367 * Reconfigure bridges and devices. Don't try to bring the system
293 * up if the reset failed for some reason. 368 * up if the reset failed for some reason.
294 */ 369 */
295 rc = eeh_reset_pe(edev); 370 rc = eeh_reset_pe(pe);
296 if (rc) 371 if (rc)
297 return rc; 372 return rc;
298 373
299 /* Walk over all functions on this device. */ 374 /* Restore PE */
300 dn = eeh_dev_to_of_node(edev); 375 eeh_ops->configure_bridge(pe);
301 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent)) 376 eeh_pe_restore_bars(pe);
302 dn = dn->parent->child;
303
304 while (dn) {
305 struct eeh_dev *pedev = of_node_to_eeh_dev(dn);
306
307 /* On Power4, always true because eeh_pe_config_addr=0 */
308 if (edev->pe_config_addr == pedev->pe_config_addr) {
309 eeh_ops->configure_bridge(dn);
310 eeh_restore_bars(pedev);
311 }
312 dn = dn->sibling;
313 }
314 377
315 /* Give the system 5 seconds to finish running the user-space 378 /* Give the system 5 seconds to finish running the user-space
316 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, 379 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
@@ -322,7 +385,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
322 ssleep(5); 385 ssleep(5);
323 pcibios_add_pci_devices(bus); 386 pcibios_add_pci_devices(bus);
324 } 387 }
325 edev->freeze_count = cnt; 388 pe->freeze_count = cnt;
326 389
327 return 0; 390 return 0;
328} 391}
@@ -334,7 +397,7 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
334 397
335/** 398/**
336 * eeh_handle_event - Reset a PCI device after hard lockup. 399 * eeh_handle_event - Reset a PCI device after hard lockup.
337 * @event: EEH event 400 * @pe: EEH PE
338 * 401 *
 339 * When the PHB detects address or data parity errors on a particular 402 * When the PHB detects address or data parity errors on a particular
 340 * PCI slot, the associated PE will be frozen. Besides, DMAs occurring 403 * PCI slot, the associated PE will be frozen. Besides, DMAs occurring
@@ -349,69 +412,24 @@ static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
349 * drivers (which cause a second set of hotplug events to go out to 412 * drivers (which cause a second set of hotplug events to go out to
350 * userspace). 413 * userspace).
351 */ 414 */
352struct eeh_dev *handle_eeh_events(struct eeh_event *event) 415void eeh_handle_event(struct eeh_pe *pe)
353{ 416{
354 struct device_node *frozen_dn;
355 struct eeh_dev *frozen_edev;
356 struct pci_bus *frozen_bus; 417 struct pci_bus *frozen_bus;
357 int rc = 0; 418 int rc = 0;
358 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 419 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
359 const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
360
361 frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev));
362 if (!frozen_dn) {
363 location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL);
364 location = location ? location : "unknown";
365 printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
366 "for location=%s pci addr=%s\n",
367 location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev)));
368 return NULL;
369 }
370
371 frozen_bus = pcibios_find_pci_bus(frozen_dn);
372 location = of_get_property(frozen_dn, "ibm,loc-code", NULL);
373 location = location ? location : "unknown";
374
375 /* There are two different styles for coming up with the PE.
376 * In the old style, it was the highest EEH-capable device
377 * which was always an EADS pci bridge. In the new style,
378 * there might not be any EADS bridges, and even when there are,
379 * the firmware marks them as "EEH incapable". So another
380 * two-step is needed to find the pci bus..
381 */
382 if (!frozen_bus)
383 frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);
384 420
421 frozen_bus = eeh_pe_bus_get(pe);
385 if (!frozen_bus) { 422 if (!frozen_bus) {
386 printk(KERN_ERR "EEH: Cannot find PCI bus " 423 pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
387 "for location=%s dn=%s\n", 424 __func__, pe->phb->global_number, pe->addr);
388 location, frozen_dn->full_name); 425 return;
389 return NULL;
390 } 426 }
391 427
392 frozen_edev = of_node_to_eeh_dev(frozen_dn); 428 pe->freeze_count++;
393 frozen_edev->freeze_count++; 429 if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
394 pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev));
395 drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev));
396
397 if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
398 goto excess_failures; 430 goto excess_failures;
399 431 pr_warning("EEH: This PCI device has failed %d times in the last hour\n",
400 printk(KERN_WARNING 432 pe->freeze_count);
401 "EEH: This PCI device has failed %d times in the last hour:\n",
402 frozen_edev->freeze_count);
403
404 if (frozen_edev->pdev) {
405 bus_pci_str = pci_name(frozen_edev->pdev);
406 bus_drv_str = eeh_pcid_name(frozen_edev->pdev);
407 printk(KERN_WARNING
408 "EEH: Bus location=%s driver=%s pci addr=%s\n",
409 location, bus_drv_str, bus_pci_str);
410 }
411
412 printk(KERN_WARNING
413 "EEH: Device location=%s driver=%s pci addr=%s\n",
414 location, drv_str, pci_str);
415 433
416 /* Walk the various device drivers attached to this slot through 434 /* Walk the various device drivers attached to this slot through
417 * a reset sequence, giving each an opportunity to do what it needs 435 * a reset sequence, giving each an opportunity to do what it needs
@@ -419,12 +437,12 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
419 * status ... if any child can't handle the reset, then the entire 437 * status ... if any child can't handle the reset, then the entire
420 * slot is dlpar removed and added. 438 * slot is dlpar removed and added.
421 */ 439 */
422 pci_walk_bus(frozen_bus, eeh_report_error, &result); 440 eeh_pe_dev_traverse(pe, eeh_report_error, &result);
423 441
424 /* Get the current PCI slot state. This can take a long time, 442 /* Get the current PCI slot state. This can take a long time,
425 * sometimes over 3 seconds for certain systems. 443 * sometimes over 3 seconds for certain systems.
426 */ 444 */
427 rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000); 445 rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
428 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { 446 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
429 printk(KERN_WARNING "EEH: Permanent failure\n"); 447 printk(KERN_WARNING "EEH: Permanent failure\n");
430 goto hard_fail; 448 goto hard_fail;
@@ -434,14 +452,14 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
434 * don't post the error log until after all dev drivers 452 * don't post the error log until after all dev drivers
435 * have been informed. 453 * have been informed.
436 */ 454 */
437 eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP); 455 eeh_slot_error_detail(pe, EEH_LOG_TEMP);
438 456
439 /* If all device drivers were EEH-unaware, then shut 457 /* If all device drivers were EEH-unaware, then shut
440 * down all of the device drivers, and hope they 458 * down all of the device drivers, and hope they
 441 * go down willingly, without panicking the system. 459 * go down willingly, without panicking the system.
442 */ 460 */
443 if (result == PCI_ERS_RESULT_NONE) { 461 if (result == PCI_ERS_RESULT_NONE) {
444 rc = eeh_reset_device(frozen_edev, frozen_bus); 462 rc = eeh_reset_device(pe, frozen_bus);
445 if (rc) { 463 if (rc) {
446 printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); 464 printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
447 goto hard_fail; 465 goto hard_fail;
@@ -450,7 +468,7 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
450 468
451 /* If all devices reported they can proceed, then re-enable MMIO */ 469 /* If all devices reported they can proceed, then re-enable MMIO */
452 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 470 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
453 rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO); 471 rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
454 472
455 if (rc < 0) 473 if (rc < 0)
456 goto hard_fail; 474 goto hard_fail;
@@ -458,13 +476,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
458 result = PCI_ERS_RESULT_NEED_RESET; 476 result = PCI_ERS_RESULT_NEED_RESET;
459 } else { 477 } else {
460 result = PCI_ERS_RESULT_NONE; 478 result = PCI_ERS_RESULT_NONE;
461 pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result); 479 eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
462 } 480 }
463 } 481 }
464 482
465 /* If all devices reported they can proceed, then re-enable DMA */ 483 /* If all devices reported they can proceed, then re-enable DMA */
466 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 484 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
467 rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA); 485 rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
468 486
469 if (rc < 0) 487 if (rc < 0)
470 goto hard_fail; 488 goto hard_fail;
@@ -482,13 +500,13 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
482 500
483 /* If any device called out for a reset, then reset the slot */ 501 /* If any device called out for a reset, then reset the slot */
484 if (result == PCI_ERS_RESULT_NEED_RESET) { 502 if (result == PCI_ERS_RESULT_NEED_RESET) {
485 rc = eeh_reset_device(frozen_edev, NULL); 503 rc = eeh_reset_device(pe, NULL);
486 if (rc) { 504 if (rc) {
487 printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); 505 printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
488 goto hard_fail; 506 goto hard_fail;
489 } 507 }
490 result = PCI_ERS_RESULT_NONE; 508 result = PCI_ERS_RESULT_NONE;
491 pci_walk_bus(frozen_bus, eeh_report_reset, &result); 509 eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
492 } 510 }
493 511
494 /* All devices should claim they have recovered by now. */ 512 /* All devices should claim they have recovered by now. */
@@ -499,9 +517,9 @@ struct eeh_dev *handle_eeh_events(struct eeh_event *event)
499 } 517 }
500 518
501 /* Tell all device drivers that they can resume operations */ 519 /* Tell all device drivers that they can resume operations */
502 pci_walk_bus(frozen_bus, eeh_report_resume, NULL); 520 eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
503 521
504 return frozen_edev; 522 return;
505 523
506excess_failures: 524excess_failures:
507 /* 525 /*
@@ -509,30 +527,26 @@ excess_failures:
509 * are due to poorly seated PCI cards. Only 10% or so are 527 * are due to poorly seated PCI cards. Only 10% or so are
510 * due to actual, failed cards. 528 * due to actual, failed cards.
511 */ 529 */
512 printk(KERN_ERR 530 pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
513 "EEH: PCI device at location=%s driver=%s pci addr=%s\n" 531 "last hour and has been permanently disabled.\n"
514 "has failed %d times in the last hour " 532 "Please try reseating or replacing it.\n",
515 "and has been permanently disabled.\n" 533 pe->phb->global_number, pe->addr,
516 "Please try reseating this device or replacing it.\n", 534 pe->freeze_count);
517 location, drv_str, pci_str, frozen_edev->freeze_count);
518 goto perm_error; 535 goto perm_error;
519 536
520hard_fail: 537hard_fail:
521 printk(KERN_ERR 538 pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
522 "EEH: Unable to recover from failure of PCI device " 539 "Please try reseating or replacing it\n",
523 "at location=%s driver=%s pci addr=%s\n" 540 pe->phb->global_number, pe->addr);
524 "Please try reseating this device or replacing it.\n",
525 location, drv_str, pci_str);
526 541
527perm_error: 542perm_error:
528 eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM); 543 eeh_slot_error_detail(pe, EEH_LOG_PERM);
529 544
530 /* Notify all devices that they're about to go down. */ 545 /* Notify all devices that they're about to go down. */
531 pci_walk_bus(frozen_bus, eeh_report_failure, NULL); 546 eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
532 547
533 /* Shut down the device drivers for good. */ 548 /* Shut down the device drivers for good. */
534 pcibios_remove_pci_devices(frozen_bus); 549 if (frozen_bus)
535 550 pcibios_remove_pci_devices(frozen_bus);
536 return NULL;
537} 551}
538 552
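
All of the report_* callbacks above now follow the eeh_pe_dev_traverse() convention: they receive an eeh_dev as the first argument, a cursor as the second, and a non-NULL return value stops the traversal early. A minimal sketch of a custom callback under those assumptions; the names here are illustrative.

static void *example_count_bound_devs(void *data, void *userdata)
{
        struct eeh_dev *edev = data;
        int *cnt = userdata;

        if (eeh_dev_to_pci_dev(edev))
                (*cnt)++;

        return NULL; /* NULL keeps the traversal going */
}

/* Usage: int cnt = 0; eeh_pe_dev_traverse(pe, example_count_bound_devs, &cnt); */
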
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index fb506317ebb0..51faaac8abe6 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -57,7 +57,7 @@ static int eeh_event_handler(void * dummy)
57{ 57{
58 unsigned long flags; 58 unsigned long flags;
59 struct eeh_event *event; 59 struct eeh_event *event;
60 struct eeh_dev *edev; 60 struct eeh_pe *pe;
61 61
62 set_task_comm(current, "eehd"); 62 set_task_comm(current, "eehd");
63 63
@@ -76,28 +76,23 @@ static int eeh_event_handler(void * dummy)
76 76
77 /* Serialize processing of EEH events */ 77 /* Serialize processing of EEH events */
78 mutex_lock(&eeh_event_mutex); 78 mutex_lock(&eeh_event_mutex);
79 edev = event->edev; 79 pe = event->pe;
80 eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING); 80 eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
81 81 pr_info("EEH: Detected PCI bus error on PHB#%d-PE#%x\n",
82 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", 82 pe->phb->global_number, pe->addr);
83 eeh_pci_name(edev->pdev));
84 83
85 set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */ 84 set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
86 edev = handle_eeh_events(event); 85 eeh_handle_event(pe);
87 86 eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
88 if (edev) {
89 eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
90 pci_dev_put(edev->pdev);
91 }
92 87
93 kfree(event); 88 kfree(event);
94 mutex_unlock(&eeh_event_mutex); 89 mutex_unlock(&eeh_event_mutex);
95 90
96 /* If there are no new errors after an hour, clear the counter. */ 91 /* If there are no new errors after an hour, clear the counter. */
97 if (edev && edev->freeze_count>0) { 92 if (pe && pe->freeze_count > 0) {
98 msleep_interruptible(3600*1000); 93 msleep_interruptible(3600*1000);
99 if (edev->freeze_count>0) 94 if (pe->freeze_count > 0)
100 edev->freeze_count--; 95 pe->freeze_count--;
101 96
102 } 97 }
103 98
@@ -119,36 +114,23 @@ static void eeh_thread_launcher(struct work_struct *dummy)
119 114
120/** 115/**
121 * eeh_send_failure_event - Generate a PCI error event 116 * eeh_send_failure_event - Generate a PCI error event
122 * @edev: EEH device 117 * @pe: EEH PE
123 * 118 *
124 * This routine can be called within an interrupt context; 119 * This routine can be called within an interrupt context;
125 * the actual event will be delivered in a normal context 120 * the actual event will be delivered in a normal context
126 * (from a workqueue). 121 * (from a workqueue).
127 */ 122 */
128int eeh_send_failure_event(struct eeh_dev *edev) 123int eeh_send_failure_event(struct eeh_pe *pe)
129{ 124{
130 unsigned long flags; 125 unsigned long flags;
131 struct eeh_event *event; 126 struct eeh_event *event;
132 struct device_node *dn = eeh_dev_to_of_node(edev);
133 const char *location;
134
135 if (!mem_init_done) {
136 printk(KERN_ERR "EEH: event during early boot not handled\n");
137 location = of_get_property(dn, "ibm,loc-code", NULL);
138 printk(KERN_ERR "EEH: device node = %s\n", dn->full_name);
139 printk(KERN_ERR "EEH: PCI location = %s\n", location);
140 return 1;
141 }
142 event = kmalloc(sizeof(*event), GFP_ATOMIC);
143 if (event == NULL) {
144 printk(KERN_ERR "EEH: out of memory, event not handled\n");
145 return 1;
146 }
147
148 if (edev->pdev)
149 pci_dev_get(edev->pdev);
150 127
151 event->edev = edev; 128 event = kzalloc(sizeof(*event), GFP_ATOMIC);
129 if (!event) {
130 pr_err("EEH: out of memory, event not handled\n");
131 return -ENOMEM;
132 }
133 event->pe = pe;
152 134
153 /* We may or may not be called in an interrupt context */ 135 /* We may or may not be called in an interrupt context */
154 spin_lock_irqsave(&eeh_eventlist_lock, flags); 136 spin_lock_irqsave(&eeh_eventlist_lock, flags);
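
With events now carrying a PE rather than a single device, the producer side reduces to the sketch below (the helper name is illustrative): the event is allocated with GFP_ATOMIC, so this may run in interrupt context, while the eehd thread above performs the actual recovery with the PE marked EEH_PE_RECOVERING.

static void example_report_frozen_pe(struct eeh_pe *pe)
{
        /* Queues the event; eeh_event_handler() picks it up later */
        if (eeh_send_failure_event(pe))
                pr_err("EEH: failed to queue event for PE#%x\n", pe->addr);
}
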
diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c
new file mode 100644
index 000000000000..797cd181dc3f
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_pe.c
@@ -0,0 +1,652 @@
1/*
 2 * This file implements PEs based on the information from the
 3 * platform. Basically, there are 3 types of PEs: PHB/Bus/Device.
 4 * All the PEs should be organized as a hierarchy tree. The first level
 5 * of the tree is associated with the existing PHBs since a particular
 6 * PE is only meaningful in one PHB domain.
7 *
8 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <linux/export.h>
26#include <linux/gfp.h>
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/pci.h>
30#include <linux/string.h>
31
32#include <asm/pci-bridge.h>
33#include <asm/ppc-pci.h>
34
35static LIST_HEAD(eeh_phb_pe);
36
37/**
38 * eeh_pe_alloc - Allocate PE
39 * @phb: PCI controller
40 * @type: PE type
41 *
42 * Allocate PE instance dynamically.
43 */
44static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
45{
46 struct eeh_pe *pe;
47
48 /* Allocate PHB PE */
49 pe = kzalloc(sizeof(struct eeh_pe), GFP_KERNEL);
50 if (!pe) return NULL;
51
52 /* Initialize PHB PE */
53 pe->type = type;
54 pe->phb = phb;
55 INIT_LIST_HEAD(&pe->child_list);
56 INIT_LIST_HEAD(&pe->child);
57 INIT_LIST_HEAD(&pe->edevs);
58
59 return pe;
60}
61
62/**
63 * eeh_phb_pe_create - Create PHB PE
64 * @phb: PCI controller
65 *
 66 * The function should be called when a PHB is detected during
 67 * system boot or PCI hotplug in order to create its PHB PE.
68 */
69int __devinit eeh_phb_pe_create(struct pci_controller *phb)
70{
71 struct eeh_pe *pe;
72
73 /* Allocate PHB PE */
74 pe = eeh_pe_alloc(phb, EEH_PE_PHB);
75 if (!pe) {
76 pr_err("%s: out of memory!\n", __func__);
77 return -ENOMEM;
78 }
79
80 /* Put it into the list */
81 eeh_lock();
82 list_add_tail(&pe->child, &eeh_phb_pe);
83 eeh_unlock();
84
85 pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
86
87 return 0;
88}
89
90/**
91 * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
92 * @phb: PCI controller
93 *
 94 * The overall PEs form a hierarchy tree. The first layer of the
95 * hierarchy tree is composed of PHB PEs. The function is used
96 * to retrieve the corresponding PHB PE according to the given PHB.
97 */
98static struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
99{
100 struct eeh_pe *pe;
101
102 list_for_each_entry(pe, &eeh_phb_pe, child) {
103 /*
104 * Actually, we needn't check the type since
105 * the PE for PHB has been determined when that
106 * was created.
107 */
108 if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
109 return pe;
110 }
111
112 return NULL;
113}
114
115/**
116 * eeh_pe_next - Retrieve the next PE in the tree
117 * @pe: current PE
118 * @root: root PE
119 *
120 * The function is used to retrieve the next PE in the
 121 * PE hierarchy tree.
122 */
123static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
124 struct eeh_pe *root)
125{
126 struct list_head *next = pe->child_list.next;
127
128 if (next == &pe->child_list) {
129 while (1) {
130 if (pe == root)
131 return NULL;
132 next = pe->child.next;
133 if (next != &pe->parent->child_list)
134 break;
135 pe = pe->parent;
136 }
137 }
138
139 return list_entry(next, struct eeh_pe, child);
140}
141
142/**
143 * eeh_pe_traverse - Traverse PEs in the specified PHB
144 * @root: root PE
145 * @fn: callback
146 * @flag: extra parameter to callback
147 *
148 * The function is used to traverse the specified PE and its
 149 * child PEs. The traversal terminates once the callback
 150 * returns something other than NULL, or when there are no
 151 * more PEs left to traverse.
152 */
153static void *eeh_pe_traverse(struct eeh_pe *root,
154 eeh_traverse_func fn, void *flag)
155{
156 struct eeh_pe *pe;
157 void *ret;
158
159 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
160 ret = fn(pe, flag);
161 if (ret) return ret;
162 }
163
164 return NULL;
165}
166
167/**
168 * eeh_pe_dev_traverse - Traverse the devices from the PE
169 * @root: EEH PE
170 * @fn: function callback
171 * @flag: extra parameter to callback
172 *
173 * The function is used to traverse the devices of the specified
174 * PE and its child PEs.
175 */
176void *eeh_pe_dev_traverse(struct eeh_pe *root,
177 eeh_traverse_func fn, void *flag)
178{
179 struct eeh_pe *pe;
180 struct eeh_dev *edev;
181 void *ret;
182
183 if (!root) {
184 pr_warning("%s: Invalid PE %p\n", __func__, root);
185 return NULL;
186 }
187
188 eeh_lock();
189
190 /* Traverse root PE */
191 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
192 eeh_pe_for_each_dev(pe, edev) {
193 ret = fn(edev, flag);
194 if (ret) {
195 eeh_unlock();
196 return ret;
197 }
198 }
199 }
200
201 eeh_unlock();
202
203 return NULL;
204}
205
206/**
207 * __eeh_pe_get - Check the PE address
208 * @data: EEH PE
209 * @flag: EEH device
210 *
 211 * A particular PE can be identified by its PE address
 212 * or traditional BDF address. The BDF address is composed of
 213 * Bus/Device/Function number. The extra data referred to by flag
214 * indicates which type of address should be used.
215 */
216static void *__eeh_pe_get(void *data, void *flag)
217{
218 struct eeh_pe *pe = (struct eeh_pe *)data;
219 struct eeh_dev *edev = (struct eeh_dev *)flag;
220
221 /* Unexpected PHB PE */
222 if (pe->type & EEH_PE_PHB)
223 return NULL;
224
225 /* We prefer PE address */
226 if (edev->pe_config_addr &&
227 (edev->pe_config_addr == pe->addr))
228 return pe;
229
230 /* Try BDF address */
231 if (edev->pe_config_addr &&
232 (edev->config_addr == pe->config_addr))
233 return pe;
234
235 return NULL;
236}
237
238/**
239 * eeh_pe_get - Search PE based on the given address
240 * @edev: EEH device
241 *
242 * Search the corresponding PE based on the specified address which
243 * is included in the eeh device. The function is used to check if
 244 * the associated PE has been created for the PE address. Note
 245 * that the PE address has 2 formats: a traditional PE address,
 246 * which is composed of PCI bus/device/function numbers, or a unified
247 * PE address.
248 */
249static struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
250{
251 struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
252 struct eeh_pe *pe;
253
254 pe = eeh_pe_traverse(root, __eeh_pe_get, edev);
255
256 return pe;
257}
258
259/**
260 * eeh_pe_get_parent - Retrieve the parent PE
261 * @edev: EEH device
262 *
 263 * All the PEs existing in the system are organized as a hierarchy
264 * tree. The function is used to retrieve the parent PE according
265 * to the parent EEH device.
266 */
267static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
268{
269 struct device_node *dn;
270 struct eeh_dev *parent;
271
272 /*
 273 * There might be cases where an indirect parent
 274 * EEH device already has an associated PE, but
 275 * the direct parent EEH device doesn't yet.
276 */
277 dn = edev->dn->parent;
278 while (dn) {
279 /* We're poking out of PCI territory */
280 if (!PCI_DN(dn)) return NULL;
281
282 parent = of_node_to_eeh_dev(dn);
283 /* We're poking out of PCI territory */
284 if (!parent) return NULL;
285
286 if (parent->pe)
287 return parent->pe;
288
289 dn = dn->parent;
290 }
291
292 return NULL;
293}
294
295/**
296 * eeh_add_to_parent_pe - Add EEH device to parent PE
297 * @edev: EEH device
298 *
299 * Add EEH device to the parent PE. If the parent PE already
300 * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
 301 * we have to create a new PE to hold the EEH device and the new
302 * PE will be linked to its parent PE as well.
303 */
304int eeh_add_to_parent_pe(struct eeh_dev *edev)
305{
306 struct eeh_pe *pe, *parent;
307
308 eeh_lock();
309
310 /*
 311 * Search whether the PE already exists according
 312 * to the PE address. If it does, the PE should be
 313 * composed of the PCI bus and its subordinate
314 * components.
315 */
316 pe = eeh_pe_get(edev);
317 if (pe && !(pe->type & EEH_PE_INVALID)) {
318 if (!edev->pe_config_addr) {
319 eeh_unlock();
320 pr_err("%s: PE with addr 0x%x already exists\n",
321 __func__, edev->config_addr);
322 return -EEXIST;
323 }
324
325 /* Mark the PE as type of PCI bus */
326 pe->type = EEH_PE_BUS;
327 edev->pe = pe;
328
329 /* Put the edev to PE */
330 list_add_tail(&edev->list, &pe->edevs);
331 eeh_unlock();
332 pr_debug("EEH: Add %s to Bus PE#%x\n",
333 edev->dn->full_name, pe->addr);
334
335 return 0;
336 } else if (pe && (pe->type & EEH_PE_INVALID)) {
337 list_add_tail(&edev->list, &pe->edevs);
338 edev->pe = pe;
339 /*
 340 * We got here because of PCI hotplug triggered by
 341 * EEH recovery. We need to clear EEH_PE_INVALID up to the top.
342 */
343 parent = pe;
344 while (parent) {
345 if (!(parent->type & EEH_PE_INVALID))
346 break;
347 parent->type &= ~EEH_PE_INVALID;
348 parent = parent->parent;
349 }
350 eeh_unlock();
351 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
352 edev->dn->full_name, pe->addr, pe->parent->addr);
353
354 return 0;
355 }
356
357 /* Create a new EEH PE */
358 pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
359 if (!pe) {
360 eeh_unlock();
361 pr_err("%s: out of memory!\n", __func__);
362 return -ENOMEM;
363 }
364 pe->addr = edev->pe_config_addr;
365 pe->config_addr = edev->config_addr;
366
367 /*
368 * Put the new EEH PE into hierarchy tree. If the parent
369 * can't be found, the newly created PE will be attached
370 * to PHB directly. Otherwise, we have to associate the
371 * PE with its parent.
372 */
373 parent = eeh_pe_get_parent(edev);
374 if (!parent) {
375 parent = eeh_phb_pe_get(edev->phb);
376 if (!parent) {
377 eeh_unlock();
378 pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
379 __func__, edev->phb->global_number);
380 edev->pe = NULL;
381 kfree(pe);
382 return -EEXIST;
383 }
384 }
385 pe->parent = parent;
386
387 /*
388 * Put the newly created PE into the child list and
389 * link the EEH device accordingly.
390 */
391 list_add_tail(&pe->child, &parent->child_list);
392 list_add_tail(&edev->list, &pe->edevs);
393 edev->pe = pe;
394 eeh_unlock();
395 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
396 edev->dn->full_name, pe->addr, pe->parent->addr);
397
398 return 0;
399}
400
401/**
402 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
403 * @edev: EEH device
404 * @purge_pe: remove PE or not
405 *
406 * The PE hierarchy tree might be changed when doing PCI hotplug.
407 * Also, the PCI devices or buses could be removed from the system
 408 * during EEH recovery. So we have to call this function to remove
 409 * the corresponding PE accordingly if necessary.
410 */
411int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
412{
413 struct eeh_pe *pe, *parent, *child;
414 int cnt;
415
416 if (!edev->pe) {
417 pr_warning("%s: No PE found for EEH device %s\n",
418 __func__, edev->dn->full_name);
419 return -EEXIST;
420 }
421
422 eeh_lock();
423
424 /* Remove the EEH device */
425 pe = edev->pe;
426 edev->pe = NULL;
427 list_del(&edev->list);
428
429 /*
430 * Check if the parent PE includes any EEH devices.
 431 * If not, we should delete it. Also, we should
 432 * delete the parent PE if it has no associated
 433 * child PEs or EEH devices.
434 */
435 while (1) {
436 parent = pe->parent;
437 if (pe->type & EEH_PE_PHB)
438 break;
439
440 if (purge_pe) {
441 if (list_empty(&pe->edevs) &&
442 list_empty(&pe->child_list)) {
443 list_del(&pe->child);
444 kfree(pe);
445 } else {
446 break;
447 }
448 } else {
449 if (list_empty(&pe->edevs)) {
450 cnt = 0;
451 list_for_each_entry(child, &pe->child_list, child) {
452 if (!(pe->type & EEH_PE_INVALID)) {
453 cnt++;
454 break;
455 }
456 }
457
458 if (!cnt)
459 pe->type |= EEH_PE_INVALID;
460 else
461 break;
462 }
463 }
464
465 pe = parent;
466 }
467
468 eeh_unlock();
469
470 return 0;
471}
472
473/**
474 * __eeh_pe_state_mark - Mark the state for the PE
475 * @data: EEH PE
476 * @flag: state
477 *
478 * The function is used to mark the indicated state for the given
 479 * PE. The associated PCI devices will be put into the I/O frozen
 480 * state as well.
481 */
482static void *__eeh_pe_state_mark(void *data, void *flag)
483{
484 struct eeh_pe *pe = (struct eeh_pe *)data;
485 int state = *((int *)flag);
486 struct eeh_dev *tmp;
487 struct pci_dev *pdev;
488
489 /*
490 * Mark the PE with the indicated state. Also,
491 * the associated PCI device will be put into
492 * I/O frozen state to avoid I/O accesses from
493 * the PCI device driver.
494 */
495 pe->state |= state;
496 eeh_pe_for_each_dev(pe, tmp) {
497 pdev = eeh_dev_to_pci_dev(tmp);
498 if (pdev)
499 pdev->error_state = pci_channel_io_frozen;
500 }
501
502 return NULL;
503}
504
505/**
506 * eeh_pe_state_mark - Mark specified state for PE and its associated device
507 * @pe: EEH PE
508 *
509 * EEH error affects the current PE and its child PEs. The function
510 * is used to mark appropriate state for the affected PEs and the
511 * associated devices.
512 */
513void eeh_pe_state_mark(struct eeh_pe *pe, int state)
514{
515 eeh_lock();
516 eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
517 eeh_unlock();
518}
519
520/**
521 * __eeh_pe_state_clear - Clear state for the PE
522 * @data: EEH PE
523 * @flag: state
524 *
525 * The function is used to clear the indicated state from the
 526 * given PE. Besides, we also clear the check count of
 527 * the PE.
528 */
529static void *__eeh_pe_state_clear(void *data, void *flag)
530{
531 struct eeh_pe *pe = (struct eeh_pe *)data;
532 int state = *((int *)flag);
533
534 pe->state &= ~state;
535 pe->check_count = 0;
536
537 return NULL;
538}
539
540/**
541 * eeh_pe_state_clear - Clear state for the PE and its children
542 * @pe: PE
543 * @state: state to be cleared
544 *
 545 * When the PE and its children have been recovered from an error,
 546 * we need to clear the error state. The function is used
 547 * for that purpose.
548 */
549void eeh_pe_state_clear(struct eeh_pe *pe, int state)
550{
551 eeh_lock();
552 eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
553 eeh_unlock();
554}
555
556/**
557 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
558 * @data: EEH device
559 * @flag: Unused
560 *
561 * Loads the PCI configuration space base address registers,
562 * the expansion ROM base address, the latency timer, etc.
563 * from the saved values in the device node.
564 */
565static void *eeh_restore_one_device_bars(void *data, void *flag)
566{
567 int i;
568 u32 cmd;
569 struct eeh_dev *edev = (struct eeh_dev *)data;
570 struct device_node *dn = eeh_dev_to_of_node(edev);
571
572 for (i = 4; i < 10; i++)
573 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
574 /* 12 == Expansion ROM Address */
575 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
576
577#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
578#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
579
580 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
581 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
582 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
583 SAVED_BYTE(PCI_LATENCY_TIMER));
584
585 /* max latency, min grant, interrupt pin and line */
586 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
587
588 /*
589 * Restore PERR & SERR bits, some devices require it,
590 * don't touch the other command bits
591 */
592 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
593 if (edev->config_space[1] & PCI_COMMAND_PARITY)
594 cmd |= PCI_COMMAND_PARITY;
595 else
596 cmd &= ~PCI_COMMAND_PARITY;
597 if (edev->config_space[1] & PCI_COMMAND_SERR)
598 cmd |= PCI_COMMAND_SERR;
599 else
600 cmd &= ~PCI_COMMAND_SERR;
601 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
602
603 return NULL;
604}
605
606/**
607 * eeh_pe_restore_bars - Restore the PCI config space info
608 * @pe: EEH PE
609 *
610 * This routine performs a recursive walk over the children
611 * of this device as well.
612 */
613void eeh_pe_restore_bars(struct eeh_pe *pe)
614{
615 /*
616 * We needn't take the EEH lock since eeh_pe_dev_traverse()
617 * will take that.
618 */
619 eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
620}
621
622/**
623 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
624 * @pe: EEH PE
625 *
626 * Retrieve the PCI bus according to the given PE. Basically,
627 * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
628 * primary PCI bus will be retrieved. The parent bus will be
629 * returned for a BUS PE. However, there is no associated PCI
630 * bus for a DEVICE PE.
631 */
632struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
633{
634 struct pci_bus *bus = NULL;
635 struct eeh_dev *edev;
636 struct pci_dev *pdev;
637
638 eeh_lock();
639
640 if (pe->type & EEH_PE_PHB) {
641 bus = pe->phb->bus;
642 } else if (pe->type & EEH_PE_BUS) {
643 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
644 pdev = eeh_dev_to_pci_dev(edev);
645 if (pdev)
646 bus = pdev->bus;
647 }
648
649 eeh_unlock();
650
651 return bus;
652}
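As a side note on eeh_restore_one_device_bars() above, the BYTE_SWAP()/SAVED_BYTE() macros recover a single config-space byte from the u32 words saved by eeh_save_bars(). A small standalone sketch (not part of the patch; it assumes the words were stored by a big-endian powerpc CPU, so the low byte of each 32-bit config read sits at array index 4*word+3):

#include <stdio.h>
#include <stdint.h>

#define BYTE_SWAP(OFF)	(8 * ((OFF) / 4) + 3 - (OFF))

int main(void)
{
	/* Big-endian image of the 32-bit config read at offset 0x0c:
	 * value 0xBBHHLLCC stored as { BIST, header, latency, cacheline } */
	uint8_t saved[64] = { 0 };

	saved[12] = 0x00;	/* BIST */
	saved[13] = 0x00;	/* header type */
	saved[14] = 0x40;	/* latency timer, config offset 0x0d */
	saved[15] = 0x10;	/* cache line size, config offset 0x0c */

	printf("cache line: idx %d -> 0x%02x\n",
	       BYTE_SWAP(0x0c), saved[BYTE_SWAP(0x0c)]);	/* idx 15 -> 0x10 */
	printf("latency:    idx %d -> 0x%02x\n",
	       BYTE_SWAP(0x0d), saved[BYTE_SWAP(0x0d)]);	/* idx 14 -> 0x40 */
	return 0;
}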
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index c33360ec4f4f..19506f935737 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -129,27 +129,117 @@ static int pseries_eeh_init(void)
129 eeh_error_buf_size = RTAS_ERROR_LOG_MAX; 129 eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
130 } 130 }
131 131
132 /* Set EEH probe mode */
133 eeh_probe_mode_set(EEH_PROBE_MODE_DEVTREE);
134
132 return 0; 135 return 0;
133} 136}
134 137
135/** 138/**
139 * pseries_eeh_of_probe - EEH probe on the given device
140 * @dn: OF node
141 * @flag: Unused
142 *
 143 * When the EEH module is installed during system boot, all PCI devices
 144 * are checked one by one to see if they support EEH. The function
 145 * is introduced for that purpose.
146 */
147static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
148{
149 struct eeh_dev *edev;
150 struct eeh_pe pe;
151 const u32 *class_code, *vendor_id, *device_id;
152 const u32 *regs;
153 int enable = 0;
154 int ret;
155
156 /* Retrieve OF node and eeh device */
157 edev = of_node_to_eeh_dev(dn);
158 if (!of_device_is_available(dn))
159 return NULL;
160
161 /* Retrieve class/vendor/device IDs */
162 class_code = of_get_property(dn, "class-code", NULL);
163 vendor_id = of_get_property(dn, "vendor-id", NULL);
164 device_id = of_get_property(dn, "device-id", NULL);
165
166 /* Skip for bad OF node or PCI-ISA bridge */
167 if (!class_code || !vendor_id || !device_id)
168 return NULL;
169 if (dn->type && !strcmp(dn->type, "isa"))
170 return NULL;
171
172 /* Update class code and mode of eeh device */
173 edev->class_code = *class_code;
174 edev->mode = 0;
175
176 /* Retrieve the device address */
177 regs = of_get_property(dn, "reg", NULL);
178 if (!regs) {
179 pr_warning("%s: OF node property %s::reg not found\n",
180 __func__, dn->full_name);
181 return NULL;
182 }
183
184 /* Initialize the fake PE */
185 memset(&pe, 0, sizeof(struct eeh_pe));
186 pe.phb = edev->phb;
187 pe.config_addr = regs[0];
188
189 /* Enable EEH on the device */
190 ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
191 if (!ret) {
192 edev->config_addr = regs[0];
193 /* Retrieve PE address */
194 edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
195 pe.addr = edev->pe_config_addr;
196
197 /* Some older systems (Power4) allow the ibm,set-eeh-option
198 * call to succeed even on nodes where EEH is not supported.
199 * Verify support explicitly.
200 */
201 ret = eeh_ops->get_state(&pe, NULL);
202 if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
203 enable = 1;
204
205 if (enable) {
206 eeh_subsystem_enabled = 1;
207 eeh_add_to_parent_pe(edev);
208
209 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
210 __func__, dn->full_name, pe.phb->global_number,
211 pe.addr, pe.config_addr);
212 } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
213 (of_node_to_eeh_dev(dn->parent))->pe) {
214 /* This device doesn't support EEH, but it may have an
215 * EEH parent, in which case we mark it as supported.
216 */
217 edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
218 edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
219 eeh_add_to_parent_pe(edev);
220 }
221 }
222
223 /* Save memory bars */
224 eeh_save_bars(edev);
225
226 return NULL;
227}
228
229/**
136 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable 230 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
137 * @dn: device node 231 * @pe: EEH PE
138 * @option: operation to be issued 232 * @option: operation to be issued
139 * 233 *
140 * The function is used to control the EEH functionality globally. 234 * The function is used to control the EEH functionality globally.
 141 * Currently, the following options are supported according to PAPR: 235 * Currently, the following options are supported according to PAPR:
142 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA 236 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
143 */ 237 */
144static int pseries_eeh_set_option(struct device_node *dn, int option) 238static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
145{ 239{
146 int ret = 0; 240 int ret = 0;
147 struct eeh_dev *edev;
148 const u32 *reg;
149 int config_addr; 241 int config_addr;
150 242
151 edev = of_node_to_eeh_dev(dn);
152
153 /* 243 /*
 154 * When we're enabling or disabling EEH functionality on 244 * When we're enabling or disabling EEH functionality on
155 * the particular PE, the PE config address is possibly 245 * the particular PE, the PE config address is possibly
@@ -159,15 +249,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
159 switch (option) { 249 switch (option) {
160 case EEH_OPT_DISABLE: 250 case EEH_OPT_DISABLE:
161 case EEH_OPT_ENABLE: 251 case EEH_OPT_ENABLE:
162 reg = of_get_property(dn, "reg", NULL);
163 config_addr = reg[0];
164 break;
165
166 case EEH_OPT_THAW_MMIO: 252 case EEH_OPT_THAW_MMIO:
167 case EEH_OPT_THAW_DMA: 253 case EEH_OPT_THAW_DMA:
168 config_addr = edev->config_addr; 254 config_addr = pe->config_addr;
169 if (edev->pe_config_addr) 255 if (pe->addr)
170 config_addr = edev->pe_config_addr; 256 config_addr = pe->addr;
171 break; 257 break;
172 258
173 default: 259 default:
@@ -177,15 +263,15 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
177 } 263 }
178 264
179 ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, 265 ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
180 config_addr, BUID_HI(edev->phb->buid), 266 config_addr, BUID_HI(pe->phb->buid),
181 BUID_LO(edev->phb->buid), option); 267 BUID_LO(pe->phb->buid), option);
182 268
183 return ret; 269 return ret;
184} 270}
185 271
186/** 272/**
187 * pseries_eeh_get_pe_addr - Retrieve PE address 273 * pseries_eeh_get_pe_addr - Retrieve PE address
188 * @dn: device node 274 * @pe: EEH PE
189 * 275 *
 190 * Retrieve the associated PE address. Actually, there are 2 RTAS 276 * Retrieve the associated PE address. Actually, there are 2 RTAS
 191 * function calls dedicated for the purpose. We need to implement 277 * function calls dedicated for the purpose. We need to implement
@@ -196,14 +282,11 @@ static int pseries_eeh_set_option(struct device_node *dn, int option)
196 * It's notable that zero'ed return value means invalid PE config 282 * It's notable that zero'ed return value means invalid PE config
197 * address. 283 * address.
198 */ 284 */
199static int pseries_eeh_get_pe_addr(struct device_node *dn) 285static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
200{ 286{
201 struct eeh_dev *edev;
202 int ret = 0; 287 int ret = 0;
203 int rets[3]; 288 int rets[3];
204 289
205 edev = of_node_to_eeh_dev(dn);
206
207 if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) { 290 if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
208 /* 291 /*
 209 * First of all, we need to make sure there is one PE 292 * First of all, we need to make sure there is one PE
@@ -211,18 +294,18 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
211 * meaningless. 294 * meaningless.
212 */ 295 */
213 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, 296 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
214 edev->config_addr, BUID_HI(edev->phb->buid), 297 pe->config_addr, BUID_HI(pe->phb->buid),
215 BUID_LO(edev->phb->buid), 1); 298 BUID_LO(pe->phb->buid), 1);
216 if (ret || (rets[0] == 0)) 299 if (ret || (rets[0] == 0))
217 return 0; 300 return 0;
218 301
219 /* Retrieve the associated PE config address */ 302 /* Retrieve the associated PE config address */
220 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets, 303 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
221 edev->config_addr, BUID_HI(edev->phb->buid), 304 pe->config_addr, BUID_HI(pe->phb->buid),
222 BUID_LO(edev->phb->buid), 0); 305 BUID_LO(pe->phb->buid), 0);
223 if (ret) { 306 if (ret) {
224 pr_warning("%s: Failed to get PE address for %s\n", 307 pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
225 __func__, dn->full_name); 308 __func__, pe->phb->global_number, pe->config_addr);
226 return 0; 309 return 0;
227 } 310 }
228 311
@@ -231,11 +314,11 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
231 314
232 if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) { 315 if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
233 ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets, 316 ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
234 edev->config_addr, BUID_HI(edev->phb->buid), 317 pe->config_addr, BUID_HI(pe->phb->buid),
235 BUID_LO(edev->phb->buid), 0); 318 BUID_LO(pe->phb->buid), 0);
236 if (ret) { 319 if (ret) {
237 pr_warning("%s: Failed to get PE address for %s\n", 320 pr_warning("%s: Failed to get address for PHB#%d-PE#%x\n",
238 __func__, dn->full_name); 321 __func__, pe->phb->global_number, pe->config_addr);
239 return 0; 322 return 0;
240 } 323 }
241 324
@@ -247,7 +330,7 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
247 330
248/** 331/**
249 * pseries_eeh_get_state - Retrieve PE state 332 * pseries_eeh_get_state - Retrieve PE state
250 * @dn: PE associated device node 333 * @pe: EEH PE
251 * @state: return value 334 * @state: return value
252 * 335 *
253 * Retrieve the state of the specified PE. On RTAS compliant 336 * Retrieve the state of the specified PE. On RTAS compliant
@@ -258,30 +341,28 @@ static int pseries_eeh_get_pe_addr(struct device_node *dn)
 258 * RTAS calls for the purpose, we need to try the new one and fall back 341 * RTAS calls for the purpose, we need to try the new one and fall back
 259 * to the old one if the new one doesn't work properly. 342 * to the old one if the new one doesn't work properly.
260 */ 343 */
261static int pseries_eeh_get_state(struct device_node *dn, int *state) 344static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
262{ 345{
263 struct eeh_dev *edev;
264 int config_addr; 346 int config_addr;
265 int ret; 347 int ret;
266 int rets[4]; 348 int rets[4];
267 int result; 349 int result;
268 350
269 /* Figure out PE config address if possible */ 351 /* Figure out PE config address if possible */
270 edev = of_node_to_eeh_dev(dn); 352 config_addr = pe->config_addr;
271 config_addr = edev->config_addr; 353 if (pe->addr)
272 if (edev->pe_config_addr) 354 config_addr = pe->addr;
273 config_addr = edev->pe_config_addr;
274 355
275 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { 356 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
276 ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets, 357 ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
277 config_addr, BUID_HI(edev->phb->buid), 358 config_addr, BUID_HI(pe->phb->buid),
278 BUID_LO(edev->phb->buid)); 359 BUID_LO(pe->phb->buid));
279 } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) { 360 } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
280 /* Fake PE unavailable info */ 361 /* Fake PE unavailable info */
281 rets[2] = 0; 362 rets[2] = 0;
282 ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets, 363 ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
283 config_addr, BUID_HI(edev->phb->buid), 364 config_addr, BUID_HI(pe->phb->buid),
284 BUID_LO(edev->phb->buid)); 365 BUID_LO(pe->phb->buid));
285 } else { 366 } else {
286 return EEH_STATE_NOT_SUPPORT; 367 return EEH_STATE_NOT_SUPPORT;
287 } 368 }
@@ -333,34 +414,32 @@ static int pseries_eeh_get_state(struct device_node *dn, int *state)
333 414
334/** 415/**
335 * pseries_eeh_reset - Reset the specified PE 416 * pseries_eeh_reset - Reset the specified PE
336 * @dn: PE associated device node 417 * @pe: EEH PE
337 * @option: reset option 418 * @option: reset option
338 * 419 *
339 * Reset the specified PE 420 * Reset the specified PE
340 */ 421 */
341static int pseries_eeh_reset(struct device_node *dn, int option) 422static int pseries_eeh_reset(struct eeh_pe *pe, int option)
342{ 423{
343 struct eeh_dev *edev;
344 int config_addr; 424 int config_addr;
345 int ret; 425 int ret;
346 426
347 /* Figure out PE address */ 427 /* Figure out PE address */
348 edev = of_node_to_eeh_dev(dn); 428 config_addr = pe->config_addr;
349 config_addr = edev->config_addr; 429 if (pe->addr)
350 if (edev->pe_config_addr) 430 config_addr = pe->addr;
351 config_addr = edev->pe_config_addr;
352 431
353 /* Reset PE through RTAS call */ 432 /* Reset PE through RTAS call */
354 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, 433 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
355 config_addr, BUID_HI(edev->phb->buid), 434 config_addr, BUID_HI(pe->phb->buid),
356 BUID_LO(edev->phb->buid), option); 435 BUID_LO(pe->phb->buid), option);
357 436
358 /* If fundamental-reset not supported, try hot-reset */ 437 /* If fundamental-reset not supported, try hot-reset */
359 if (option == EEH_RESET_FUNDAMENTAL && 438 if (option == EEH_RESET_FUNDAMENTAL &&
360 ret == -8) { 439 ret == -8) {
361 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL, 440 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
362 config_addr, BUID_HI(edev->phb->buid), 441 config_addr, BUID_HI(pe->phb->buid),
363 BUID_LO(edev->phb->buid), EEH_RESET_HOT); 442 BUID_LO(pe->phb->buid), EEH_RESET_HOT);
364 } 443 }
365 444
366 return ret; 445 return ret;
@@ -368,13 +447,13 @@ static int pseries_eeh_reset(struct device_node *dn, int option)
368 447
369/** 448/**
370 * pseries_eeh_wait_state - Wait for PE state 449 * pseries_eeh_wait_state - Wait for PE state
371 * @dn: PE associated device node 450 * @pe: EEH PE
372 * @max_wait: maximal period in microsecond 451 * @max_wait: maximal period in microsecond
373 * 452 *
374 * Wait for the state of associated PE. It might take some time 453 * Wait for the state of associated PE. It might take some time
375 * to retrieve the PE's state. 454 * to retrieve the PE's state.
376 */ 455 */
377static int pseries_eeh_wait_state(struct device_node *dn, int max_wait) 456static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
378{ 457{
379 int ret; 458 int ret;
380 int mwait; 459 int mwait;
@@ -391,7 +470,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
391#define EEH_STATE_MAX_WAIT_TIME (300 * 1000) 470#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
392 471
393 while (1) { 472 while (1) {
394 ret = pseries_eeh_get_state(dn, &mwait); 473 ret = pseries_eeh_get_state(pe, &mwait);
395 474
396 /* 475 /*
397 * If the PE's state is temporarily unavailable, 476 * If the PE's state is temporarily unavailable,
@@ -426,7 +505,7 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
426 505
427/** 506/**
428 * pseries_eeh_get_log - Retrieve error log 507 * pseries_eeh_get_log - Retrieve error log
429 * @dn: device node 508 * @pe: EEH PE
430 * @severity: temporary or permanent error log 509 * @severity: temporary or permanent error log
431 * @drv_log: driver log to be combined with retrieved error log 510 * @drv_log: driver log to be combined with retrieved error log
432 * @len: length of driver log 511 * @len: length of driver log
@@ -435,24 +514,22 @@ static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
435 * Actually, the error will be retrieved through the dedicated 514 * Actually, the error will be retrieved through the dedicated
436 * RTAS call. 515 * RTAS call.
437 */ 516 */
438static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len) 517static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
439{ 518{
440 struct eeh_dev *edev;
441 int config_addr; 519 int config_addr;
442 unsigned long flags; 520 unsigned long flags;
443 int ret; 521 int ret;
444 522
445 edev = of_node_to_eeh_dev(dn);
446 spin_lock_irqsave(&slot_errbuf_lock, flags); 523 spin_lock_irqsave(&slot_errbuf_lock, flags);
447 memset(slot_errbuf, 0, eeh_error_buf_size); 524 memset(slot_errbuf, 0, eeh_error_buf_size);
448 525
449 /* Figure out the PE address */ 526 /* Figure out the PE address */
450 config_addr = edev->config_addr; 527 config_addr = pe->config_addr;
451 if (edev->pe_config_addr) 528 if (pe->addr)
452 config_addr = edev->pe_config_addr; 529 config_addr = pe->addr;
453 530
454 ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr, 531 ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
455 BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid), 532 BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
456 virt_to_phys(drv_log), len, 533 virt_to_phys(drv_log), len,
457 virt_to_phys(slot_errbuf), eeh_error_buf_size, 534 virt_to_phys(slot_errbuf), eeh_error_buf_size,
458 severity); 535 severity);
@@ -465,40 +542,38 @@ static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_l
465 542
466/** 543/**
467 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE 544 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
468 * @dn: PE associated device node 545 * @pe: EEH PE
469 * 546 *
470 * The function will be called to reconfigure the bridges included 547 * The function will be called to reconfigure the bridges included
 471 * in the specified PE so that the malfunctioning PE would be recovered 548 * in the specified PE so that the malfunctioning PE would be recovered
472 * again. 549 * again.
473 */ 550 */
474static int pseries_eeh_configure_bridge(struct device_node *dn) 551static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
475{ 552{
476 struct eeh_dev *edev;
477 int config_addr; 553 int config_addr;
478 int ret; 554 int ret;
479 555
480 /* Figure out the PE address */ 556 /* Figure out the PE address */
481 edev = of_node_to_eeh_dev(dn); 557 config_addr = pe->config_addr;
482 config_addr = edev->config_addr; 558 if (pe->addr)
483 if (edev->pe_config_addr) 559 config_addr = pe->addr;
484 config_addr = edev->pe_config_addr;
485 560
486 /* Use new configure-pe function, if supported */ 561 /* Use new configure-pe function, if supported */
487 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { 562 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
488 ret = rtas_call(ibm_configure_pe, 3, 1, NULL, 563 ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
489 config_addr, BUID_HI(edev->phb->buid), 564 config_addr, BUID_HI(pe->phb->buid),
490 BUID_LO(edev->phb->buid)); 565 BUID_LO(pe->phb->buid));
491 } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { 566 } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
492 ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, 567 ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
493 config_addr, BUID_HI(edev->phb->buid), 568 config_addr, BUID_HI(pe->phb->buid),
494 BUID_LO(edev->phb->buid)); 569 BUID_LO(pe->phb->buid));
495 } else { 570 } else {
496 return -EFAULT; 571 return -EFAULT;
497 } 572 }
498 573
499 if (ret) 574 if (ret)
500 pr_warning("%s: Unable to configure bridge %d for %s\n", 575 pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
501 __func__, ret, dn->full_name); 576 __func__, pe->phb->global_number, pe->addr, ret);
502 577
503 return ret; 578 return ret;
504} 579}
@@ -542,6 +617,8 @@ static int pseries_eeh_write_config(struct device_node *dn, int where, int size,
542static struct eeh_ops pseries_eeh_ops = { 617static struct eeh_ops pseries_eeh_ops = {
543 .name = "pseries", 618 .name = "pseries",
544 .init = pseries_eeh_init, 619 .init = pseries_eeh_init,
620 .of_probe = pseries_eeh_of_probe,
621 .dev_probe = NULL,
545 .set_option = pseries_eeh_set_option, 622 .set_option = pseries_eeh_set_option,
546 .get_pe_addr = pseries_eeh_get_pe_addr, 623 .get_pe_addr = pseries_eeh_get_pe_addr,
547 .get_state = pseries_eeh_get_state, 624 .get_state = pseries_eeh_get_state,
@@ -559,7 +636,21 @@ static struct eeh_ops pseries_eeh_ops = {
559 * EEH initialization on pseries platform. This function should be 636 * EEH initialization on pseries platform. This function should be
560 * called before any EEH related functions. 637 * called before any EEH related functions.
561 */ 638 */
562int __init eeh_pseries_init(void) 639static int __init eeh_pseries_init(void)
563{ 640{
564 return eeh_ops_register(&pseries_eeh_ops); 641 int ret = -EINVAL;
642
643 if (!machine_is(pseries))
644 return ret;
645
646 ret = eeh_ops_register(&pseries_eeh_ops);
647 if (!ret)
648 pr_info("EEH: pSeries platform initialized\n");
649 else
650 pr_info("EEH: pSeries platform initialization failure (%d)\n",
651 ret);
652
653 return ret;
565} 654}
655
656early_initcall(eeh_pseries_init);
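Each pseries_eeh_* backend above repeats the same address-selection rule before issuing its RTAS call: prefer the PE address when one has been assigned, otherwise fall back to the raw config address. A standalone sketch of that rule (the struct below is a stand-in for illustration, not the kernel's struct eeh_pe):

#include <stdio.h>

struct fake_eeh_pe {
	int config_addr;	/* PCI config address */
	int addr;		/* PE address, 0 if not assigned */
};

static int rtas_addr_for(const struct fake_eeh_pe *pe)
{
	/* mirrors: config_addr = pe->config_addr; if (pe->addr) config_addr = pe->addr; */
	return pe->addr ? pe->addr : pe->config_addr;
}

int main(void)
{
	struct fake_eeh_pe with_pe = { .config_addr = 0x8000, .addr = 0x10000 };
	struct fake_eeh_pe without_pe = { .config_addr = 0x8000, .addr = 0 };

	printf("with PE address:    0x%x\n", rtas_addr_for(&with_pe));		/* 0x10000 */
	printf("without PE address: 0x%x\n", rtas_addr_for(&without_pe));	/* 0x8000 */
	return 0;
}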
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
index 243b3510d70f..d37708360f2e 100644
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -53,9 +53,6 @@ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
53EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); 53EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
54EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); 54EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
55EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); 55EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
56EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" );
57EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" );
58EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" );
59 56
60void eeh_sysfs_add_device(struct pci_dev *pdev) 57void eeh_sysfs_add_device(struct pci_dev *pdev)
61{ 58{
@@ -64,9 +61,6 @@ void eeh_sysfs_add_device(struct pci_dev *pdev)
64 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); 61 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
65 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); 62 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
66 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 63 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
67 rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count);
68 rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives);
69 rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count);
70 64
71 if (rc) 65 if (rc)
72 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); 66 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
@@ -77,8 +71,5 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
77 device_remove_file(&pdev->dev, &dev_attr_eeh_mode); 71 device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
78 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); 72 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
79 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 73 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
80 device_remove_file(&pdev->dev, &dev_attr_eeh_check_count);
81 device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives);
82 device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count);
83} 74}
84 75
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index bca220f2873c..6153eea27ce7 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <linux/memblock.h>
31#include <linux/spinlock.h> 32#include <linux/spinlock.h>
32#include <linux/sched.h> /* for show_stack */ 33#include <linux/sched.h> /* for show_stack */
33#include <linux/string.h> 34#include <linux/string.h>
@@ -41,7 +42,6 @@
41#include <asm/iommu.h> 42#include <asm/iommu.h>
42#include <asm/pci-bridge.h> 43#include <asm/pci-bridge.h>
43#include <asm/machdep.h> 44#include <asm/machdep.h>
44#include <asm/abs_addr.h>
45#include <asm/pSeries_reconfig.h> 45#include <asm/pSeries_reconfig.h>
46#include <asm/firmware.h> 46#include <asm/firmware.h>
47#include <asm/tce.h> 47#include <asm/tce.h>
@@ -99,7 +99,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
99 99
100 while (npages--) { 100 while (npages--) {
101 /* can't move this out since we might cross MEMBLOCK boundary */ 101 /* can't move this out since we might cross MEMBLOCK boundary */
102 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 102 rpn = __pa(uaddr) >> TCE_SHIFT;
103 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; 103 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
104 104
105 uaddr += TCE_PAGE_SIZE; 105 uaddr += TCE_PAGE_SIZE;
@@ -148,7 +148,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
148 int ret = 0; 148 int ret = 0;
149 long tcenum_start = tcenum, npages_start = npages; 149 long tcenum_start = tcenum, npages_start = npages;
150 150
151 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 151 rpn = __pa(uaddr) >> TCE_SHIFT;
152 proto_tce = TCE_PCI_READ; 152 proto_tce = TCE_PCI_READ;
153 if (direction != DMA_TO_DEVICE) 153 if (direction != DMA_TO_DEVICE)
154 proto_tce |= TCE_PCI_WRITE; 154 proto_tce |= TCE_PCI_WRITE;
@@ -217,7 +217,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
217 __get_cpu_var(tce_page) = tcep; 217 __get_cpu_var(tce_page) = tcep;
218 } 218 }
219 219
220 rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; 220 rpn = __pa(uaddr) >> TCE_SHIFT;
221 proto_tce = TCE_PCI_READ; 221 proto_tce = TCE_PCI_READ;
222 if (direction != DMA_TO_DEVICE) 222 if (direction != DMA_TO_DEVICE)
223 proto_tce |= TCE_PCI_WRITE; 223 proto_tce |= TCE_PCI_WRITE;
@@ -237,7 +237,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
237 237
238 rc = plpar_tce_put_indirect((u64)tbl->it_index, 238 rc = plpar_tce_put_indirect((u64)tbl->it_index,
239 (u64)tcenum << 12, 239 (u64)tcenum << 12,
240 (u64)virt_to_abs(tcep), 240 (u64)__pa(tcep),
241 limit); 241 limit);
242 242
243 npages -= limit; 243 npages -= limit;
@@ -441,7 +441,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
441 441
442 rc = plpar_tce_put_indirect(liobn, 442 rc = plpar_tce_put_indirect(liobn,
443 dma_offset, 443 dma_offset,
444 (u64)virt_to_abs(tcep), 444 (u64)__pa(tcep),
445 limit); 445 limit);
446 446
447 num_tce -= limit; 447 num_tce -= limit;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5f3ef876ded2..0da39fed355a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -31,7 +31,6 @@
31#include <asm/page.h> 31#include <asm/page.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34#include <asm/abs_addr.h>
35#include <asm/mmu_context.h> 34#include <asm/mmu_context.h>
36#include <asm/iommu.h> 35#include <asm/iommu.h>
37#include <asm/tlbflush.h> 36#include <asm/tlbflush.h>
@@ -108,9 +107,9 @@ void vpa_init(int cpu)
108} 107}
109 108
110static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 109static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
111 unsigned long va, unsigned long pa, 110 unsigned long vpn, unsigned long pa,
112 unsigned long rflags, unsigned long vflags, 111 unsigned long rflags, unsigned long vflags,
113 int psize, int ssize) 112 int psize, int ssize)
114{ 113{
115 unsigned long lpar_rc; 114 unsigned long lpar_rc;
116 unsigned long flags; 115 unsigned long flags;
@@ -118,11 +117,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
118 unsigned long hpte_v, hpte_r; 117 unsigned long hpte_v, hpte_r;
119 118
120 if (!(vflags & HPTE_V_BOLTED)) 119 if (!(vflags & HPTE_V_BOLTED))
121 pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " 120 pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
122 "rflags=%lx, vflags=%lx, psize=%d)\n", 121 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
123 hpte_group, va, pa, rflags, vflags, psize); 122 hpte_group, vpn, pa, rflags, vflags, psize);
124 123
125 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 124 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
126 hpte_r = hpte_encode_r(pa, psize) | rflags; 125 hpte_r = hpte_encode_r(pa, psize) | rflags;
127 126
128 if (!(vflags & HPTE_V_BOLTED)) 127 if (!(vflags & HPTE_V_BOLTED))
@@ -227,22 +226,6 @@ static void pSeries_lpar_hptab_clear(void)
227} 226}
228 227
229/* 228/*
230 * This computes the AVPN and B fields of the first dword of a HPTE,
231 * for use when we want to match an existing PTE. The bottom 7 bits
232 * of the returned value are zero.
233 */
234static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
235 int ssize)
236{
237 unsigned long v;
238
239 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
240 v <<= HPTE_V_AVPN_SHIFT;
241 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
242 return v;
243}
244
245/*
246 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and 229 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
247 * the low 3 bits of flags happen to line up. So no transform is needed. 230 * the low 3 bits of flags happen to line up. So no transform is needed.
248 * We can probably optimize here and assume the high bits of newpp are 231 * We can probably optimize here and assume the high bits of newpp are
@@ -250,14 +233,14 @@ static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
250 */ 233 */
251static long pSeries_lpar_hpte_updatepp(unsigned long slot, 234static long pSeries_lpar_hpte_updatepp(unsigned long slot,
252 unsigned long newpp, 235 unsigned long newpp,
253 unsigned long va, 236 unsigned long vpn,
254 int psize, int ssize, int local) 237 int psize, int ssize, int local)
255{ 238{
256 unsigned long lpar_rc; 239 unsigned long lpar_rc;
257 unsigned long flags = (newpp & 7) | H_AVPN; 240 unsigned long flags = (newpp & 7) | H_AVPN;
258 unsigned long want_v; 241 unsigned long want_v;
259 242
260 want_v = hpte_encode_avpn(va, psize, ssize); 243 want_v = hpte_encode_avpn(vpn, psize, ssize);
261 244
262 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", 245 pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
263 want_v, slot, flags, psize); 246 want_v, slot, flags, psize);
@@ -295,15 +278,15 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
295 return dword0; 278 return dword0;
296} 279}
297 280
298static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize) 281static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
299{ 282{
300 unsigned long hash; 283 unsigned long hash;
301 unsigned long i; 284 unsigned long i;
302 long slot; 285 long slot;
303 unsigned long want_v, hpte_v; 286 unsigned long want_v, hpte_v;
304 287
305 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 288 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
306 want_v = hpte_encode_avpn(va, psize, ssize); 289 want_v = hpte_encode_avpn(vpn, psize, ssize);
307 290
308 /* Bolted entries are always in the primary group */ 291 /* Bolted entries are always in the primary group */
309 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 292 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -323,12 +306,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
323 unsigned long ea, 306 unsigned long ea,
324 int psize, int ssize) 307 int psize, int ssize)
325{ 308{
326 unsigned long lpar_rc, slot, vsid, va, flags; 309 unsigned long vpn;
310 unsigned long lpar_rc, slot, vsid, flags;
327 311
328 vsid = get_kernel_vsid(ea, ssize); 312 vsid = get_kernel_vsid(ea, ssize);
329 va = hpt_va(ea, vsid, ssize); 313 vpn = hpt_vpn(ea, vsid, ssize);
330 314
331 slot = pSeries_lpar_hpte_find(va, psize, ssize); 315 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
332 BUG_ON(slot == -1); 316 BUG_ON(slot == -1);
333 317
334 flags = newpp & 7; 318 flags = newpp & 7;
@@ -337,17 +321,17 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
337 BUG_ON(lpar_rc != H_SUCCESS); 321 BUG_ON(lpar_rc != H_SUCCESS);
338} 322}
339 323
340static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, 324static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
341 int psize, int ssize, int local) 325 int psize, int ssize, int local)
342{ 326{
343 unsigned long want_v; 327 unsigned long want_v;
344 unsigned long lpar_rc; 328 unsigned long lpar_rc;
345 unsigned long dummy1, dummy2; 329 unsigned long dummy1, dummy2;
346 330
347 pr_devel(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", 331 pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
348 slot, va, psize, local); 332 slot, vpn, psize, local);
349 333
350 want_v = hpte_encode_avpn(va, psize, ssize); 334 want_v = hpte_encode_avpn(vpn, psize, ssize);
351 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); 335 lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
352 if (lpar_rc == H_NOT_FOUND) 336 if (lpar_rc == H_NOT_FOUND)
353 return; 337 return;
@@ -358,15 +342,16 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
358static void pSeries_lpar_hpte_removebolted(unsigned long ea, 342static void pSeries_lpar_hpte_removebolted(unsigned long ea,
359 int psize, int ssize) 343 int psize, int ssize)
360{ 344{
361 unsigned long slot, vsid, va; 345 unsigned long vpn;
346 unsigned long slot, vsid;
362 347
363 vsid = get_kernel_vsid(ea, ssize); 348 vsid = get_kernel_vsid(ea, ssize);
364 va = hpt_va(ea, vsid, ssize); 349 vpn = hpt_vpn(ea, vsid, ssize);
365 350
366 slot = pSeries_lpar_hpte_find(va, psize, ssize); 351 slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
367 BUG_ON(slot == -1); 352 BUG_ON(slot == -1);
368 353
369 pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); 354 pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
370} 355}
371 356
372/* Flag bits for H_BULK_REMOVE */ 357/* Flag bits for H_BULK_REMOVE */
@@ -382,12 +367,12 @@ static void pSeries_lpar_hpte_removebolted(unsigned long ea,
382 */ 367 */
383static void pSeries_lpar_flush_hash_range(unsigned long number, int local) 368static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
384{ 369{
370 unsigned long vpn;
385 unsigned long i, pix, rc; 371 unsigned long i, pix, rc;
386 unsigned long flags = 0; 372 unsigned long flags = 0;
387 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 373 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
388 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 374 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
389 unsigned long param[9]; 375 unsigned long param[9];
390 unsigned long va;
391 unsigned long hash, index, shift, hidx, slot; 376 unsigned long hash, index, shift, hidx, slot;
392 real_pte_t pte; 377 real_pte_t pte;
393 int psize, ssize; 378 int psize, ssize;
@@ -399,21 +384,21 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
399 ssize = batch->ssize; 384 ssize = batch->ssize;
400 pix = 0; 385 pix = 0;
401 for (i = 0; i < number; i++) { 386 for (i = 0; i < number; i++) {
402 va = batch->vaddr[i]; 387 vpn = batch->vpn[i];
403 pte = batch->pte[i]; 388 pte = batch->pte[i];
404 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 389 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
405 hash = hpt_hash(va, shift, ssize); 390 hash = hpt_hash(vpn, shift, ssize);
406 hidx = __rpte_to_hidx(pte, index); 391 hidx = __rpte_to_hidx(pte, index);
407 if (hidx & _PTEIDX_SECONDARY) 392 if (hidx & _PTEIDX_SECONDARY)
408 hash = ~hash; 393 hash = ~hash;
409 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 394 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
410 slot += hidx & _PTEIDX_GROUP_IX; 395 slot += hidx & _PTEIDX_GROUP_IX;
411 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { 396 if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
412 pSeries_lpar_hpte_invalidate(slot, va, psize, 397 pSeries_lpar_hpte_invalidate(slot, vpn, psize,
413 ssize, local); 398 ssize, local);
414 } else { 399 } else {
415 param[pix] = HBR_REQUEST | HBR_AVPN | slot; 400 param[pix] = HBR_REQUEST | HBR_AVPN | slot;
416 param[pix+1] = hpte_encode_avpn(va, psize, 401 param[pix+1] = hpte_encode_avpn(vpn, psize,
417 ssize); 402 ssize);
418 pix += 2; 403 pix += 2;
419 if (pix == 8) { 404 if (pix == 8) {
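The flush loop above packs one (request, AVPN) parameter pair per HPTE and issues a bulk-remove hypervisor call every four pairs (eight parameters). A standalone sketch of that batching pattern (all names and values are illustrative stand-ins, not the hypervisor interface):

#include <stdio.h>

#define MAX_PARAMS 8

static void fake_bulk_remove(const unsigned long *param, int n)
{
	/* stands in for the H_BULK_REMOVE hypervisor call */
	printf("bulk remove: %d parameters (%d entries)\n", n, n / 2);
}

int main(void)
{
	unsigned long param[MAX_PARAMS];
	int pix = 0;

	for (int i = 0; i < 10; i++) {		/* ten HPTEs to invalidate */
		param[pix]     = 0x100 | i;	/* request word: flags | slot */
		param[pix + 1] = 0x1000 + i;	/* match word: encoded AVPN */
		pix += 2;
		if (pix == MAX_PARAMS) {	/* batch full, flush it */
			fake_bulk_remove(param, pix);
			pix = 0;
		}
	}
	if (pix)				/* flush any partial batch */
		fake_bulk_remove(param, pix);
	return 0;
}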
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 109fdb75578d..d19f4977c834 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -210,6 +210,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
210static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) 210static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
211{ 211{
212 struct device_node *dn; 212 struct device_node *dn;
213 struct eeh_dev *edev;
213 214
214 /* Found our PE and assume 8 at that point. */ 215 /* Found our PE and assume 8 at that point. */
215 216
@@ -217,7 +218,10 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
217 if (!dn) 218 if (!dn)
218 return NULL; 219 return NULL;
219 220
220 dn = eeh_find_device_pe(dn); 221 /* Get the top level device in the PE */
222 edev = of_node_to_eeh_dev(dn);
223 edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
224 dn = eeh_dev_to_of_node(edev);
221 if (!dn) 225 if (!dn)
222 return NULL; 226 return NULL;
223 227
@@ -387,12 +391,13 @@ static int check_msix_entries(struct pci_dev *pdev)
387 return 0; 391 return 0;
388} 392}
389 393
390static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 394static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
391{ 395{
392 struct pci_dn *pdn; 396 struct pci_dn *pdn;
393 int hwirq, virq, i, rc; 397 int hwirq, virq, i, rc;
394 struct msi_desc *entry; 398 struct msi_desc *entry;
395 struct msi_msg msg; 399 struct msi_msg msg;
400 int nvec = nvec_in;
396 401
397 pdn = get_pdn(pdev); 402 pdn = get_pdn(pdev);
398 if (!pdn) 403 if (!pdn)
@@ -402,10 +407,23 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
402 return -EINVAL; 407 return -EINVAL;
403 408
404 /* 409 /*
 410 * Firmware currently refuses any non-power-of-two allocation
411 * so we round up if the quota will allow it.
412 */
413 if (type == PCI_CAP_ID_MSIX) {
414 int m = roundup_pow_of_two(nvec);
415 int quota = msi_quota_for_device(pdev, m);
416
417 if (quota >= m)
418 nvec = m;
419 }
420
421 /*
405 * Try the new more explicit firmware interface, if that fails fall 422 * Try the new more explicit firmware interface, if that fails fall
406 * back to the old interface. The old interface is known to never 423 * back to the old interface. The old interface is known to never
407 * return MSI-Xs. 424 * return MSI-Xs.
408 */ 425 */
426again:
409 if (type == PCI_CAP_ID_MSI) { 427 if (type == PCI_CAP_ID_MSI) {
410 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); 428 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
411 429
@@ -417,6 +435,10 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
417 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); 435 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
418 436
419 if (rc != nvec) { 437 if (rc != nvec) {
438 if (nvec != nvec_in) {
439 nvec = nvec_in;
440 goto again;
441 }
420 pr_debug("rtas_msi: rtas_change_msi() failed\n"); 442 pr_debug("rtas_msi: rtas_change_msi() failed\n");
421 return rc; 443 return rc;
422 } 444 }
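The MSI-X path added above first asks for a power-of-two vector count (when the quota allows it) and retries with the caller's original count if firmware refuses the rounded request. A standalone sketch of that flow (the helpers below are stand-ins, not kernel or RTAS interfaces):

#include <stdio.h>

/* stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* pretend firmware: accepts at most 6 vectors in this example */
static int fw_change_msi(unsigned int nvec)
{
	return nvec <= 6 ? (int)nvec : -1;
}

int main(void)
{
	unsigned int nvec_in = 5, quota = 8;
	unsigned int nvec = nvec_in;
	unsigned int m = roundup_pow2(nvec_in);
	int rc;

	if (quota >= m)
		nvec = m;			/* try the rounded-up count first */

	rc = fw_change_msi(nvec);
	if (rc < 0 && nvec != nvec_in)		/* rounded request refused, retry */
		rc = fw_change_msi(nvec_in);

	printf("requested %u, got %d\n", nvec_in, rc);	/* requested 5, got 5 */
	return 0;
}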
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 2c6ded29f73d..56b864d777ee 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -73,7 +73,7 @@ void __init pSeries_final_fixup(void)
73{ 73{
74 pSeries_request_regions(); 74 pSeries_request_regions();
75 75
76 pci_addr_cache_build(); 76 eeh_addr_cache_build();
77} 77}
78 78
79/* 79/*
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 3ccebc83dc02..261a577a3dd2 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -65,27 +65,43 @@ pcibios_find_pci_bus(struct device_node *dn)
65EXPORT_SYMBOL_GPL(pcibios_find_pci_bus); 65EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
66 66
67/** 67/**
68 * pcibios_remove_pci_devices - remove all devices under this bus 68 * __pcibios_remove_pci_devices - remove all devices under this bus
69 * @bus: the indicated PCI bus
70 * @purge_pe: destroy the PE on removal of PCI devices
69 * 71 *
70 * Remove all of the PCI devices under this bus both from the 72 * Remove all of the PCI devices under this bus both from the
71 * linux pci device tree, and from the powerpc EEH address cache. 73 * linux pci device tree, and from the powerpc EEH address cache.
 74 * By default, the corresponding PE will be destroyed during the
 75 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
 76 * the corresponding PE won't be destroyed and deallocated.
72 */ 77 */
73void pcibios_remove_pci_devices(struct pci_bus *bus) 78void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
74{ 79{
75 struct pci_dev *dev, *tmp; 80 struct pci_dev *dev, *tmp;
76 struct pci_bus *child_bus; 81 struct pci_bus *child_bus;
77 82
78 /* First go down child busses */ 83 /* First go down child busses */
79 list_for_each_entry(child_bus, &bus->children, node) 84 list_for_each_entry(child_bus, &bus->children, node)
80 pcibios_remove_pci_devices(child_bus); 85 __pcibios_remove_pci_devices(child_bus, purge_pe);
81 86
82 pr_debug("PCI: Removing devices on bus %04x:%02x\n", 87 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
83 pci_domain_nr(bus), bus->number); 88 pci_domain_nr(bus), bus->number);
84 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 89 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
85 pr_debug(" * Removing %s...\n", pci_name(dev)); 90 pr_debug(" * Removing %s...\n", pci_name(dev));
86 eeh_remove_bus_device(dev); 91 eeh_remove_bus_device(dev, purge_pe);
87 pci_stop_and_remove_bus_device(dev); 92 pci_stop_and_remove_bus_device(dev);
88 } 93 }
94}
95
96/**
97 * pcibios_remove_pci_devices - remove all devices under this bus
98 *
99 * Remove all of the PCI devices under this bus both from the
100 * linux pci device tree, and from the powerpc EEH address cache.
101 */
102void pcibios_remove_pci_devices(struct pci_bus *bus)
103{
104 __pcibios_remove_pci_devices(bus, 1);
89} 105}
90EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 106EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
91 107
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 51ecac920dd8..e3cb7ae61658 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -388,10 +388,8 @@ static void __init pSeries_setup_arch(void)
388 388
389 /* Find and initialize PCI host bridges */ 389 /* Find and initialize PCI host bridges */
390 init_pci_config_tokens(); 390 init_pci_config_tokens();
391 eeh_pseries_init();
392 find_and_init_phbs(); 391 find_and_init_phbs();
393 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); 392 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
394 eeh_init();
395 393
396 pSeries_nvram_init(); 394 pSeries_nvram_init();
397 395
@@ -416,16 +414,20 @@ static int __init pSeries_init_panel(void)
416} 414}
417machine_arch_initcall(pseries, pSeries_init_panel); 415machine_arch_initcall(pseries, pSeries_init_panel);
418 416
419static int pseries_set_dabr(unsigned long dabr) 417static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
420{ 418{
421 return plpar_hcall_norets(H_SET_DABR, dabr); 419 return plpar_hcall_norets(H_SET_DABR, dabr);
422} 420}
423 421
424static int pseries_set_xdabr(unsigned long dabr) 422static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
425{ 423{
426 /* We want to catch accesses from kernel and userspace */ 424 /* Have to set at least one bit in the DABRX according to PAPR */
427 return plpar_hcall_norets(H_SET_XDABR, dabr, 425 if (dabrx == 0 && dabr == 0)
428 H_DABRX_KERNEL | H_DABRX_USER); 426 dabrx = DABRX_USER;
427 /* PAPR says we can only set kernel and user bits */
428 dabrx &= DABRX_KERNEL | DABRX_USER;
429
430 return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
429} 431}
430 432
431#define CMO_CHARACTERISTICS_TOKEN 44 433#define CMO_CHARACTERISTICS_TOKEN 44
@@ -529,10 +531,10 @@ static void __init pSeries_init_early(void)
529 if (firmware_has_feature(FW_FEATURE_LPAR)) 531 if (firmware_has_feature(FW_FEATURE_LPAR))
530 hvc_vio_init_early(); 532 hvc_vio_init_early();
531#endif 533#endif
532 if (firmware_has_feature(FW_FEATURE_DABR)) 534 if (firmware_has_feature(FW_FEATURE_XDABR))
533 ppc_md.set_dabr = pseries_set_dabr;
534 else if (firmware_has_feature(FW_FEATURE_XDABR))
535 ppc_md.set_dabr = pseries_set_xdabr; 535 ppc_md.set_dabr = pseries_set_xdabr;
536 else if (firmware_has_feature(FW_FEATURE_DABR))
537 ppc_md.set_dabr = pseries_set_dabr;
536 538
537 pSeries_cmo_feature_init(); 539 pSeries_cmo_feature_init();
538 iommu_init_early_pSeries(); 540 iommu_init_early_pSeries();
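The reworked pseries_set_xdabr() above sanitises the DABRX value to satisfy PAPR: at least one extension bit must be set, and only the kernel and user bits may be passed to the hypervisor. A standalone sketch of that filtering (the bit values are assumptions for illustration, not the kernel's definitions):

#include <stdio.h>

#define FAKE_DABRX_USER		0x1UL	/* assumed values, illustration only */
#define FAKE_DABRX_KERNEL	0x2UL
#define FAKE_DABRX_HYP		0x4UL

static unsigned long sanitize_dabrx(unsigned long dabr, unsigned long dabrx)
{
	if (dabrx == 0 && dabr == 0)
		dabrx = FAKE_DABRX_USER;			/* at least one bit */
	return dabrx & (FAKE_DABRX_KERNEL | FAKE_DABRX_USER);	/* kernel/user only */
}

int main(void)
{
	printf("0x%lx\n", sanitize_dabrx(0, 0));					/* 0x1 */
	printf("0x%lx\n", sanitize_dabrx(0x1000, FAKE_DABRX_HYP | FAKE_DABRX_KERNEL));	/* 0x2 */
	return 0;
}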
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 1bd7ecb24620..a57600b3a4e3 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o
15obj-$(CONFIG_PPC_PMI) += pmi.o 15obj-$(CONFIG_PPC_PMI) += pmi.o
16obj-$(CONFIG_U3_DART) += dart_iommu.o 16obj-$(CONFIG_U3_DART) += dart_iommu.o
17obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o 17obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
18obj-$(CONFIG_FSL_SOC) += fsl_soc.o 18obj-$(CONFIG_FSL_SOC) += fsl_soc.o fsl_mpic_err.o
19obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y) 19obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
20obj-$(CONFIG_FSL_PMC) += fsl_pmc.o 20obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
21obj-$(CONFIG_FSL_LBC) += fsl_lbc.o 21obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 4f2680f431b5..bd968a43a48b 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -43,7 +43,6 @@
43#include <asm/iommu.h> 43#include <asm/iommu.h>
44#include <asm/pci-bridge.h> 44#include <asm/pci-bridge.h>
45#include <asm/machdep.h> 45#include <asm/machdep.h>
46#include <asm/abs_addr.h>
47#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
48#include <asm/ppc-pci.h> 47#include <asm/ppc-pci.h>
49 48
@@ -74,11 +73,16 @@ static int dart_is_u4;
74 73
75#define DBG(...) 74#define DBG(...)
76 75
76static DEFINE_SPINLOCK(invalidate_lock);
77
77static inline void dart_tlb_invalidate_all(void) 78static inline void dart_tlb_invalidate_all(void)
78{ 79{
79 unsigned long l = 0; 80 unsigned long l = 0;
80 unsigned int reg, inv_bit; 81 unsigned int reg, inv_bit;
81 unsigned long limit; 82 unsigned long limit;
83 unsigned long flags;
84
85 spin_lock_irqsave(&invalidate_lock, flags);
82 86
83 DBG("dart: flush\n"); 87 DBG("dart: flush\n");
84 88
@@ -111,12 +115,17 @@ retry:
111 panic("DART: TLB did not flush after waiting a long " 115 panic("DART: TLB did not flush after waiting a long "
112 "time. Buggy U3 ?"); 116 "time. Buggy U3 ?");
113 } 117 }
118
119 spin_unlock_irqrestore(&invalidate_lock, flags);
114} 120}
115 121
116static inline void dart_tlb_invalidate_one(unsigned long bus_rpn) 122static inline void dart_tlb_invalidate_one(unsigned long bus_rpn)
117{ 123{
118 unsigned int reg; 124 unsigned int reg;
119 unsigned int l, limit; 125 unsigned int l, limit;
126 unsigned long flags;
127
128 spin_lock_irqsave(&invalidate_lock, flags);
120 129
121 reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE | 130 reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE |
122 (bus_rpn & DART_CNTL_U4_IONE_MASK); 131 (bus_rpn & DART_CNTL_U4_IONE_MASK);
@@ -138,6 +147,8 @@ wait_more:
138 panic("DART: TLB did not flush after waiting a long " 147 panic("DART: TLB did not flush after waiting a long "
139 "time. Buggy U4 ?"); 148 "time. Buggy U4 ?");
140 } 149 }
150
151 spin_unlock_irqrestore(&invalidate_lock, flags);
141} 152}
142 153
143static void dart_flush(struct iommu_table *tbl) 154static void dart_flush(struct iommu_table *tbl)
@@ -167,7 +178,7 @@ static int dart_build(struct iommu_table *tbl, long index,
167 */ 178 */
168 l = npages; 179 l = npages;
169 while (l--) { 180 while (l--) {
170 rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT; 181 rpn = __pa(uaddr) >> DART_PAGE_SHIFT;
171 182
172 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); 183 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
173 184
@@ -244,7 +255,7 @@ static int __init dart_init(struct device_node *dart_node)
244 panic("DART: Cannot map registers!"); 255 panic("DART: Cannot map registers!");
245 256
246 /* Map in DART table */ 257 /* Map in DART table */
247 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); 258 dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize);
248 259
249 /* Fill initial table */ 260 /* Fill initial table */
250 for (i = 0; i < dart_tablesize/4; i++) 261 for (i = 0; i < dart_tablesize/4; i++)
@@ -463,7 +474,7 @@ void __init alloc_dart_table(void)
463 * will blow up an entire large page anyway in the kernel mapping 474 * will blow up an entire large page anyway in the kernel mapping
464 */ 475 */
465 dart_tablebase = (unsigned long) 476 dart_tablebase = (unsigned long)
466 abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); 477 __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
467 478
468 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); 479 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
469} 480}
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 68ac3aacb191..d131c8a1cb15 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -193,6 +193,16 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = {
193 { 193 {
194 .compatible = "fsl,mpc8548-l2-cache-controller", 194 .compatible = "fsl,mpc8548-l2-cache-controller",
195 }, 195 },
196 { .compatible = "fsl,mpc8544-l2-cache-controller",},
197 { .compatible = "fsl,mpc8572-l2-cache-controller",},
198 { .compatible = "fsl,mpc8536-l2-cache-controller",},
199 { .compatible = "fsl,p1021-l2-cache-controller",},
200 { .compatible = "fsl,p1012-l2-cache-controller",},
201 { .compatible = "fsl,p1025-l2-cache-controller",},
202 { .compatible = "fsl,p1016-l2-cache-controller",},
203 { .compatible = "fsl,p1024-l2-cache-controller",},
204 { .compatible = "fsl,p1015-l2-cache-controller",},
205 { .compatible = "fsl,p1010-l2-cache-controller",},
196 {}, 206 {},
197}; 207};
198 208
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
index b31f19f61031..097cc9d2585b 100644
--- a/arch/powerpc/sysdev/fsl_ifc.c
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -244,12 +244,6 @@ static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev)
244 /* get the nand machine irq */ 244 /* get the nand machine irq */
245 fsl_ifc_ctrl_dev->nand_irq = 245 fsl_ifc_ctrl_dev->nand_irq =
246 irq_of_parse_and_map(dev->dev.of_node, 1); 246 irq_of_parse_and_map(dev->dev.of_node, 1);
247 if (fsl_ifc_ctrl_dev->nand_irq == NO_IRQ) {
248 dev_err(&dev->dev, "failed to get irq resource "
249 "for NAND Machine\n");
250 ret = -ENODEV;
251 goto err;
252 }
253 247
254 fsl_ifc_ctrl_dev->dev = &dev->dev; 248 fsl_ifc_ctrl_dev->dev = &dev->dev;
255 249
@@ -267,12 +261,14 @@ static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev)
267 goto err_irq; 261 goto err_irq;
268 } 262 }
269 263
270 ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0, 264 if (fsl_ifc_ctrl_dev->nand_irq) {
271 "fsl-ifc-nand", fsl_ifc_ctrl_dev); 265 ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq,
272 if (ret != 0) { 266 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev);
273 dev_err(&dev->dev, "failed to install irq (%d)\n", 267 if (ret != 0) {
274 fsl_ifc_ctrl_dev->nand_irq); 268 dev_err(&dev->dev, "failed to install irq (%d)\n",
275 goto err_nandirq; 269 fsl_ifc_ctrl_dev->nand_irq);
270 goto err_nandirq;
271 }
276 } 272 }
277 273
278 return 0; 274 return 0;
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
new file mode 100644
index 000000000000..b83f32562a37
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
3 *
4 * Author: Varun Sethi <varun.sethi@freescale.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; version 2 of the
9 * License.
10 *
11 */
12
13#include <linux/irq.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h>
16
17#include <asm/io.h>
18#include <asm/irq.h>
19#include <asm/mpic.h>
20
21#include "mpic.h"
22
23#define MPIC_ERR_INT_BASE 0x3900
24#define MPIC_ERR_INT_EISR 0x0000
25#define MPIC_ERR_INT_EIMR 0x0010
26
27static inline u32 mpic_fsl_err_read(u32 __iomem *base, unsigned int err_reg)
28{
29 return in_be32(base + (err_reg >> 2));
30}
31
32static inline void mpic_fsl_err_write(u32 __iomem *base, u32 value)
33{
34 out_be32(base + (MPIC_ERR_INT_EIMR >> 2), value);
35}
36
37static void fsl_mpic_mask_err(struct irq_data *d)
38{
39 u32 eimr;
40 struct mpic *mpic = irq_data_get_irq_chip_data(d);
41 unsigned int src = virq_to_hw(d->irq) - mpic->err_int_vecs[0];
42
43 eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
44 eimr |= (1 << (31 - src));
45 mpic_fsl_err_write(mpic->err_regs, eimr);
46}
47
48static void fsl_mpic_unmask_err(struct irq_data *d)
49{
50 u32 eimr;
51 struct mpic *mpic = irq_data_get_irq_chip_data(d);
52 unsigned int src = virq_to_hw(d->irq) - mpic->err_int_vecs[0];
53
54 eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
55 eimr &= ~(1 << (31 - src));
56 mpic_fsl_err_write(mpic->err_regs, eimr);
57}
58
59static struct irq_chip fsl_mpic_err_chip = {
60 .irq_disable = fsl_mpic_mask_err,
61 .irq_mask = fsl_mpic_mask_err,
62 .irq_unmask = fsl_mpic_unmask_err,
63};
64
65int mpic_setup_error_int(struct mpic *mpic, int intvec)
66{
67 int i;
68
69 mpic->err_regs = ioremap(mpic->paddr + MPIC_ERR_INT_BASE, 0x1000);
70 if (!mpic->err_regs) {
71 pr_err("could not map mpic error registers\n");
72 return -ENOMEM;
73 }
74 mpic->hc_err = fsl_mpic_err_chip;
75 mpic->hc_err.name = mpic->name;
76 mpic->flags |= MPIC_FSL_HAS_EIMR;
77 /* allocate interrupt vectors for error interrupts */
78 for (i = MPIC_MAX_ERR - 1; i >= 0; i--)
79 mpic->err_int_vecs[i] = --intvec;
80
81 return 0;
82}
83
84int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw)
85{
86 if ((mpic->flags & MPIC_FSL_HAS_EIMR) &&
87 (hw >= mpic->err_int_vecs[0] &&
88 hw <= mpic->err_int_vecs[MPIC_MAX_ERR - 1])) {
89 WARN_ON(mpic->flags & MPIC_SECONDARY);
90
91 pr_debug("mpic: mapping as Error Interrupt\n");
92 irq_set_chip_data(virq, mpic);
93 irq_set_chip_and_handler(virq, &mpic->hc_err,
94 handle_level_irq);
95 return 1;
96 }
97
98 return 0;
99}
100
101static irqreturn_t fsl_error_int_handler(int irq, void *data)
102{
103 struct mpic *mpic = (struct mpic *) data;
104 u32 eisr, eimr;
105 int errint;
106 unsigned int cascade_irq;
107
108 eisr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EISR);
109 eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
110
111 if (!(eisr & ~eimr))
112 return IRQ_NONE;
113
114 while (eisr) {
115 errint = __builtin_clz(eisr);
116 cascade_irq = irq_linear_revmap(mpic->irqhost,
117 mpic->err_int_vecs[errint]);
118 WARN_ON(cascade_irq == NO_IRQ);
119 if (cascade_irq != NO_IRQ) {
120 generic_handle_irq(cascade_irq);
121 } else {
122 eimr |= 1 << (31 - errint);
123 mpic_fsl_err_write(mpic->err_regs, eimr);
124 }
125 eisr &= ~(1 << (31 - errint));
126 }
127
128 return IRQ_HANDLED;
129}
130
131void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
132{
133 unsigned int virq;
134 int ret;
135
136 virq = irq_create_mapping(mpic->irqhost, irqnum);
137 if (virq == NO_IRQ) {
138 pr_err("Error interrupt setup failed\n");
139 return;
140 }
141
142 /* Mask all error interrupts */
143 mpic_fsl_err_write(mpic->err_regs, ~0);
144
145 ret = request_irq(virq, fsl_error_int_handler, IRQF_NO_THREAD,
146 "mpic-error-int", mpic);
147 if (ret)
148 pr_err("Failed to register error interrupt handler\n");
149}
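
fsl_mpic_err.c treats the 32-bit EISR/EIMR pair as big-endian-numbered registers: source N maps to mask (1 << (31 - N)), and __builtin_clz() picks the lowest-numbered pending source. A standalone sketch of the same scan loop (userspace C, not kernel code; __builtin_clz is the GCC/Clang builtin the handler relies on):

#include <stdio.h>
#include <stdint.h>

static void scan_errors(uint32_t eisr, uint32_t eimr)
{
	if (!(eisr & ~eimr)) {			/* nothing unmasked is pending */
		printf("spurious\n");
		return;
	}

	while (eisr) {
		int src = __builtin_clz(eisr);	/* lowest-numbered pending source */
		printf("error source %d pending\n", src);
		eisr &= ~(1u << (31 - src));	/* clear it and keep scanning */
	}
}

int main(void)
{
	/* sources 0, 29 and 31 pending; source 31 is masked by EIMR */
	scan_errors(0x80000005u, 0x00000001u);
	return 0;
}
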
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index c37f46136321..ffb93ae9379b 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -38,15 +38,15 @@ static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
38 38
39static void __devinit quirk_fsl_pcie_header(struct pci_dev *dev) 39static void __devinit quirk_fsl_pcie_header(struct pci_dev *dev)
40{ 40{
41 u8 progif; 41 u8 hdr_type;
42 42
43 /* if we aren't a PCIe don't bother */ 43 /* if we aren't a PCIe don't bother */
44 if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) 44 if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
45 return; 45 return;
46 46
47 /* if we aren't in host mode don't bother */ 47 /* if we aren't in host mode don't bother */
48 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); 48 pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
49 if (progif & 0x1) 49 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
50 return; 50 return;
51 51
52 dev->class = PCI_CLASS_BRIDGE_PCI << 8; 52 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
@@ -143,18 +143,20 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
143 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", 143 pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
144 (u64)rsrc->start, (u64)resource_size(rsrc)); 144 (u64)rsrc->start, (u64)resource_size(rsrc));
145 145
146 if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) {
147 win_idx = 2;
148 start_idx = 0;
149 end_idx = 3;
150 }
151
152 pci = ioremap(rsrc->start, resource_size(rsrc)); 146 pci = ioremap(rsrc->start, resource_size(rsrc));
153 if (!pci) { 147 if (!pci) {
154 dev_err(hose->parent, "Unable to map ATMU registers\n"); 148 dev_err(hose->parent, "Unable to map ATMU registers\n");
155 return; 149 return;
156 } 150 }
157 151
152 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
153 if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
154 win_idx = 2;
155 start_idx = 0;
156 end_idx = 3;
157 }
158 }
159
158 /* Disable all windows (except powar0 since it's ignored) */ 160 /* Disable all windows (except powar0 since it's ignored) */
159 for(i = 1; i < 5; i++) 161 for(i = 1; i < 5; i++)
160 out_be32(&pci->pow[i].powar, 0); 162 out_be32(&pci->pow[i].powar, 0);
@@ -425,7 +427,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
425 struct pci_controller *hose; 427 struct pci_controller *hose;
426 struct resource rsrc; 428 struct resource rsrc;
427 const int *bus_range; 429 const int *bus_range;
428 u8 progif; 430 u8 hdr_type, progif;
429 431
430 if (!of_device_is_available(dev)) { 432 if (!of_device_is_available(dev)) {
431 pr_warning("%s: disabled\n", dev->full_name); 433 pr_warning("%s: disabled\n", dev->full_name);
@@ -457,15 +459,17 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
457 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, 459 setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
458 PPC_INDIRECT_TYPE_BIG_ENDIAN); 460 PPC_INDIRECT_TYPE_BIG_ENDIAN);
459 461
460 early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif); 462 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
461 if ((progif & 1) == 1) { 463 /* For PCIE read HEADER_TYPE to identify controller mode */
462 /* unmap cfg_data & cfg_addr separately if not on same page */ 464 early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
463 if (((unsigned long)hose->cfg_data & PAGE_MASK) != 465 if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
464 ((unsigned long)hose->cfg_addr & PAGE_MASK)) 466 goto no_bridge;
465 iounmap(hose->cfg_data); 467
466 iounmap(hose->cfg_addr); 468 } else {
467 pcibios_free_controller(hose); 469 /* For PCI read PROG to identify controller mode */
468 return -ENODEV; 470 early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
471 if ((progif & 1) == 1)
472 goto no_bridge;
469 } 473 }
470 474
471 setup_pci_cmd(hose); 475 setup_pci_cmd(hose);
@@ -494,6 +498,15 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
494 setup_pci_atmu(hose, &rsrc); 498 setup_pci_atmu(hose, &rsrc);
495 499
496 return 0; 500 return 0;
501
502no_bridge:
503 /* unmap cfg_data & cfg_addr separately if not on same page */
504 if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
505 ((unsigned long)hose->cfg_addr & PAGE_MASK))
506 iounmap(hose->cfg_data);
507 iounmap(hose->cfg_addr);
508 pcibios_free_controller(hose);
509 return -ENODEV;
497} 510}
498#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ 511#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
499 512
@@ -818,6 +831,7 @@ static const struct of_device_id pci_ids[] = {
818 { .compatible = "fsl,p1010-pcie", }, 831 { .compatible = "fsl,p1010-pcie", },
819 { .compatible = "fsl,p1023-pcie", }, 832 { .compatible = "fsl,p1023-pcie", },
820 { .compatible = "fsl,p4080-pcie", }, 833 { .compatible = "fsl,p4080-pcie", },
834 { .compatible = "fsl,qoriq-pcie-v2.4", },
821 { .compatible = "fsl,qoriq-pcie-v2.3", }, 835 { .compatible = "fsl,qoriq-pcie-v2.3", },
822 { .compatible = "fsl,qoriq-pcie-v2.2", }, 836 { .compatible = "fsl,qoriq-pcie-v2.2", },
823 {}, 837 {},
@@ -825,57 +839,80 @@ static const struct of_device_id pci_ids[] = {
825 839
826struct device_node *fsl_pci_primary; 840struct device_node *fsl_pci_primary;
827 841
828void __devinit fsl_pci_init(void) 842void fsl_pci_assign_primary(void)
829{ 843{
830 int ret; 844 struct device_node *np;
831 struct device_node *node;
832 struct pci_controller *hose;
833 dma_addr_t max = 0xffffffff;
834 845
835 /* Callers can specify the primary bus using other means. */ 846 /* Callers can specify the primary bus using other means. */
836 if (!fsl_pci_primary) { 847 if (fsl_pci_primary)
837 /* If a PCI host bridge contains an ISA node, it's primary. */ 848 return;
838 node = of_find_node_by_type(NULL, "isa"); 849
839 while ((fsl_pci_primary = of_get_parent(node))) { 850 /* If a PCI host bridge contains an ISA node, it's primary. */
840 of_node_put(node); 851 np = of_find_node_by_type(NULL, "isa");
841 node = fsl_pci_primary; 852 while ((fsl_pci_primary = of_get_parent(np))) {
842 853 of_node_put(np);
843 if (of_match_node(pci_ids, node)) 854 np = fsl_pci_primary;
844 break; 855
845 } 856 if (of_match_node(pci_ids, np) && of_device_is_available(np))
857 return;
846 } 858 }
847 859
848 node = NULL; 860 /*
849 for_each_node_by_type(node, "pci") { 861 * If there's no PCI host bridge with ISA, arbitrarily
850 if (of_match_node(pci_ids, node)) { 862 * designate one as primary. This can go away once
851 /* 863 * various bugs with primary-less systems are fixed.
852 * If there's no PCI host bridge with ISA, arbitrarily 864 */
853 * designate one as primary. This can go away once 865 for_each_matching_node(np, pci_ids) {
854 * various bugs with primary-less systems are fixed. 866 if (of_device_is_available(np)) {
855 */ 867 fsl_pci_primary = np;
856 if (!fsl_pci_primary) 868 of_node_put(np);
857 fsl_pci_primary = node; 869 return;
858
859 ret = fsl_add_bridge(node, fsl_pci_primary == node);
860 if (ret == 0) {
861 hose = pci_find_hose_for_OF_device(node);
862 max = min(max, hose->dma_window_base_cur +
863 hose->dma_window_size);
864 }
865 } 870 }
866 } 871 }
872}
867 873
874static int __devinit fsl_pci_probe(struct platform_device *pdev)
875{
876 int ret;
877 struct device_node *node;
868#ifdef CONFIG_SWIOTLB 878#ifdef CONFIG_SWIOTLB
869 /* 879 struct pci_controller *hose;
870 * if we couldn't map all of DRAM via the dma windows 880#endif
871 * we need SWIOTLB to handle buffers located outside of 881
872 * dma capable memory region 882 node = pdev->dev.of_node;
873 */ 883 ret = fsl_add_bridge(node, fsl_pci_primary == node);
874 if (memblock_end_of_DRAM() - 1 > max) { 884
875 ppc_swiotlb_enable = 1; 885#ifdef CONFIG_SWIOTLB
876 set_pci_dma_ops(&swiotlb_dma_ops); 886 if (ret == 0) {
877 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 887 hose = pci_find_hose_for_OF_device(pdev->dev.of_node);
888
889 /*
890 * if we couldn't map all of DRAM via the dma windows
891 * we need SWIOTLB to handle buffers located outside of
892 * dma capable memory region
893 */
894 if (memblock_end_of_DRAM() - 1 > hose->dma_window_base_cur +
895 hose->dma_window_size)
896 ppc_swiotlb_enable = 1;
878 } 897 }
879#endif 898#endif
899
900 mpc85xx_pci_err_probe(pdev);
901
902 return 0;
903}
904
905static struct platform_driver fsl_pci_driver = {
906 .driver = {
907 .name = "fsl-pci",
908 .of_match_table = pci_ids,
909 },
910 .probe = fsl_pci_probe,
911};
912
913static int __init fsl_pci_init(void)
914{
915 return platform_driver_register(&fsl_pci_driver);
880} 916}
917arch_initcall(fsl_pci_init);
881#endif 918#endif
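
The fsl_pci.c hunks switch host-mode detection from the PROG-IF bit to the PCI header type, but only on PCIe controllers; plain PCI keeps the old test. A minimal sketch of that decision (standalone C, not kernel code; the constant mirrors PCI_HEADER_TYPE_BRIDGE from the PCI config-space definitions):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PCI_HEADER_TYPE_BRIDGE	1

static bool is_host_bridge(bool is_pcie, uint8_t hdr_type, uint8_t progif)
{
	if (is_pcie)	/* bridge header (type 1) means root-complex/host mode */
		return (hdr_type & 0x7f) == PCI_HEADER_TYPE_BRIDGE;
	return (progif & 1) == 0;	/* PROG-IF bit 0 set means agent mode */
}

int main(void)
{
	printf("PCIe, type 0x81: %s\n", is_host_bridge(true, 0x81, 0) ? "host" : "agent");
	printf("PCI,  progif 1 : %s\n", is_host_bridge(false, 0, 1) ? "host" : "agent");
	return 0;
}
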
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index baa0fd18289f..d078537adece 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -16,6 +16,7 @@
16 16
17#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */ 17#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */
18#define PCIE_LTSSM_L0 0x16 /* L0 state */ 18#define PCIE_LTSSM_L0 0x16 /* L0 state */
19#define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */
19#define PIWAR_EN 0x80000000 /* Enable */ 20#define PIWAR_EN 0x80000000 /* Enable */
20#define PIWAR_PF 0x20000000 /* prefetch */ 21#define PIWAR_PF 0x20000000 /* prefetch */
21#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */ 22#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */
@@ -57,7 +58,9 @@ struct ccsr_pci {
57 __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */ 58 __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */
58 __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */ 59 __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */
59 __be32 pex_pmcr; /* 0x.02c - PCIE power management command register */ 60 __be32 pex_pmcr; /* 0x.02c - PCIE power management command register */
60 u8 res3[3024]; 61 u8 res3[3016];
62 __be32 block_rev1; /* 0x.bf8 - PCIE Block Revision register 1 */
63 __be32 block_rev2; /* 0x.bfc - PCIE Block Revision register 2 */
61 64
62/* PCI/PCI Express outbound window 0-4 65/* PCI/PCI Express outbound window 0-4
63 * Window 0 is the default window and is the only window enabled upon reset. 66 * Window 0 is the default window and is the only window enabled upon reset.
@@ -95,10 +98,19 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose);
95 98
96extern struct device_node *fsl_pci_primary; 99extern struct device_node *fsl_pci_primary;
97 100
98#ifdef CONFIG_FSL_PCI 101#ifdef CONFIG_PCI
99void fsl_pci_init(void); 102void fsl_pci_assign_primary(void);
100#else 103#else
101static inline void fsl_pci_init(void) {} 104static inline void fsl_pci_assign_primary(void) {}
105#endif
106
107#ifdef CONFIG_EDAC_MPC85XX
108int mpc85xx_pci_err_probe(struct platform_device *op);
109#else
110static inline int mpc85xx_pci_err_probe(struct platform_device *op)
111{
112 return -ENOTSUPP;
113}
102#endif 114#endif
103 115
104#endif /* __POWERPC_FSL_PCI_H */ 116#endif /* __POWERPC_FSL_PCI_H */
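
A quick check of the ccsr_pci layout change above: pex_pmcr occupies 0x02c-0x02f, so res3 starts at 0x030, and shrinking it from 3024 to 3016 bytes places block_rev1 at 0x.bf8 and block_rev2 at 0x.bfc while everything after them keeps its offset (3016 + 2*4 == 3024). A standalone arithmetic check (not kernel code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int res3_start = 0x030;	/* first byte after pex_pmcr */

	assert(res3_start + 3016 == 0xbf8);	/* block_rev1 */
	assert(res3_start + 3016 + 4 == 0xbfc);	/* block_rev2 */
	assert(3016 + 8 == 3024);		/* later fields keep their offsets */
	printf("block_rev1 @ 0x%03x, block_rev2 @ 0x%03x\n", 0xbf8, 0xbfc);
	return 0;
}
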
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index bfc6211e5422..9c6e535daad2 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -6,7 +6,7 @@
6 * with various broken implementations of this HW. 6 * with various broken implementations of this HW.
7 * 7 *
8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9 * Copyright 2010-2011 Freescale Semiconductor, Inc. 9 * Copyright 2010-2012 Freescale Semiconductor, Inc.
10 * 10 *
11 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive 12 * License. See the file COPYING in the main directory of this archive
@@ -221,24 +221,24 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu
221 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); 221 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
222} 222}
223 223
224static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) 224static inline unsigned int mpic_tm_offset(struct mpic *mpic, unsigned int tm)
225{ 225{
226 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + 226 return (tm >> 2) * MPIC_TIMER_GROUP_STRIDE +
227 ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); 227 (tm & 3) * MPIC_INFO(TIMER_STRIDE);
228}
228 229
229 if (tm >= 4) 230static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
230 offset += 0x1000 / 4; 231{
232 unsigned int offset = mpic_tm_offset(mpic, tm) +
233 MPIC_INFO(TIMER_VECTOR_PRI);
231 234
232 return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); 235 return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
233} 236}
234 237
235static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) 238static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
236{ 239{
237 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + 240 unsigned int offset = mpic_tm_offset(mpic, tm) +
238 ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); 241 MPIC_INFO(TIMER_VECTOR_PRI);
239
240 if (tm >= 4)
241 offset += 0x1000 / 4;
242 242
243 _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); 243 _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
244} 244}
@@ -1026,6 +1026,9 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
1026 return 0; 1026 return 0;
1027 } 1027 }
1028 1028
1029 if (mpic_map_error_int(mpic, virq, hw))
1030 return 0;
1031
1029 if (hw >= mpic->num_sources) 1032 if (hw >= mpic->num_sources)
1030 return -EINVAL; 1033 return -EINVAL;
1031 1034
@@ -1085,7 +1088,16 @@ static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
1085 */ 1088 */
1086 switch (intspec[2]) { 1089 switch (intspec[2]) {
1087 case 0: 1090 case 0:
1088 case 1: /* no EISR/EIMR support for now, treat as shared IRQ */ 1091 break;
1092 case 1:
1093 if (!(mpic->flags & MPIC_FSL_HAS_EIMR))
1094 break;
1095
1096 if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs))
1097 return -EINVAL;
1098
1099 *out_hwirq = mpic->err_int_vecs[intspec[3]];
1100
1089 break; 1101 break;
1090 case 2: 1102 case 2:
1091 if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) 1103 if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
@@ -1301,6 +1313,42 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1301 mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); 1313 mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
1302 mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); 1314 mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
1303 1315
1316 if (mpic->flags & MPIC_FSL) {
1317 u32 brr1, version;
1318 int ret;
1319
1320 /*
1321 * Yes, Freescale really did put global registers in the
1322 * magic per-cpu area -- and they don't even show up in the
1323 * non-magic per-cpu copies that this driver normally uses.
1324 */
1325 mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs,
1326 MPIC_CPU_THISBASE, 0x1000);
1327
1328 brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1329 MPIC_FSL_BRR1);
1330 version = brr1 & MPIC_FSL_BRR1_VER;
1331
1332 /* Error interrupt mask register (EIMR) is required for
1333 * handling individual device error interrupts. EIMR
1334 * was added in MPIC version 4.1.
1335 *
 1336 * Here we reserve vector number space for error
 1337 * interrupt vectors. This space is stolen from the
 1338 * global vector number space, as in the case of ipis
 1339 * and timer interrupts.
1340 *
1341 * Available vector space = intvec_top - 12, where 12
1342 * is the number of vectors which have been consumed by
1343 * ipis and timer interrupts.
1344 */
1345 if (version >= 0x401) {
1346 ret = mpic_setup_error_int(mpic, intvec_top - 12);
1347 if (ret)
1348 return NULL;
1349 }
1350 }
1351
1304 /* Reset */ 1352 /* Reset */
1305 1353
1306 /* When using a device-node, reset requests are only honored if the MPIC 1354 /* When using a device-node, reset requests are only honored if the MPIC
@@ -1440,6 +1488,7 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
1440void __init mpic_init(struct mpic *mpic) 1488void __init mpic_init(struct mpic *mpic)
1441{ 1489{
1442 int i, cpu; 1490 int i, cpu;
1491 int num_timers = 4;
1443 1492
1444 BUG_ON(mpic->num_sources == 0); 1493 BUG_ON(mpic->num_sources == 0);
1445 1494
@@ -1448,15 +1497,34 @@ void __init mpic_init(struct mpic *mpic)
1448 /* Set current processor priority to max */ 1497 /* Set current processor priority to max */
1449 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); 1498 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1450 1499
1500 if (mpic->flags & MPIC_FSL) {
1501 u32 brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
1502 MPIC_FSL_BRR1);
1503 u32 version = brr1 & MPIC_FSL_BRR1_VER;
1504
1505 /*
1506 * Timer group B is present at the latest in MPIC 3.1 (e.g.
1507 * mpc8536). It is not present in MPIC 2.0 (e.g. mpc8544).
1508 * I don't know about the status of intermediate versions (or
1509 * whether they even exist).
1510 */
1511 if (version >= 0x0301)
1512 num_timers = 8;
1513 }
1514
 1515 /* FSL mpic error interrupt initialization */
1516 if (mpic->flags & MPIC_FSL_HAS_EIMR)
1517 mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
1518
1451 /* Initialize timers to our reserved vectors and mask them for now */ 1519 /* Initialize timers to our reserved vectors and mask them for now */
1452 for (i = 0; i < 4; i++) { 1520 for (i = 0; i < num_timers; i++) {
1521 unsigned int offset = mpic_tm_offset(mpic, i);
1522
1453 mpic_write(mpic->tmregs, 1523 mpic_write(mpic->tmregs,
1454 i * MPIC_INFO(TIMER_STRIDE) + 1524 offset + MPIC_INFO(TIMER_DESTINATION),
1455 MPIC_INFO(TIMER_DESTINATION),
1456 1 << hard_smp_processor_id()); 1525 1 << hard_smp_processor_id());
1457 mpic_write(mpic->tmregs, 1526 mpic_write(mpic->tmregs,
1458 i * MPIC_INFO(TIMER_STRIDE) + 1527 offset + MPIC_INFO(TIMER_VECTOR_PRI),
1459 MPIC_INFO(TIMER_VECTOR_PRI),
1460 MPIC_VECPRI_MASK | 1528 MPIC_VECPRI_MASK |
1461 (9 << MPIC_VECPRI_PRIORITY_SHIFT) | 1529 (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
1462 (mpic->timer_vecs[0] + i)); 1530 (mpic->timer_vecs[0] + i));
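
mpic_tm_offset() replaces the old "add 0x1000/4 for timers 4-7" special case with a group/stride formula, which mpic_init() then uses to initialize up to eight timers on MPIC >= 3.1. A standalone sketch of the calculation (plain C, not kernel code; the stride values are illustrative, chosen only to reproduce the old special case):

#include <stdio.h>

#define TIMER_STRIDE		(0x40 / 4)	/* words between timers in a group */
#define TIMER_GROUP_STRIDE	(0x1000 / 4)	/* words between timer groups A and B */

static unsigned int tm_offset(unsigned int tm)
{
	/* group index (tm >> 2) times group stride, plus position within the group */
	return (tm >> 2) * TIMER_GROUP_STRIDE + (tm & 3) * TIMER_STRIDE;
}

int main(void)
{
	for (unsigned int tm = 0; tm < 8; tm++)
		printf("timer %u -> word offset 0x%x\n", tm, tm_offset(tm));
	return 0;
}
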
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 13f3e8913a93..24bf07a63924 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -40,4 +40,26 @@ extern int mpic_set_affinity(struct irq_data *d,
40 const struct cpumask *cpumask, bool force); 40 const struct cpumask *cpumask, bool force);
41extern void mpic_reset_core(int cpu); 41extern void mpic_reset_core(int cpu);
42 42
43#ifdef CONFIG_FSL_SOC
44extern int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw);
45extern void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum);
46extern int mpic_setup_error_int(struct mpic *mpic, int intvec);
47#else
48static inline int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw)
49{
50 return 0;
51}
52
53
54static inline void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
55{
56 return;
57}
58
59static inline int mpic_setup_error_int(struct mpic *mpic, int intvec)
60{
61 return -1;
62}
63#endif
64
43#endif /* _POWERPC_SYSDEV_MPIC_H */ 65#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 9b49c65ee7a4..3a56a639a92e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -60,6 +60,8 @@ static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
60static unsigned long xmon_taken = 1; 60static unsigned long xmon_taken = 1;
61static int xmon_owner; 61static int xmon_owner;
62static int xmon_gate; 62static int xmon_gate;
63#else
64#define xmon_owner 0
63#endif /* CONFIG_SMP */ 65#endif /* CONFIG_SMP */
64 66
65static unsigned long in_xmon __read_mostly = 0; 67static unsigned long in_xmon __read_mostly = 0;
@@ -202,7 +204,13 @@ Commands:\n\
202 di dump instructions\n\ 204 di dump instructions\n\
203 df dump float values\n\ 205 df dump float values\n\
204 dd dump double values\n\ 206 dd dump double values\n\
205 dl dump the kernel log buffer\n\ 207 dl dump the kernel log buffer\n"
208#ifdef CONFIG_PPC64
209 "\
210 dp[#] dump paca for current cpu, or cpu #\n\
211 dpa dump paca for all possible cpus\n"
212#endif
213 "\
206 dr dump stream of raw bytes\n\ 214 dr dump stream of raw bytes\n\
207 e print exception information\n\ 215 e print exception information\n\
208 f flush cache\n\ 216 f flush cache\n\
@@ -740,7 +748,7 @@ static void insert_bpts(void)
740static void insert_cpu_bpts(void) 748static void insert_cpu_bpts(void)
741{ 749{
742 if (dabr.enabled) 750 if (dabr.enabled)
743 set_dabr(dabr.address | (dabr.enabled & 7)); 751 set_dabr(dabr.address | (dabr.enabled & 7), DABRX_ALL);
744 if (iabr && cpu_has_feature(CPU_FTR_IABR)) 752 if (iabr && cpu_has_feature(CPU_FTR_IABR))
745 mtspr(SPRN_IABR, iabr->address 753 mtspr(SPRN_IABR, iabr->address
746 | (iabr->enabled & (BP_IABR|BP_IABR_TE))); 754 | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
@@ -768,7 +776,7 @@ static void remove_bpts(void)
768 776
769static void remove_cpu_bpts(void) 777static void remove_cpu_bpts(void)
770{ 778{
771 set_dabr(0); 779 set_dabr(0, 0);
772 if (cpu_has_feature(CPU_FTR_IABR)) 780 if (cpu_has_feature(CPU_FTR_IABR))
773 mtspr(SPRN_IABR, 0); 781 mtspr(SPRN_IABR, 0);
774} 782}
@@ -2009,6 +2017,95 @@ static void xmon_rawdump (unsigned long adrs, long ndump)
2009 printf("\n"); 2017 printf("\n");
2010} 2018}
2011 2019
2020#ifdef CONFIG_PPC64
2021static void dump_one_paca(int cpu)
2022{
2023 struct paca_struct *p;
2024
2025 if (setjmp(bus_error_jmp) != 0) {
2026 printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
2027 return;
2028 }
2029
2030 catch_memory_errors = 1;
2031 sync();
2032
2033 p = &paca[cpu];
2034
2035 printf("paca for cpu 0x%x @ %p:\n", cpu, p);
2036
2037 printf(" %-*s = %s\n", 16, "possible", cpu_possible(cpu) ? "yes" : "no");
2038 printf(" %-*s = %s\n", 16, "present", cpu_present(cpu) ? "yes" : "no");
2039 printf(" %-*s = %s\n", 16, "online", cpu_online(cpu) ? "yes" : "no");
2040
2041#define DUMP(paca, name, format) \
2042 printf(" %-*s = %#-*"format"\t(0x%lx)\n", 16, #name, 18, paca->name, \
2043 offsetof(struct paca_struct, name));
2044
2045 DUMP(p, lock_token, "x");
2046 DUMP(p, paca_index, "x");
2047 DUMP(p, kernel_toc, "lx");
2048 DUMP(p, kernelbase, "lx");
2049 DUMP(p, kernel_msr, "lx");
2050#ifdef CONFIG_PPC_STD_MMU_64
2051 DUMP(p, stab_real, "lx");
2052 DUMP(p, stab_addr, "lx");
2053#endif
2054 DUMP(p, emergency_sp, "p");
2055 DUMP(p, data_offset, "lx");
2056 DUMP(p, hw_cpu_id, "x");
2057 DUMP(p, cpu_start, "x");
2058 DUMP(p, kexec_state, "x");
2059 DUMP(p, __current, "p");
2060 DUMP(p, kstack, "lx");
2061 DUMP(p, stab_rr, "lx");
2062 DUMP(p, saved_r1, "lx");
2063 DUMP(p, trap_save, "x");
2064 DUMP(p, soft_enabled, "x");
2065 DUMP(p, irq_happened, "x");
2066 DUMP(p, io_sync, "x");
2067 DUMP(p, irq_work_pending, "x");
2068 DUMP(p, nap_state_lost, "x");
2069
2070#undef DUMP
2071
2072 catch_memory_errors = 0;
2073 sync();
2074}
2075
2076static void dump_all_pacas(void)
2077{
2078 int cpu;
2079
2080 if (num_possible_cpus() == 0) {
2081 printf("No possible cpus, use 'dp #' to dump individual cpus\n");
2082 return;
2083 }
2084
2085 for_each_possible_cpu(cpu)
2086 dump_one_paca(cpu);
2087}
2088
2089static void dump_pacas(void)
2090{
2091 unsigned long num;
2092 int c;
2093
2094 c = inchar();
2095 if (c == 'a') {
2096 dump_all_pacas();
2097 return;
2098 }
2099
2100 termch = c; /* Put c back, it wasn't 'a' */
2101
2102 if (scanhex(&num))
2103 dump_one_paca(num);
2104 else
2105 dump_one_paca(xmon_owner);
2106}
2107#endif
2108
2012#define isxdigit(c) (('0' <= (c) && (c) <= '9') \ 2109#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
2013 || ('a' <= (c) && (c) <= 'f') \ 2110 || ('a' <= (c) && (c) <= 'f') \
2014 || ('A' <= (c) && (c) <= 'F')) 2111 || ('A' <= (c) && (c) <= 'F'))
@@ -2018,6 +2115,14 @@ dump(void)
2018 int c; 2115 int c;
2019 2116
2020 c = inchar(); 2117 c = inchar();
2118
2119#ifdef CONFIG_PPC64
2120 if (c == 'p') {
2121 dump_pacas();
2122 return;
2123 }
2124#endif
2125
2021 if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n') 2126 if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
2022 termch = c; 2127 termch = c;
2023 scanhex((void *)&adrs); 2128 scanhex((void *)&adrs);
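
The new xmon "dp"/"dpa" commands walk struct paca_struct with a DUMP() macro that prints each field's name, value, and offsetof() position, so the output doubles as a layout map. A standalone sketch of that idiom on a toy structure (plain C, not kernel code; the field names and values are made up):

#include <stdio.h>
#include <stddef.h>

struct toy_paca {
	unsigned int  lock_token;
	unsigned int  paca_index;
	unsigned long kernel_toc;
};

/* print "name = value (offset)" for one field of a struct toy_paca */
#define DUMP(p, name, fmt) \
	printf(" %-16s = %#-18" fmt "\t(offset 0x%zx)\n", #name, (p)->name, \
	       offsetof(struct toy_paca, name))

int main(void)
{
	struct toy_paca p = { .lock_token = 0x8000, .paca_index = 1,
			      .kernel_toc = 0xc0000000UL };

	DUMP(&p, lock_token, "x");
	DUMP(&p, paca_index, "x");
	DUMP(&p, kernel_toc, "lx");
	return 0;
}
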
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index d7f179cc2e98..638110efae9b 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -34,7 +34,6 @@
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/of.h> 35#include <linux/of.h>
36#include <asm/pSeries_reconfig.h> 36#include <asm/pSeries_reconfig.h>
37#include <asm/abs_addr.h>
38#include <asm/hvcall.h> 37#include <asm/hvcall.h>
39#include <asm/vio.h> 38#include <asm/vio.h>
40 39
@@ -104,10 +103,10 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
104 /* determine the start and end for this address range - slightly 103 /* determine the start and end for this address range - slightly
105 * different if this is in VMALLOC_REGION */ 104 * different if this is in VMALLOC_REGION */
106 if (is_vmalloc_addr(start_addr)) 105 if (is_vmalloc_addr(start_addr))
107 sg_addr = phys_to_abs(page_to_phys(vmalloc_to_page(start_addr))) 106 sg_addr = page_to_phys(vmalloc_to_page(start_addr))
108 + offset_in_page(sg_addr); 107 + offset_in_page(sg_addr);
109 else 108 else
110 sg_addr = virt_to_abs(sg_addr); 109 sg_addr = __pa(sg_addr);
111 110
112 end_addr = sg_addr + len; 111 end_addr = sg_addr + len;
113 112
@@ -265,17 +264,17 @@ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
265 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; 264 nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
266 265
267 nx_ctx->op.flags = function; 266 nx_ctx->op.flags = function;
268 nx_ctx->op.csbcpb = virt_to_abs(nx_ctx->csbcpb); 267 nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
269 nx_ctx->op.in = virt_to_abs(nx_ctx->in_sg); 268 nx_ctx->op.in = __pa(nx_ctx->in_sg);
270 nx_ctx->op.out = virt_to_abs(nx_ctx->out_sg); 269 nx_ctx->op.out = __pa(nx_ctx->out_sg);
271 270
272 if (nx_ctx->csbcpb_aead) { 271 if (nx_ctx->csbcpb_aead) {
273 nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT; 272 nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;
274 273
275 nx_ctx->op_aead.flags = function; 274 nx_ctx->op_aead.flags = function;
276 nx_ctx->op_aead.csbcpb = virt_to_abs(nx_ctx->csbcpb_aead); 275 nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
277 nx_ctx->op_aead.in = virt_to_abs(nx_ctx->in_sg); 276 nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
278 nx_ctx->op_aead.out = virt_to_abs(nx_ctx->out_sg); 277 nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
279 } 278 }
280} 279}
281 280
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index a1e791ec25d3..4fe66fa183ec 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -212,7 +212,7 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
212 return IRQ_HANDLED; 212 return IRQ_HANDLED;
213} 213}
214 214
215static int __devinit mpc85xx_pci_err_probe(struct platform_device *op) 215int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
216{ 216{
217 struct edac_pci_ctl_info *pci; 217 struct edac_pci_ctl_info *pci;
218 struct mpc85xx_pci_pdata *pdata; 218 struct mpc85xx_pci_pdata *pdata;
@@ -226,6 +226,16 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
226 if (!pci) 226 if (!pci)
227 return -ENOMEM; 227 return -ENOMEM;
228 228
229 /* make sure error reporting method is sane */
230 switch (edac_op_state) {
231 case EDAC_OPSTATE_POLL:
232 case EDAC_OPSTATE_INT:
233 break;
234 default:
235 edac_op_state = EDAC_OPSTATE_INT;
236 break;
237 }
238
229 pdata = pci->pvt_info; 239 pdata = pci->pvt_info;
230 pdata->name = "mpc85xx_pci_err"; 240 pdata->name = "mpc85xx_pci_err";
231 pdata->irq = NO_IRQ; 241 pdata->irq = NO_IRQ;
@@ -315,6 +325,7 @@ err:
315 devres_release_group(&op->dev, mpc85xx_pci_err_probe); 325 devres_release_group(&op->dev, mpc85xx_pci_err_probe);
316 return res; 326 return res;
317} 327}
328EXPORT_SYMBOL(mpc85xx_pci_err_probe);
318 329
319static int mpc85xx_pci_err_remove(struct platform_device *op) 330static int mpc85xx_pci_err_remove(struct platform_device *op)
320{ 331{
@@ -338,27 +349,6 @@ static int mpc85xx_pci_err_remove(struct platform_device *op)
338 return 0; 349 return 0;
339} 350}
340 351
341static struct of_device_id mpc85xx_pci_err_of_match[] = {
342 {
343 .compatible = "fsl,mpc8540-pcix",
344 },
345 {
346 .compatible = "fsl,mpc8540-pci",
347 },
348 {},
349};
350MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
351
352static struct platform_driver mpc85xx_pci_err_driver = {
353 .probe = mpc85xx_pci_err_probe,
354 .remove = __devexit_p(mpc85xx_pci_err_remove),
355 .driver = {
356 .name = "mpc85xx_pci_err",
357 .owner = THIS_MODULE,
358 .of_match_table = mpc85xx_pci_err_of_match,
359 },
360};
361
362#endif /* CONFIG_PCI */ 352#endif /* CONFIG_PCI */
363 353
364/**************************** L2 Err device ***************************/ 354/**************************** L2 Err device ***************************/
@@ -1210,12 +1200,6 @@ static int __init mpc85xx_mc_init(void)
1210 if (res) 1200 if (res)
1211 printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n"); 1201 printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
1212 1202
1213#ifdef CONFIG_PCI
1214 res = platform_driver_register(&mpc85xx_pci_err_driver);
1215 if (res)
1216 printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
1217#endif
1218
1219#ifdef CONFIG_FSL_SOC_BOOKE 1203#ifdef CONFIG_FSL_SOC_BOOKE
1220 pvr = mfspr(SPRN_PVR); 1204 pvr = mfspr(SPRN_PVR);
1221 1205
@@ -1252,9 +1236,6 @@ static void __exit mpc85xx_mc_exit(void)
1252 on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0); 1236 on_each_cpu(mpc85xx_mc_restore_hid1, NULL, 0);
1253 } 1237 }
1254#endif 1238#endif
1255#ifdef CONFIG_PCI
1256 platform_driver_unregister(&mpc85xx_pci_err_driver);
1257#endif
1258 platform_driver_unregister(&mpc85xx_l2_err_driver); 1239 platform_driver_unregister(&mpc85xx_l2_err_driver);
1259 platform_driver_unregister(&mpc85xx_mc_err_driver); 1240 platform_driver_unregister(&mpc85xx_mc_err_driver);
1260} 1241}
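
mpc85xx_pci_err_probe() is now exported and reached from the FSL PCI platform driver instead of through its own platform driver, so the new switch presumably guards against edac_op_state not yet holding a supported reporting mode when the probe runs. A standalone sketch of that clamp (plain C, not kernel code; the enum is a local stand-in for the EDAC globals):

#include <stdio.h>

enum op_state { OPSTATE_INVAL = 0, OPSTATE_POLL, OPSTATE_NMI, OPSTATE_INT };

static enum op_state sanitize(enum op_state s)
{
	switch (s) {
	case OPSTATE_POLL:
	case OPSTATE_INT:
		return s;		/* already a supported reporting mode */
	default:
		return OPSTATE_INT;	/* fall back to interrupt-driven reporting */
	}
}

int main(void)
{
	printf("%d -> %d\n", OPSTATE_INVAL, sanitize(OPSTATE_INVAL));
	printf("%d -> %d\n", OPSTATE_POLL, sanitize(OPSTATE_POLL));
	return 0;
}
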
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index d9b0ebcb67d7..8f5290147e8a 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -220,7 +220,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
220 cq = ERR_PTR(-EAGAIN); 220 cq = ERR_PTR(-EAGAIN);
221 goto create_cq_exit4; 221 goto create_cq_exit4;
222 } 222 }
223 rpage = virt_to_abs(vpage); 223 rpage = __pa(vpage);
224 224
225 h_ret = hipz_h_register_rpage_cq(adapter_handle, 225 h_ret = hipz_h_register_rpage_cq(adapter_handle,
226 my_cq->ipz_cq_handle, 226 my_cq->ipz_cq_handle,
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 818d721fc448..90da6747d395 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -101,7 +101,7 @@ int ehca_create_eq(struct ehca_shca *shca,
101 if (!vpage) 101 if (!vpage)
102 goto create_eq_exit2; 102 goto create_eq_exit2;
103 103
104 rpage = virt_to_abs(vpage); 104 rpage = __pa(vpage);
105 h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle, 105 h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
106 eq->ipz_eq_handle, 106 eq->ipz_eq_handle,
107 &eq->pf, 107 &eq->pf,
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index b781b2cb0624..87844869dcc2 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1136,7 +1136,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1136 } 1136 }
1137 1137
1138 if (rnum > 1) { 1138 if (rnum > 1) {
1139 rpage = virt_to_abs(kpage); 1139 rpage = __pa(kpage);
1140 if (!rpage) { 1140 if (!rpage) {
1141 ehca_err(&shca->ib_device, "kpage=%p i=%x", 1141 ehca_err(&shca->ib_device, "kpage=%p i=%x",
1142 kpage, i); 1142 kpage, i);
@@ -1231,7 +1231,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1231 pginfo->num_kpages, pginfo->num_hwpages, kpage); 1231 pginfo->num_kpages, pginfo->num_hwpages, kpage);
1232 goto ehca_rereg_mr_rereg1_exit1; 1232 goto ehca_rereg_mr_rereg1_exit1;
1233 } 1233 }
1234 rpage = virt_to_abs(kpage); 1234 rpage = __pa(kpage);
1235 if (!rpage) { 1235 if (!rpage) {
1236 ehca_err(&shca->ib_device, "kpage=%p", kpage); 1236 ehca_err(&shca->ib_device, "kpage=%p", kpage);
1237 ret = -EFAULT; 1237 ret = -EFAULT;
@@ -1525,7 +1525,7 @@ static inline void *ehca_calc_sectbase(int top, int dir, int idx)
1525 unsigned long ret = idx; 1525 unsigned long ret = idx;
1526 ret |= dir << EHCA_DIR_INDEX_SHIFT; 1526 ret |= dir << EHCA_DIR_INDEX_SHIFT;
1527 ret |= top << EHCA_TOP_INDEX_SHIFT; 1527 ret |= top << EHCA_TOP_INDEX_SHIFT;
1528 return abs_to_virt(ret << SECTION_SIZE_BITS); 1528 return __va(ret << SECTION_SIZE_BITS);
1529} 1529}
1530 1530
1531#define ehca_bmap_valid(entry) \ 1531#define ehca_bmap_valid(entry) \
@@ -1537,7 +1537,7 @@ static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
1537{ 1537{
1538 u64 h_ret = 0; 1538 u64 h_ret = 0;
1539 unsigned long page = 0; 1539 unsigned long page = 0;
1540 u64 rpage = virt_to_abs(kpage); 1540 u64 rpage = __pa(kpage);
1541 int page_count; 1541 int page_count;
1542 1542
1543 void *sectbase = ehca_calc_sectbase(top, dir, idx); 1543 void *sectbase = ehca_calc_sectbase(top, dir, idx);
@@ -1553,7 +1553,7 @@ static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
1553 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count); 1553 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
1554 rnum++) { 1554 rnum++) {
1555 void *pg = sectbase + ((page++) * pginfo->hwpage_size); 1555 void *pg = sectbase + ((page++) * pginfo->hwpage_size);
1556 kpage[rnum] = virt_to_abs(pg); 1556 kpage[rnum] = __pa(pg);
1557 } 1557 }
1558 1558
1559 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr, 1559 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
@@ -1870,9 +1870,8 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1870 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) { 1870 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1871 pgaddr = page_to_pfn(sg_page(&chunk->page_list[i])) 1871 pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
1872 << PAGE_SHIFT ; 1872 << PAGE_SHIFT ;
1873 *kpage = phys_to_abs(pgaddr + 1873 *kpage = pgaddr + (pginfo->next_hwpage *
1874 (pginfo->next_hwpage * 1874 pginfo->hwpage_size);
1875 pginfo->hwpage_size));
1876 if ( !(*kpage) ) { 1875 if ( !(*kpage) ) {
1877 ehca_gen_err("pgaddr=%llx " 1876 ehca_gen_err("pgaddr=%llx "
1878 "chunk->page_list[i]=%llx " 1877 "chunk->page_list[i]=%llx "
@@ -1927,7 +1926,7 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
1927 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1926 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
1928 if (ehca_debug_level >= 3) 1927 if (ehca_debug_level >= 3)
1929 ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, 1928 ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
1930 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1929 *(u64 *)__va(pgaddr));
1931 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1930 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1932 ehca_gen_err("uncontiguous page found pgaddr=%llx " 1931 ehca_gen_err("uncontiguous page found pgaddr=%llx "
1933 "prev_pgaddr=%llx page_list_i=%x", 1932 "prev_pgaddr=%llx page_list_i=%x",
@@ -1962,7 +1961,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1962 if (nr_kpages == kpages_per_hwpage) { 1961 if (nr_kpages == kpages_per_hwpage) {
1963 pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i])) 1962 pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
1964 << PAGE_SHIFT ); 1963 << PAGE_SHIFT );
1965 *kpage = phys_to_abs(pgaddr); 1964 *kpage = pgaddr;
1966 if ( !(*kpage) ) { 1965 if ( !(*kpage) ) {
1967 ehca_gen_err("pgaddr=%llx i=%x", 1966 ehca_gen_err("pgaddr=%llx i=%x",
1968 pgaddr, i); 1967 pgaddr, i);
@@ -1990,13 +1989,11 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1990 (pginfo->hwpage_size - 1)) >> 1989 (pginfo->hwpage_size - 1)) >>
1991 PAGE_SHIFT; 1990 PAGE_SHIFT;
1992 nr_kpages -= pginfo->kpage_cnt; 1991 nr_kpages -= pginfo->kpage_cnt;
1993 *kpage = phys_to_abs( 1992 *kpage = pgaddr &
1994 pgaddr & 1993 ~(pginfo->hwpage_size - 1);
1995 ~(pginfo->hwpage_size - 1));
1996 } 1994 }
1997 if (ehca_debug_level >= 3) { 1995 if (ehca_debug_level >= 3) {
1998 u64 val = *(u64 *)abs_to_virt( 1996 u64 val = *(u64 *)__va(pgaddr);
1999 phys_to_abs(pgaddr));
2000 ehca_gen_dbg("kpage=%llx chunk_page=%llx " 1997 ehca_gen_dbg("kpage=%llx chunk_page=%llx "
2001 "value=%016llx", 1998 "value=%016llx",
2002 *kpage, pgaddr, val); 1999 *kpage, pgaddr, val);
@@ -2084,9 +2081,8 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
2084 pginfo->num_hwpages, i); 2081 pginfo->num_hwpages, i);
2085 return -EFAULT; 2082 return -EFAULT;
2086 } 2083 }
2087 *kpage = phys_to_abs( 2084 *kpage = (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
2088 (pbuf->addr & ~(pginfo->hwpage_size - 1)) + 2085 (pginfo->next_hwpage * pginfo->hwpage_size);
2089 (pginfo->next_hwpage * pginfo->hwpage_size));
2090 if ( !(*kpage) && pbuf->addr ) { 2086 if ( !(*kpage) && pbuf->addr ) {
2091 ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx " 2087 ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
2092 "next_hwpage=%llx", pbuf->addr, 2088 "next_hwpage=%llx", pbuf->addr,
@@ -2124,8 +2120,8 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
2124 /* loop over desired page_list entries */ 2120 /* loop over desired page_list entries */
2125 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem; 2121 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
2126 for (i = 0; i < number; i++) { 2122 for (i = 0; i < number; i++) {
2127 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + 2123 *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
2128 pginfo->next_hwpage * pginfo->hwpage_size); 2124 pginfo->next_hwpage * pginfo->hwpage_size;
2129 if ( !(*kpage) ) { 2125 if ( !(*kpage) ) {
2130 ehca_gen_err("*fmrlist=%llx fmrlist=%p " 2126 ehca_gen_err("*fmrlist=%llx fmrlist=%p "
2131 "next_listelem=%llx next_hwpage=%llx", 2127 "next_listelem=%llx next_hwpage=%llx",
@@ -2152,8 +2148,7 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
2152 u64 prev = *kpage; 2148 u64 prev = *kpage;
2153 /* check if adrs are contiguous */ 2149 /* check if adrs are contiguous */
2154 for (j = 1; j < cnt_per_hwpage; j++) { 2150 for (j = 1; j < cnt_per_hwpage; j++) {
2155 u64 p = phys_to_abs(fmrlist[j] & 2151 u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
2156 ~(pginfo->hwpage_size - 1));
2157 if (prev + pginfo->u.fmr.fmr_pgsize != p) { 2152 if (prev + pginfo->u.fmr.fmr_pgsize != p) {
2158 ehca_gen_err("uncontiguous fmr pages " 2153 ehca_gen_err("uncontiguous fmr pages "
2159 "found prev=%llx p=%llx " 2154 "found prev=%llx p=%llx "
@@ -2388,8 +2383,8 @@ static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
2388 memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE); 2383 memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
2389 } 2384 }
2390 2385
2391 start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE; 2386 start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
2392 end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE; 2387 end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
2393 for (i = start_section; i < end_section; i++) { 2388 for (i = start_section; i < end_section; i++) {
2394 int ret; 2389 int ret;
2395 top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT); 2390 top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
@@ -2508,7 +2503,7 @@ static u64 ehca_map_vaddr(void *caddr)
2508 if (!ehca_bmap) 2503 if (!ehca_bmap)
2509 return EHCA_INVAL_ADDR; 2504 return EHCA_INVAL_ADDR;
2510 2505
2511 abs_addr = virt_to_abs(caddr); 2506 abs_addr = __pa(caddr);
2512 top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT); 2507 top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
2513 if (!ehca_bmap_valid(ehca_bmap->top[top])) 2508 if (!ehca_bmap_valid(ehca_bmap->top[top]))
2514 return EHCA_INVAL_ADDR; 2509 return EHCA_INVAL_ADDR;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 964f85520798..149393915ae5 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -321,7 +321,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
321 ret = -EINVAL; 321 ret = -EINVAL;
322 goto init_qp_queue1; 322 goto init_qp_queue1;
323 } 323 }
324 rpage = virt_to_abs(vpage); 324 rpage = __pa(vpage);
325 325
326 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, 326 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
327 my_qp->ipz_qp_handle, 327 my_qp->ipz_qp_handle,
@@ -1094,7 +1094,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
1094 ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p", 1094 ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
1095 qp_num, bad_send_wqe_p); 1095 qp_num, bad_send_wqe_p);
1096 /* convert wqe pointer to vadr */ 1096 /* convert wqe pointer to vadr */
1097 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); 1097 bad_send_wqe_v = __va((u64)bad_send_wqe_p);
1098 if (ehca_debug_level >= 2) 1098 if (ehca_debug_level >= 2)
1099 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); 1099 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
1100 squeue = &my_qp->ipz_squeue; 1100 squeue = &my_qp->ipz_squeue;
@@ -1138,7 +1138,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1138 /* convert real to abs address */ 1138 /* convert real to abs address */
1139 wqe_p = wqe_p & (~(1UL << 63)); 1139 wqe_p = wqe_p & (~(1UL << 63));
1140 1140
1141 wqe_v = abs_to_virt(wqe_p); 1141 wqe_v = __va(wqe_p);
1142 1142
1143 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) { 1143 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
1144 ehca_gen_err("Invalid offset for calculating left cqes " 1144 ehca_gen_err("Invalid offset for calculating left cqes "
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index fd05f48f6b0b..47f94984353d 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -135,7 +135,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
135 mad_hdr->attr_mod); 135 mad_hdr->attr_mod);
136 } 136 }
137 for (j = 0; j < send_wr->num_sge; j++) { 137 for (j = 0; j < send_wr->num_sge; j++) {
138 u8 *data = (u8 *)abs_to_virt(sge->addr); 138 u8 *data = __va(sge->addr);
139 ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " 139 ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
140 "lkey=%x", 140 "lkey=%x",
141 idx, j, data, sge->length, sge->lkey); 141 idx, j, data, sge->length, sge->lkey);
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 54c0d23bad92..d280b12aae64 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -59,7 +59,6 @@
59#include <linux/device.h> 59#include <linux/device.h>
60 60
61#include <linux/atomic.h> 61#include <linux/atomic.h>
62#include <asm/abs_addr.h>
63#include <asm/ibmebus.h> 62#include <asm/ibmebus.h>
64#include <asm/io.h> 63#include <asm/io.h>
65#include <asm/pgtable.h> 64#include <asm/pgtable.h>
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index e6f9cdd94c7a..2d41d04fd959 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -396,7 +396,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
396 struct hipz_query_port *query_port_response_block) 396 struct hipz_query_port *query_port_response_block)
397{ 397{
398 u64 ret; 398 u64 ret;
399 u64 r_cb = virt_to_abs(query_port_response_block); 399 u64 r_cb = __pa(query_port_response_block);
400 400
401 if (r_cb & (EHCA_PAGESIZE-1)) { 401 if (r_cb & (EHCA_PAGESIZE-1)) {
402 ehca_gen_err("response block not page aligned"); 402 ehca_gen_err("response block not page aligned");
@@ -438,7 +438,7 @@ u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
438u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, 438u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
439 struct hipz_query_hca *query_hca_rblock) 439 struct hipz_query_hca *query_hca_rblock)
440{ 440{
441 u64 r_cb = virt_to_abs(query_hca_rblock); 441 u64 r_cb = __pa(query_hca_rblock);
442 442
443 if (r_cb & (EHCA_PAGESIZE-1)) { 443 if (r_cb & (EHCA_PAGESIZE-1)) {
444 ehca_gen_err("response_block=%p not page aligned", 444 ehca_gen_err("response_block=%p not page aligned",
@@ -577,7 +577,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
577 adapter_handle.handle, /* r4 */ 577 adapter_handle.handle, /* r4 */
578 qp_handle.handle, /* r5 */ 578 qp_handle.handle, /* r5 */
579 update_mask, /* r6 */ 579 update_mask, /* r6 */
580 virt_to_abs(mqpcb), /* r7 */ 580 __pa(mqpcb), /* r7 */
581 0, 0, 0, 0, 0); 581 0, 0, 0, 0, 0);
582 582
583 if (ret == H_NOT_ENOUGH_RESOURCES) 583 if (ret == H_NOT_ENOUGH_RESOURCES)
@@ -595,7 +595,7 @@ u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
595 return ehca_plpar_hcall_norets(H_QUERY_QP, 595 return ehca_plpar_hcall_norets(H_QUERY_QP,
596 adapter_handle.handle, /* r4 */ 596 adapter_handle.handle, /* r4 */
597 qp_handle.handle, /* r5 */ 597 qp_handle.handle, /* r5 */
598 virt_to_abs(qqpcb), /* r6 */ 598 __pa(qqpcb), /* r6 */
599 0, 0, 0, 0); 599 0, 0, 0, 0);
600} 600}
601 601
@@ -787,7 +787,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
787 if (count > 1) { 787 if (count > 1) {
788 u64 *kpage; 788 u64 *kpage;
789 int i; 789 int i;
790 kpage = (u64 *)abs_to_virt(logical_address_of_page); 790 kpage = __va(logical_address_of_page);
791 for (i = 0; i < count; i++) 791 for (i = 0; i < count; i++)
792 ehca_gen_dbg("kpage[%d]=%p", 792 ehca_gen_dbg("kpage[%d]=%p",
793 i, (void *)kpage[i]); 793 i, (void *)kpage[i]);
@@ -944,7 +944,7 @@ u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
944 void *rblock, 944 void *rblock,
945 unsigned long *byte_count) 945 unsigned long *byte_count)
946{ 946{
947 u64 r_cb = virt_to_abs(rblock); 947 u64 r_cb = __pa(rblock);
948 948
949 if (r_cb & (EHCA_PAGESIZE-1)) { 949 if (r_cb & (EHCA_PAGESIZE-1)) {
950 ehca_gen_err("rblock not page aligned."); 950 ehca_gen_err("rblock not page aligned.");
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 1898d6e7cce5..62c71fadb4d9 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -81,7 +81,7 @@ int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
81{ 81{
82 int i; 82 int i;
83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) { 83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
84 u64 page = (u64)virt_to_abs(queue->queue_pages[i]); 84 u64 page = __pa(queue->queue_pages[i]);
85 if (addr >= page && addr < page + queue->pagesize) { 85 if (addr >= page && addr < page + queue->pagesize) {
86 *q_offset = addr - page + i * queue->pagesize; 86 *q_offset = addr - page + i * queue->pagesize;
87 return 0; 87 return 0;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 54ac7ffacb40..7d5a6b40b31c 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -45,7 +45,6 @@
45#include <asm/pmac_feature.h> 45#include <asm/pmac_feature.h>
46#include <asm/smu.h> 46#include <asm/smu.h>
47#include <asm/sections.h> 47#include <asm/sections.h>
48#include <asm/abs_addr.h>
49#include <asm/uaccess.h> 48#include <asm/uaccess.h>
50 49
51#define VERSION "0.7" 50#define VERSION "0.7"
@@ -502,7 +501,7 @@ int __init smu_init (void)
502 * 32 bits value safely 501 * 32 bits value safely
503 */ 502 */
504 smu->cmd_buf_abs = (u32)smu_cmdbuf_abs; 503 smu->cmd_buf_abs = (u32)smu_cmdbuf_abs;
505 smu->cmd_buf = (struct smu_cmd_buf *)abs_to_virt(smu_cmdbuf_abs); 504 smu->cmd_buf = __va(smu_cmdbuf_abs);
506 505
507 smu->db_node = of_find_node_by_name(NULL, "smu-doorbell"); 506 smu->db_node = of_find_node_by_name(NULL, "smu-doorbell");
508 if (smu->db_node == NULL) { 507 if (smu->db_node == NULL) {
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9602c1b7e27e..01e2f2e87d8c 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -31,6 +31,7 @@
31#include <linux/mtd/nand_ecc.h> 31#include <linux/mtd/nand_ecc.h>
32#include <asm/fsl_ifc.h> 32#include <asm/fsl_ifc.h>
33 33
34#define FSL_IFC_V1_1_0 0x01010000
34#define ERR_BYTE 0xFF /* Value returned for read 35#define ERR_BYTE 0xFF /* Value returned for read
35 bytes when read failed */ 36 bytes when read failed */
36#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait 37#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
@@ -773,13 +774,62 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
773 return 0; 774 return 0;
774} 775}
775 776
777static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
778{
779 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
780 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
781 uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
782 uint32_t cs = priv->bank;
783
784 /* Save CSOR and CSOR_ext */
785 csor = in_be32(&ifc->csor_cs[cs].csor);
786 csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
787
 788 /* change PageSize to 8K and SpareSize to 1K */
789 csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
790 out_be32(&ifc->csor_cs[cs].csor, csor_8k);
791 out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
792
793 /* READID */
794 out_be32(&ifc->ifc_nand.nand_fir0,
795 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
796 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
797 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
798 out_be32(&ifc->ifc_nand.nand_fcr0,
799 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
800 out_be32(&ifc->ifc_nand.row3, 0x0);
801
802 out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
803
804 /* Program ROW0/COL0 */
805 out_be32(&ifc->ifc_nand.row0, 0x0);
806 out_be32(&ifc->ifc_nand.col0, 0x0);
807
808 /* set the chip select for NAND Transaction */
809 out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
810
811 /* start read seq */
812 out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
813
814 /* wait for command complete flag or timeout */
815 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
816 IFC_TIMEOUT_MSECS * HZ/1000);
817
818 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
819 printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
820
821 /* Restore CSOR and CSOR_ext */
822 out_be32(&ifc->csor_cs[cs].csor, csor);
823 out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
824}
825
776static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) 826static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
777{ 827{
778 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 828 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
779 struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 829 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
780 struct nand_chip *chip = &priv->chip; 830 struct nand_chip *chip = &priv->chip;
781 struct nand_ecclayout *layout; 831 struct nand_ecclayout *layout;
782 u32 csor; 832 u32 csor, ver;
783 833
784 /* Fill in fsl_ifc_mtd structure */ 834 /* Fill in fsl_ifc_mtd structure */
785 priv->mtd.priv = chip; 835 priv->mtd.priv = chip;
@@ -874,6 +924,10 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
874 chip->ecc.mode = NAND_ECC_SOFT; 924 chip->ecc.mode = NAND_ECC_SOFT;
875 } 925 }
876 926
927 ver = in_be32(&ifc->ifc_rev);
928 if (ver == FSL_IFC_V1_1_0)
929 fsl_ifc_sram_init(priv);
930
877 return 0; 931 return 0;
878} 932}
879 933
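
The NAND change above gates fsl_ifc_sram_init() on the IFC block revision read from ifc_rev. A standalone sketch of that gate (plain C, not kernel code; the major/minor decoding assumes the 0xMMmm0000 packing implied by FSL_IFC_V1_1_0 == 0x01010000 and is illustrative only):

#include <stdio.h>

#define FSL_IFC_V1_1_0	0x01010000u

static void maybe_sram_init(unsigned int rev)
{
	if (rev == FSL_IFC_V1_1_0)	/* workaround applies to this revision only */
		printf("rev %u.%u: running SRAM init workaround\n",
		       (rev >> 24) & 0xff, (rev >> 16) & 0xff);
	else
		printf("rev 0x%08x: workaround not needed\n", rev);
}

int main(void)
{
	maybe_sram_init(FSL_IFC_V1_1_0);
	maybe_sram_init(0x02000000u);
	return 0;
}
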
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index b8e46cc31e53..6be7b9839f35 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -35,7 +35,6 @@
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36 36
37#include <asm/ibmebus.h> 37#include <asm/ibmebus.h>
38#include <asm/abs_addr.h>
39#include <asm/io.h> 38#include <asm/io.h>
40 39
41#define DRV_NAME "ehea" 40#define DRV_NAME "ehea"
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
index 30f903332e92..d3a130ccdcc8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -141,7 +141,7 @@ u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
 			       qp_category,		/* R5 */
 			       qp_handle,		/* R6 */
 			       sel_mask,		/* R7 */
-			       virt_to_abs(cb_addr),	/* R8 */
+			       __pa(cb_addr),		/* R8 */
 			       0, 0);
 }
 
@@ -415,7 +415,7 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
 			  (u64) cat,			/* R5 */
 			  qp_handle,			/* R6 */
 			  sel_mask,			/* R7 */
-			  virt_to_abs(cb_addr),		/* R8 */
+			  __pa(cb_addr),		/* R8 */
 			  0, 0, 0, 0);			/* R9-R12 */
 
 	*inv_attr_id = outs[0];
@@ -528,7 +528,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
 {
 	u64 hret, cb_logaddr;
 
-	cb_logaddr = virt_to_abs(cb_addr);
+	cb_logaddr = __pa(cb_addr);
 
 	hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
 				       adapter_handle,		/* R4 */
@@ -545,7 +545,7 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
 			   void *cb_addr)
 {
 	u64 port_info;
-	u64 cb_logaddr = virt_to_abs(cb_addr);
+	u64 cb_logaddr = __pa(cb_addr);
 	u64 arr_index = 0;
 
 	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
@@ -567,7 +567,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
 	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	u64 port_info;
 	u64 arr_index = 0;
-	u64 cb_logaddr = virt_to_abs(cb_addr);
+	u64 cb_logaddr = __pa(cb_addr);
 
 	port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
 		  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
@@ -621,6 +621,6 @@ u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
 	return ehea_plpar_hcall_norets(H_ERROR_DATA,
 				       adapter_handle,		/* R4 */
 				       ressource_handle,	/* R5 */
-				       virt_to_abs(rblock),	/* R6 */
+				       __pa(rblock),		/* R6 */
 				       0, 0, 0, 0);		/* R7-R12 */
 }
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index cb66f574dc97..27f881758d16 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -163,7 +163,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
 		goto out_kill_hwq;
 	}
 
-	rpage = virt_to_abs(vpage);
+	rpage = __pa(vpage);
 	hret = ehea_h_register_rpage(adapter->handle,
 				     0, EHEA_CQ_REGISTER_ORIG,
 				     cq->fw_handle, rpage, 1);
@@ -290,7 +290,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
 		goto out_kill_hwq;
 	}
 
-	rpage = virt_to_abs(vpage);
+	rpage = __pa(vpage);
 
 	hret = ehea_h_register_rpage(adapter->handle, 0,
 				     EHEA_EQ_REGISTER_ORIG,
@@ -395,7 +395,7 @@ static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
 			pr_err("hw_qpageit_get_inc failed\n");
 			goto out_kill_hwq;
 		}
-		rpage = virt_to_abs(vpage);
+		rpage = __pa(vpage);
 		hret = ehea_h_register_rpage(adapter->handle,
 					     0, h_call_q_selector,
 					     qp->fw_handle, rpage, 1);
@@ -790,7 +790,7 @@ u64 ehea_map_vaddr(void *caddr)
 	if (!ehea_bmap)
 		return EHEA_INVAL_ADDR;
 
-	index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+	index = __pa(caddr) >> SECTION_SIZE_BITS;
 	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
 	if (!ehea_bmap->top[top])
 		return EHEA_INVAL_ADDR;
@@ -812,7 +812,7 @@ static inline void *ehea_calc_sectbase(int top, int dir, int idx)
 	unsigned long ret = idx;
 	ret |= dir << EHEA_DIR_INDEX_SHIFT;
 	ret |= top << EHEA_TOP_INDEX_SHIFT;
-	return abs_to_virt(ret << SECTION_SIZE_BITS);
+	return __va(ret << SECTION_SIZE_BITS);
 }
 
 static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
@@ -822,7 +822,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
 	void *pg;
 	u64 j, m, hret;
 	unsigned long k = 0;
-	u64 pt_abs = virt_to_abs(pt);
+	u64 pt_abs = __pa(pt);
 
 	void *sectbase = ehea_calc_sectbase(top, dir, idx);
 
@@ -830,7 +830,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
 
 	for (m = 0; m < EHEA_MAX_RPAGE; m++) {
 		pg = sectbase + ((k++) * EHEA_PAGESIZE);
-		pt[m] = virt_to_abs(pg);
+		pt[m] = __pa(pg);
 	}
 	hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
 					0, pt_abs, EHEA_MAX_RPAGE);
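The ehea conversions above (and the ps3fb one further down) are mechanical: on powerpc, virt_to_abs() and abs_to_virt() had effectively become thin wrappers around the generic physical/virtual helpers, so callers can use __pa()/__va() directly with no behavioural change intended. A hedged sketch of the assumed equivalence, not the actual arch/powerpc header:

/* Illustration of the assumed equivalence only -- not the real
 * <asm/abs_addr.h>.  "Absolute" addresses stopped being distinct from
 * plain physical addresses once the legacy remapping users were gone.
 */
#define virt_to_abs(va)		__pa(va)	/* kernel virtual -> physical */
#define abs_to_virt(pa)		__va(pa)	/* physical -> kernel virtual */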
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 1e117c2a3cad..b29e20b7862f 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -388,7 +388,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
 	/* Remove the EADS bridge device itself */
 	BUG_ON(!bus->self);
 	pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
-	eeh_remove_bus_device(bus->self);
+	eeh_remove_bus_device(bus->self, true);
 	pci_stop_and_remove_bus_device(bus->self);
 
 	return 0;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e3f29f61cbc3..fe6029f4df16 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6373,14 +6373,14 @@ static struct ata_port_info sata_port_info = {
 
 #ifdef CONFIG_PPC_PSERIES
 static const u16 ipr_blocked_processors[] = {
-	PV_NORTHSTAR,
-	PV_PULSAR,
-	PV_POWER4,
-	PV_ICESTAR,
-	PV_SSTAR,
-	PV_POWER4p,
-	PV_630,
-	PV_630p
+	PVR_NORTHSTAR,
+	PVR_PULSAR,
+	PVR_POWER4,
+	PVR_ICESTAR,
+	PVR_SSTAR,
+	PVR_POWER4p,
+	PVR_630,
+	PVR_630p
 };
 
 /**
@@ -6400,7 +6400,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
 
 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
-			if (__is_processor(ipr_blocked_processors[i]))
+			if (pvr_version_is(ipr_blocked_processors[i]))
 				return 1;
 		}
 	}
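The ipr hunks track a powerpc rename: the PV_* processor constants became PVR_* and __is_processor() was replaced by pvr_version_is(); both are checks on the version field of the Processor Version Register. A hedged usage sketch follows; the expansion described in the comment is the conventional powerpc form, and the wrapper function is hypothetical, not part of the driver:

#include <linux/types.h>
#include <asm/reg.h>		/* SPRN_PVR, PVR_* constants, pvr_version_is() */

/* Hypothetical helper, for illustration only. */
static bool running_on_power4_class_cpu(void)
{
	/* pvr_version_is(x) is expected to boil down to roughly
	 *	PVR_VER(mfspr(SPRN_PVR)) == (x)
	 * i.e. "does this CPU's PVR version field match the constant?"
	 */
	return pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p);
}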
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 4a652999380f..a5dec1ca1b82 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -245,6 +245,20 @@ static void hvc_port_destruct(struct tty_port *port)
 	kfree(hp);
 }
 
+static void hvc_check_console(int index)
+{
+	/* Already enabled, bail out */
+	if (hvc_console.flags & CON_ENABLED)
+		return;
+
+	/* If this index is what the user requested, then register
+	 * now (setup won't fail at this point).  It's ok to just
+	 * call register again if previously .setup failed.
+	 */
+	if (index == hvc_console.index)
+		register_console(&hvc_console);
+}
+
 /*
  * hvc_instantiate() is an early console discovery method which locates
  * consoles * prior to the vio subsystem discovering them. Hotplugged
@@ -275,12 +289,8 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
 	if (last_hvc < index)
 		last_hvc = index;
 
-	/* if this index is what the user requested, then register
-	 * now (setup won't fail at this point).  It's ok to just
-	 * call register again if previously .setup failed.
-	 */
-	if (index == hvc_console.index)
-		register_console(&hvc_console);
+	/* check if we need to re-register the kernel console */
+	hvc_check_console(index);
 
 	return 0;
 }
@@ -877,10 +887,15 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
 		i = ++last_hvc;
 
 	hp->index = i;
+	cons_ops[i] = ops;
+	vtermnos[i] = vtermno;
 
 	list_add_tail(&(hp->next), &hvc_structs);
 	spin_unlock(&hvc_structs_lock);
 
+	/* check if we need to re-register the kernel console */
+	hvc_check_console(i);
+
 	return hp;
 }
 EXPORT_SYMBOL_GPL(hvc_alloc);
@@ -893,8 +908,12 @@ int hvc_remove(struct hvc_struct *hp)
 	tty = tty_port_tty_get(&hp->port);
 
 	spin_lock_irqsave(&hp->lock, flags);
-	if (hp->index < MAX_NR_HVC_CONSOLES)
+	if (hp->index < MAX_NR_HVC_CONSOLES) {
+		console_lock();
 		vtermnos[hp->index] = -1;
+		cons_ops[hp->index] = NULL;
+		console_unlock();
+	}
 
 	/* Don't whack hp->irq because tty_hangup() will need to free the irq. */
 
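The hvc_console changes factor the "register the kernel console once the requested index exists" logic into hvc_check_console() and call it from both hvc_instantiate() and hvc_alloc(), so a backend that only appears at probe time still gets the kernel console attached; hvc_remove() now clears the per-index ops under console_lock(). For orientation, a hedged sketch of how a backend exercises those two entry points; the callbacks, names and numbers below are illustrative, not a real driver:

#include <linux/err.h>
#include "hvc_console.h"	/* hvc_instantiate(), hvc_alloc(), struct hv_ops */

/* Minimal illustrative backend callbacks (not a real driver). */
static int demo_get_chars(uint32_t vtermno, char *buf, int count)
{
	return 0;				/* nothing pending */
}

static int demo_put_chars(uint32_t vtermno, const char *buf, int count)
{
	return count;				/* pretend everything was sent */
}

static const struct hv_ops demo_hv_ops = {
	.get_chars = demo_get_chars,
	.put_chars = demo_put_chars,
};

static int demo_console_setup(void)
{
	struct hvc_struct *hp;

	/* Early path: claim index 0 for vterm 0 before the tty layer is up.
	 * With this series, this ends up in hvc_check_console().
	 */
	hvc_instantiate(0, 0, &demo_hv_ops);

	/* Probe path: allocate the tty side; hvc_alloc() now records the
	 * ops/vtermno for the index and re-checks the console as well.
	 */
	hp = hvc_alloc(0, 0 /* irq/data */, &demo_hv_ops, 256);
	return IS_ERR(hp) ? PTR_ERR(hp) : 0;
}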
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index ee307799271a..070c0ee68642 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -230,6 +230,69 @@ static const struct hv_ops hvterm_hvsi_ops = {
 	.tiocmset = hvterm_hvsi_tiocmset,
 };
 
+static void udbg_hvc_putc(char c)
+{
+	int count = -1;
+
+	if (!hvterm_privs[0])
+		return;
+
+	if (c == '\n')
+		udbg_hvc_putc('\r');
+
+	do {
+		switch(hvterm_privs[0]->proto) {
+		case HV_PROTOCOL_RAW:
+			count = hvterm_raw_put_chars(0, &c, 1);
+			break;
+		case HV_PROTOCOL_HVSI:
+			count = hvterm_hvsi_put_chars(0, &c, 1);
+			break;
+		}
+	} while(count == 0);
+}
+
+static int udbg_hvc_getc_poll(void)
+{
+	int rc = 0;
+	char c;
+
+	if (!hvterm_privs[0])
+		return -1;
+
+	switch(hvterm_privs[0]->proto) {
+	case HV_PROTOCOL_RAW:
+		rc = hvterm_raw_get_chars(0, &c, 1);
+		break;
+	case HV_PROTOCOL_HVSI:
+		rc = hvterm_hvsi_get_chars(0, &c, 1);
+		break;
+	}
+	if (!rc)
+		return -1;
+	return c;
+}
+
+static int udbg_hvc_getc(void)
+{
+	int ch;
+
+	if (!hvterm_privs[0])
+		return -1;
+
+	for (;;) {
+		ch = udbg_hvc_getc_poll();
+		if (ch == -1) {
+			/* This shouldn't be needed...but... */
+			volatile unsigned long delay;
+			for (delay=0; delay < 2000000; delay++)
+				;
+		} else {
+			return ch;
+		}
+	}
+}
+
 static int __devinit hvc_vio_probe(struct vio_dev *vdev,
 				   const struct vio_device_id *id)
 {
@@ -289,6 +352,13 @@ static int __devinit hvc_vio_probe(struct vio_dev *vdev,
 		return PTR_ERR(hp);
 	dev_set_drvdata(&vdev->dev, hp);
 
+	/* register udbg if it's not there already for console 0 */
+	if (hp->index == 0 && !udbg_putc) {
+		udbg_putc = udbg_hvc_putc;
+		udbg_getc = udbg_hvc_getc;
+		udbg_getc_poll = udbg_hvc_getc_poll;
+	}
+
 	return 0;
 }
 
@@ -331,59 +401,6 @@ static void __exit hvc_vio_exit(void)
 }
 module_exit(hvc_vio_exit);
 
-static void udbg_hvc_putc(char c)
-{
-	int count = -1;
-
-	if (c == '\n')
-		udbg_hvc_putc('\r');
-
-	do {
-		switch(hvterm_priv0.proto) {
-		case HV_PROTOCOL_RAW:
-			count = hvterm_raw_put_chars(0, &c, 1);
-			break;
-		case HV_PROTOCOL_HVSI:
-			count = hvterm_hvsi_put_chars(0, &c, 1);
-			break;
-		}
-	} while(count == 0);
-}
-
-static int udbg_hvc_getc_poll(void)
-{
-	int rc = 0;
-	char c;
-
-	switch(hvterm_priv0.proto) {
-	case HV_PROTOCOL_RAW:
-		rc = hvterm_raw_get_chars(0, &c, 1);
-		break;
-	case HV_PROTOCOL_HVSI:
-		rc = hvterm_hvsi_get_chars(0, &c, 1);
-		break;
-	}
-	if (!rc)
-		return -1;
-	return c;
-}
-
-static int udbg_hvc_getc(void)
-{
-	int ch;
-	for (;;) {
-		ch = udbg_hvc_getc_poll();
-		if (ch == -1) {
-			/* This shouldn't be needed...but... */
-			volatile unsigned long delay;
-			for (delay=0; delay < 2000000; delay++)
-				;
-		} else {
-			return ch;
-		}
-	}
-}
-
 void __init hvc_vio_init_early(void)
 {
 	struct device_node *stdout_node;
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 213fbbcf613b..4e292f29bf5d 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -31,7 +31,6 @@
 #include <linux/fb.h>
 #include <linux/init.h>
 
-#include <asm/abs_addr.h>
 #include <asm/cell-regs.h>
 #include <asm/lv1call.h>
 #include <asm/ps3av.h>
@@ -1141,7 +1140,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
 	 */
 	fb_start = ps3fb_videomemory.address + GPU_FB_START;
 	info->screen_base = (char __force __iomem *)fb_start;
-	info->fix.smem_start = virt_to_abs(fb_start);
+	info->fix.smem_start = __pa(fb_start);
 	info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
 
 	info->pseudo_palette = par->pseudo_palette;