-rwxr-xr-xDocumentation/devicetree/bindings/net/can/fsl-flexcan.txt61
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/ifc.txt76
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt38
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/mpic.txt2
-rw-r--r--arch/powerpc/Kconfig6
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/boot/Makefile6
-rw-r--r--arch/powerpc/boot/crt0.S116
-rw-r--r--arch/powerpc/boot/dts/p1020rdb.dts332
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_camp_core0.dts213
-rw-r--r--arch/powerpc/boot/dts/p1020rdb_camp_core1.dts148
-rw-r--r--arch/powerpc/boot/dts/p1020si.dtsi377
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts106
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dts374
-rw-r--r--arch/powerpc/boot/dts/p2020rdb.dts378
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core0.dts245
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core1.dts150
-rw-r--r--arch/powerpc/boot/dts/p2020si.dtsi382
-rw-r--r--arch/powerpc/boot/epapr.c66
-rwxr-xr-xarch/powerpc/boot/wrapper19
-rw-r--r--arch/powerpc/boot/zImage.coff.lds.S6
-rw-r--r--arch/powerpc/boot/zImage.lds.S57
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/mpc8540_ads_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/mpc8560_ads_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig1
-rw-r--r--arch/powerpc/configs/c2k_defconfig4
-rw-r--r--arch/powerpc/configs/e55xx_smp_defconfig39
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/configs/mpc86xx_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig4
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig4
-rw-r--r--arch/powerpc/configs/ps3_defconfig4
-rw-r--r--arch/powerpc/configs/pseries_defconfig8
-rw-r--r--arch/powerpc/include/asm/cputable.h55
-rw-r--r--arch/powerpc/include/asm/cputhreads.h12
-rw-r--r--arch/powerpc/include/asm/dbell.h3
-rw-r--r--arch/powerpc/include/asm/emulated_ops.h4
-rw-r--r--arch/powerpc/include/asm/exception-64s.h113
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h15
-rw-r--r--arch/powerpc/include/asm/firmware.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h12
-rw-r--r--arch/powerpc/include/asm/io-workarounds.h (renamed from arch/powerpc/platforms/cell/io-workarounds.h)1
-rw-r--r--arch/powerpc/include/asm/io.h33
-rw-r--r--arch/powerpc/include/asm/io_event_irq.h54
-rw-r--r--arch/powerpc/include/asm/irq.h18
-rw-r--r--arch/powerpc/include/asm/kexec.h2
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h1
-rw-r--r--arch/powerpc/include/asm/lppaca.h2
-rw-r--r--arch/powerpc/include/asm/machdep.h22
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h20
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h6
-rw-r--r--arch/powerpc/include/asm/mmu.h52
-rw-r--r--arch/powerpc/include/asm/mmu_context.h12
-rw-r--r--arch/powerpc/include/asm/mpic.h5
-rw-r--r--arch/powerpc/include/asm/pSeries_reconfig.h5
-rw-r--r--arch/powerpc/include/asm/paca.h11
-rw-r--r--arch/powerpc/include/asm/page_64.h21
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h13
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h35
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h1
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/include/asm/reg.h104
-rw-r--r--arch/powerpc/include/asm/reg_a2.h165
-rw-r--r--arch/powerpc/include/asm/reg_booke.h10
-rw-r--r--arch/powerpc/include/asm/rtas.h45
-rw-r--r--arch/powerpc/include/asm/scom.h156
-rw-r--r--arch/powerpc/include/asm/smp.h38
-rw-r--r--arch/powerpc/include/asm/system.h2
-rw-r--r--arch/powerpc/include/asm/tlbflush.h2
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/include/asm/wsp.h14
-rw-r--r--arch/powerpc/include/asm/xics.h142
-rw-r--r--arch/powerpc/kernel/Makefile6
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cpu_setup_a2.S114
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S3
-rw-r--r--arch/powerpc/kernel/cpu_setup_power7.S91
-rw-r--r--arch/powerpc/kernel/cputable.c66
-rw-r--r--arch/powerpc/kernel/crash.c91
-rw-r--r--arch/powerpc/kernel/dbell.c65
-rw-r--r--arch/powerpc/kernel/entry_64.S27
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S202
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S216
-rw-r--r--arch/powerpc/kernel/head_32.S22
-rw-r--r--arch/powerpc/kernel/head_64.S49
-rw-r--r--arch/powerpc/kernel/idle_power7.S97
-rw-r--r--arch/powerpc/kernel/io-workarounds.c (renamed from arch/powerpc/platforms/cell/io-workarounds.c)31
-rw-r--r--arch/powerpc/kernel/irq.c166
-rw-r--r--arch/powerpc/kernel/kgdb.c2
-rw-r--r--arch/powerpc/kernel/lparcfg.c53
-rw-r--r--arch/powerpc/kernel/misc_32.S11
-rw-r--r--arch/powerpc/kernel/misc_64.S13
-rw-r--r--arch/powerpc/kernel/paca.c30
-rw-r--r--arch/powerpc/kernel/pci_dn.c3
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c5
-rw-r--r--arch/powerpc/kernel/process.c20
-rw-r--r--arch/powerpc/kernel/prom.c64
-rw-r--r--arch/powerpc/kernel/prom_init.c30
-rw-r--r--arch/powerpc/kernel/rtas.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c22
-rw-r--r--arch/powerpc/kernel/setup_32.c1
-rw-r--r--arch/powerpc/kernel/setup_64.c44
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c138
-rw-r--r--arch/powerpc/kernel/sysfs.c38
-rw-r--r--arch/powerpc/kernel/traps.c28
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/kernel/udbg_16550.c51
-rw-r--r--arch/powerpc/kernel/vector.S2
-rw-r--r--arch/powerpc/kvm/book3s.c2
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S13
-rw-r--r--arch/powerpc/kvm/book3s_segment.S12
-rw-r--r--arch/powerpc/lib/alloc.c8
-rw-r--r--arch/powerpc/lib/copypage_64.S7
-rw-r--r--arch/powerpc/lib/devres.c6
-rw-r--r--arch/powerpc/lib/sstep.c61
-rw-r--r--arch/powerpc/mm/hash_low_64.S8
-rw-r--r--arch/powerpc/mm/hash_native_64.c18
-rw-r--r--arch/powerpc/mm/hash_utils_64.c62
-rw-r--r--arch/powerpc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/mm/mmu_context_hash64.c214
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c18
-rw-r--r--arch/powerpc/mm/numa.c17
-rw-r--r--arch/powerpc/mm/pgtable_32.c12
-rw-r--r--arch/powerpc/mm/pgtable_64.c15
-rw-r--r--arch/powerpc/mm/slb.c10
-rw-r--r--arch/powerpc/mm/slb_low.S8
-rw-r--r--arch/powerpc/mm/stab.c2
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c6
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c10
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c4
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c83
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c12
-rw-r--r--arch/powerpc/platforms/85xx/smp.c12
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c26
-rw-r--r--arch/powerpc/platforms/86xx/gef_pic.c10
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c99
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c6
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/Kconfig31
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype24
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/Kconfig4
-rw-r--r--arch/powerpc/platforms/cell/Makefile9
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c3
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.c27
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.h3
-rw-r--r--arch/powerpc/platforms/cell/beat_smp.c124
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c11
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.c25
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.h3
-rw-r--r--arch/powerpc/platforms/cell/celleb_setup.c4
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c16
-rw-r--r--arch/powerpc/platforms/cell/qpace_setup.c1
-rw-r--r--arch/powerpc/platforms/cell/setup.c4
-rw-r--r--arch/powerpc/platforms/cell/smp.c37
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c3
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c21
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c2
-rw-r--r--arch/powerpc/platforms/chrp/smp.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c15
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c15
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig4
-rw-r--r--arch/powerpc/platforms/iseries/exception.S62
-rw-r--r--arch/powerpc/platforms/iseries/irq.c13
-rw-r--r--arch/powerpc/platforms/iseries/setup.c9
-rw-r--r--arch/powerpc/platforms/iseries/smp.c45
-rw-r--r--arch/powerpc/platforms/iseries/smp.h6
-rw-r--r--arch/powerpc/platforms/powermac/Kconfig11
-rw-r--r--arch/powerpc/platforms/powermac/pic.c25
-rw-r--r--arch/powerpc/platforms/powermac/pic.h11
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h1
-rw-r--r--arch/powerpc/platforms/powermac/smp.c97
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c8
-rw-r--r--arch/powerpc/platforms/ps3/smp.c22
-rw-r--r--arch/powerpc/platforms/ps3/spu.c4
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig23
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c20
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c82
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c22
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c5
-rw-r--r--arch/powerpc/platforms/pseries/io_event_irq.c231
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c117
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c5
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c48
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h27
-rw-r--r--arch/powerpc/platforms/pseries/ras.c6
-rw-r--r--arch/powerpc/platforms/pseries/setup.c50
-rw-r--r--arch/powerpc/platforms/pseries/smp.c24
-rw-r--r--arch/powerpc/platforms/pseries/xics.c949
-rw-r--r--arch/powerpc/platforms/pseries/xics.h23
-rw-r--r--arch/powerpc/platforms/wsp/Kconfig28
-rw-r--r--arch/powerpc/platforms/wsp/Makefile6
-rw-r--r--arch/powerpc/platforms/wsp/ics.c712
-rw-r--r--arch/powerpc/platforms/wsp/ics.h20
-rw-r--r--arch/powerpc/platforms/wsp/opb_pic.c332
-rw-r--r--arch/powerpc/platforms/wsp/psr2.c95
-rw-r--r--arch/powerpc/platforms/wsp/scom_smp.c427
-rw-r--r--arch/powerpc/platforms/wsp/scom_wsp.c77
-rw-r--r--arch/powerpc/platforms/wsp/setup.c36
-rw-r--r--arch/powerpc/platforms/wsp/smp.c88
-rw-r--r--arch/powerpc/platforms/wsp/wsp.h17
-rw-r--r--arch/powerpc/sysdev/Kconfig10
-rw-r--r--arch/powerpc/sysdev/Makefile6
-rw-r--r--arch/powerpc/sysdev/axonram.c2
-rw-r--r--arch/powerpc/sysdev/cpm1.c8
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c10
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c10
-rw-r--r--arch/powerpc/sysdev/i8259.c13
-rw-r--r--arch/powerpc/sysdev/ipic.c16
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c10
-rw-r--r--arch/powerpc/sysdev/mpc8xxx_gpio.c12
-rw-r--r--arch/powerpc/sysdev/mpic.c209
-rw-r--r--arch/powerpc/sysdev/mv64x60_pic.c14
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c6
-rw-r--r--arch/powerpc/sysdev/scom.c192
-rw-r--r--arch/powerpc/sysdev/uic.c12
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig13
-rw-r--r--arch/powerpc/sysdev/xics/Makefile6
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c164
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c293
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c240
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c443
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c8
-rw-r--r--arch/powerpc/xmon/xmon.c38
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h5
-rw-r--r--drivers/macintosh/via-pmu.c56
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/carma/Kconfig17
-rw-r--r--drivers/misc/carma/Makefile2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c1141
-rw-r--r--drivers/misc/carma/carma-fpga.c1433
-rw-r--r--drivers/of/irq.c2
-rw-r--r--include/linux/of_irq.h1
244 files changed, 11490 insertions, 4116 deletions
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
new file mode 100755
index 000000000000..1a729f089866
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -0,0 +1,61 @@
1CAN Device Tree Bindings
2------------------------
32011 Freescale Semiconductor, Inc.
4
5fsl,flexcan-v1.0 nodes
6-----------------------
7In addition to the required compatible, reg and interrupts properties, you can
8also specify which clock source shall be used for the controller.
9
10CPI Clock - CAN Protocol Interface Clock
11	The CLK_SRC bit of the CTRL (control register) selects the clock source
12	for the CAN Protocol Interface (CPI) to be either the peripheral clock
13	(driven by the PLL) or the crystal oscillator clock. The selected clock
14	is the one fed to the prescaler to generate the Serial Clock (Sclock).
15	The PRESDIV field of the CTRL (control register) controls a prescaler
16	that generates the Serial Clock (Sclock), whose period defines the
17	time quantum used to compose the CAN waveform.
18
19CAN Engine Clock Source
20	There are two sources for the CAN clock:
21	- Platform Clock: represents the bus clock
22	- Oscillator Clock
23
24 Peripheral Clock (PLL)
25 --------------
26 |
27 --------- -------------
28 | |CPI Clock | Prescaler | Sclock
29 | |---------------->| (1.. 256) |------------>
30 --------- -------------
31 | |
32 -------------- ---------------------CLK_SRC
33 Oscillator Clock
34
35- fsl,flexcan-clock-source : CAN engine clock source. This property selects
36			      which clock is fed to the prescaler to generate
37			      the Serial Clock (Sclock).
38			      Valid values are "oscillator" and "platform":
39			      "oscillator": the CAN engine clock source is the oscillator clock.
40			      "platform": the CAN engine clock source is the bus clock
41			      (platform clock).
42
43- fsl,flexcan-clock-divider : for the reference and system clock, an additional
44 clock divider can be specified.
45- clock-frequency: frequency required to calculate the bitrate for FlexCAN.
46
47Note:
48	- The v1.0 in flexcan-v1.0 represents the IP block version for the P1010 SoC.
49	- The P1010 does not have an oscillator as the clock source, so the default
50	  clock source is the platform clock.
51Examples:
52
53 can0@1c000 {
54 compatible = "fsl,flexcan-v1.0";
55 reg = <0x1c000 0x1000>;
56 interrupts = <48 0x2>;
57 interrupt-parent = <&mpic>;
58 fsl,flexcan-clock-source = "platform";
59 fsl,flexcan-clock-divider = <2>;
60 clock-frequency = <fixed by u-boot>;
61 };
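
For illustration only, not part of this patch: a minimal sketch of how a driver
could consume the properties described by this binding, using the generic OF
helpers. The function name and the fall-back to the platform clock when the
property is absent are assumptions made for this example, not code from the
FlexCAN driver.

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/string.h>

    /* Hypothetical helper: pick the CAN engine clock from the device tree. */
    static int flexcan_clk_from_dt(struct device_node *np,
                                   u32 *freq, bool *use_platform_clk)
    {
            const char *src = of_get_property(np, "fsl,flexcan-clock-source", NULL);
            const __be32 *prop;

            /* P1010 has no oscillator input, so the platform clock is the default. */
            *use_platform_clk = !src || !strcmp(src, "platform");

            prop = of_get_property(np, "clock-frequency", NULL);
            if (!prop)
                    return -EINVAL;
            *freq = be32_to_cpup(prop);     /* DT cells are stored big-endian */

            return 0;
    }
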
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt b/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt
new file mode 100644
index 000000000000..939a26d541f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/ifc.txt
@@ -0,0 +1,76 @@
1Integrated Flash Controller
2
3Properties:
4- name : Should be ifc
5- compatible : should contain "fsl,ifc". The version of the integrated
6 flash controller can be found in the IFC_REV register at
7 offset zero.
8
9- #address-cells : Should be either two or three. The first cell is the
10 chipselect number, and the remaining cells are the
11 offset into the chipselect.
12- #size-cells : Either one or two, depending on how large each chipselect
13 can be.
14- reg : Offset and length of the register set for the device
15- interrupts : IFC has two interrupts. The first one is the "common"
16               interrupt (CM_EVTER_STAT), and the second is the NAND interrupt
17 (NAND_EVTER_STAT).
18
19- ranges : Each range corresponds to a single chipselect, and covers
20 the entire access window as configured.
21
22Child device nodes describe the devices connected to the IFC, such as NOR (e.g.
23cfi-flash) and NAND (fsl,ifc-nand). There might also be board-specific devices
24like FPGAs, CPLDs, etc.
25
26Example:
27
28 ifc@ffe1e000 {
29 compatible = "fsl,ifc", "simple-bus";
30 #address-cells = <2>;
31 #size-cells = <1>;
32 reg = <0x0 0xffe1e000 0 0x2000>;
33 interrupts = <16 2 19 2>;
34
35 /* NOR, NAND Flashes and CPLD on board */
36 ranges = <0x0 0x0 0x0 0xee000000 0x02000000
37 0x1 0x0 0x0 0xffa00000 0x00010000
38 0x3 0x0 0x0 0xffb00000 0x00020000>;
39
40 flash@0,0 {
41 #address-cells = <1>;
42 #size-cells = <1>;
43 compatible = "cfi-flash";
44 reg = <0x0 0x0 0x2000000>;
45 bank-width = <2>;
46 device-width = <1>;
47
48 partition@0 {
49 /* 32MB for user data */
50 reg = <0x0 0x02000000>;
51 label = "NOR Data";
52 };
53 };
54
55 flash@1,0 {
56 #address-cells = <1>;
57 #size-cells = <1>;
58 compatible = "fsl,ifc-nand";
59 reg = <0x1 0x0 0x10000>;
60
61 partition@0 {
62 /* This location must not be altered */
63 /* 1MB for u-boot Bootloader Image */
64 reg = <0x0 0x00100000>;
65 label = "NAND U-Boot Image";
66 read-only;
67 };
68 };
69
70 cpld@3,0 {
71 #address-cells = <1>;
72 #size-cells = <1>;
73 compatible = "fsl,p1010rdb-cpld";
74 reg = <0x3 0x0 0x000001f>;
75 };
76 };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt
new file mode 100644
index 000000000000..df41958140e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpic-timer.txt
@@ -0,0 +1,38 @@
1* Freescale MPIC timers
2
3Required properties:
4- compatible: "fsl,mpic-global-timer"
5
6- reg : Contains two regions. The first is the main timer register bank
7 (GTCCRxx, GTBCRxx, GTVPRxx, GTDRxx). The second is the timer control
8 register (TCRx) for the group.
9
10- fsl,available-ranges: use a <start count> style property to define which
11 timer interrupts can be used. This property is optional; without this,
12 all timers within the group can be used.
13
14- interrupts: one interrupt per timer in the group, in order, starting
15  with timer zero. If fsl,available-ranges is present, only the
16 interrupts that correspond to available timers shall be present.
17
18Example:
19 /* Note that this requires #interrupt-cells to be 4 */
20 timer0: timer@41100 {
21 compatible = "fsl,mpic-global-timer";
22 reg = <0x41100 0x100 0x41300 4>;
23
24 /* Another AMP partition is using timers 0 and 1 */
25 fsl,available-ranges = <2 2>;
26
27 interrupts = <2 0 3 0
28 3 0 3 0>;
29 };
30
31 timer1: timer@42100 {
32 compatible = "fsl,mpic-global-timer";
33 reg = <0x42100 0x100 0x42300 4>;
34 interrupts = <4 0 3 0
35 5 0 3 0
36 6 0 3 0
37 7 0 3 0>;
38 };
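
Purely illustrative (the helper name is invented; this is not code from the
MPIC timer driver): decoding the <start count> pairs of fsl,available-ranges
with the generic OF helpers, treating an absent property as "all timers in the
group are usable", as described above.

    #include <linux/kernel.h>
    #include <linux/of.h>

    /* Hypothetical check: may hardware timer 'hw' be used by this partition? */
    static bool mpic_timer_available(struct device_node *np, u32 hw)
    {
            const __be32 *ranges;
            int len, i;

            ranges = of_get_property(np, "fsl,available-ranges", &len);
            if (!ranges)
                    return true;    /* property absent: every timer is usable */

            /* Each range is a <start count> pair of 32-bit cells. */
            for (i = 0; i < len / (2 * (int)sizeof(u32)); i++) {
                    u32 start = be32_to_cpup(&ranges[2 * i]);
                    u32 count = be32_to_cpup(&ranges[2 * i + 1]);

                    if (hw >= start && hw < start + count)
                            return true;
            }
            return false;
    }
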
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
index 4f6145859aab..2cf38bd841fd 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
@@ -190,7 +190,7 @@ EXAMPLE 4
190 */ 190 */
191 timer0: timer@41100 { 191 timer0: timer@41100 {
192 compatible = "fsl,mpic-global-timer"; 192 compatible = "fsl,mpic-global-timer";
193 reg = <0x41100 0x100>; 193 reg = <0x41100 0x100 0x41300 4>;
194 interrupts = <0 0 3 0 194 interrupts = <0 0 3 0
195 1 0 3 0 195 1 0 3 0
196 2 0 3 0 196 2 0 3 0
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8f4d50b0adfa..a3128ca0fe11 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -193,6 +193,12 @@ config SYS_SUPPORTS_APM_EMULATION
193 default y if PMAC_APM_EMU 193 default y if PMAC_APM_EMU
194 bool 194 bool
195 195
196config EPAPR_BOOT
197 bool
198 help
199 Used to allow a board to specify it wants an ePAPR compliant wrapper.
200 default n
201
196config DEFAULT_UIMAGE 202config DEFAULT_UIMAGE
197 bool 203 bool
198 help 204 help
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2d38a50e66ba..a597dd77b903 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -267,6 +267,11 @@ config PPC_EARLY_DEBUG_USBGECKO
267 Select this to enable early debugging for Nintendo GameCube/Wii 267 Select this to enable early debugging for Nintendo GameCube/Wii
268 consoles via an external USB Gecko adapter. 268 consoles via an external USB Gecko adapter.
269 269
270config PPC_EARLY_DEBUG_WSP
271 bool "Early debugging via WSP's internal UART"
272 depends on PPC_WSP
273 select PPC_UDBG_16550
274
270endchoice 275endchoice
271 276
272config PPC_EARLY_DEBUG_44x_PHYSLOW 277config PPC_EARLY_DEBUG_44x_PHYSLOW
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 89178164af5e..c26200b40a47 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -69,7 +69,8 @@ src-wlib := string.S crt0.S crtsavres.S stdio.c main.c \
69 cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \ 69 cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \
70 fsl-soc.c mpc8xx.c pq2.c ugecon.c 70 fsl-soc.c mpc8xx.c pq2.c ugecon.c
71src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \ 71src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \
72 cuboot-ebony.c cuboot-hotfoot.c treeboot-ebony.c prpmc2800.c \ 72 cuboot-ebony.c cuboot-hotfoot.c epapr.c treeboot-ebony.c \
73 prpmc2800.c \
73 ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \ 74 ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \
74 cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \ 75 cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \
75 cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \ 76 cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \
@@ -127,7 +128,7 @@ quiet_cmd_bootas = BOOTAS $@
127 cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< 128 cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
128 129
129quiet_cmd_bootar = BOOTAR $@ 130quiet_cmd_bootar = BOOTAR $@
130 cmd_bootar = $(CROSS32AR) -cr $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ 131 cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
131 132
132$(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE 133$(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE
133 $(call if_changed_dep,bootcc) 134 $(call if_changed_dep,bootcc)
@@ -182,6 +183,7 @@ image-$(CONFIG_PPC_HOLLY) += dtbImage.holly
182image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800 183image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
183image-$(CONFIG_PPC_ISERIES) += zImage.iseries 184image-$(CONFIG_PPC_ISERIES) += zImage.iseries
184image-$(CONFIG_DEFAULT_UIMAGE) += uImage 185image-$(CONFIG_DEFAULT_UIMAGE) += uImage
186image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
185 187
186# 188#
187# Targets which embed a device tree blob 189# Targets which embed a device tree blob
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index f1c4dfc635be..0f7428a37efb 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -6,16 +6,28 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32. 9 * NOTE: this code runs in 32 bit mode, is position-independent,
10 * and is packaged as ELF32.
10 */ 11 */
11 12
12#include "ppc_asm.h" 13#include "ppc_asm.h"
13 14
14 .text 15 .text
15 /* a procedure descriptor used when booting this as a COFF file */ 16 /* A procedure descriptor used when booting this as a COFF file.
17 * When making COFF, this comes first in the link and we're
18 * linked at 0x500000.
19 */
16 .globl _zimage_start_opd 20 .globl _zimage_start_opd
17_zimage_start_opd: 21_zimage_start_opd:
18 .long _zimage_start, 0, 0, 0 22 .long 0x500000, 0, 0, 0
23
24p_start: .long _start
25p_etext: .long _etext
26p_bss_start: .long __bss_start
27p_end: .long _end
28
29 .weak _platform_stack_top
30p_pstack: .long _platform_stack_top
19 31
20 .weak _zimage_start 32 .weak _zimage_start
21 .globl _zimage_start 33 .globl _zimage_start
@@ -24,37 +36,65 @@ _zimage_start:
24_zimage_start_lib: 36_zimage_start_lib:
25 /* Work out the offset between the address we were linked at 37 /* Work out the offset between the address we were linked at
26 and the address where we're running. */ 38 and the address where we're running. */
27 bl 1f 39 bl .+4
281: mflr r0 40p_base: mflr r10 /* r10 now points to runtime addr of p_base */
29 lis r9,1b@ha 41 /* grab the link address of the dynamic section in r11 */
30 addi r9,r9,1b@l 42 addis r11,r10,(_GLOBAL_OFFSET_TABLE_-p_base)@ha
31 subf. r0,r9,r0 43 lwz r11,(_GLOBAL_OFFSET_TABLE_-p_base)@l(r11)
32 beq 3f /* if running at same address as linked */ 44 cmpwi r11,0
45 beq 3f /* if not linked -pie */
46 /* get the runtime address of the dynamic section in r12 */
47 .weak __dynamic_start
48 addis r12,r10,(__dynamic_start-p_base)@ha
49 addi r12,r12,(__dynamic_start-p_base)@l
50 subf r11,r11,r12 /* runtime - linktime offset */
51
52 /* The dynamic section contains a series of tagged entries.
53 * We need the RELA and RELACOUNT entries. */
54RELA = 7
55RELACOUNT = 0x6ffffff9
56 li r9,0
57 li r0,0
589: lwz r8,0(r12) /* get tag */
59 cmpwi r8,0
60 beq 10f /* end of list */
61 cmpwi r8,RELA
62 bne 11f
63 lwz r9,4(r12) /* get RELA pointer in r9 */
64 b 12f
6511: addis r8,r8,(-RELACOUNT)@ha
66 cmpwi r8,RELACOUNT@l
67 bne 12f
68 lwz r0,4(r12) /* get RELACOUNT value in r0 */
6912: addi r12,r12,8
70 b 9b
33 71
34 /* The .got2 section contains a list of addresses, so add 72 /* The relocation section contains a list of relocations.
35 the address offset onto each entry. */ 73 * We now do the R_PPC_RELATIVE ones, which point to words
36 lis r9,__got2_start@ha 74 * which need to be initialized with addend + offset.
37 addi r9,r9,__got2_start@l 75 * The R_PPC_RELATIVE ones come first and there are RELACOUNT
38 lis r8,__got2_end@ha 76 * of them. */
39 addi r8,r8,__got2_end@l 7710: /* skip relocation if we don't have both */
40 subf. r8,r9,r8 78 cmpwi r0,0
41 beq 3f 79 beq 3f
42 srwi. r8,r8,2 80 cmpwi r9,0
43 mtctr r8 81 beq 3f
44 add r9,r0,r9 82
452: lwz r8,0(r9) 83 add r9,r9,r11 /* Relocate RELA pointer */
46 add r8,r8,r0 84 mtctr r0
47 stw r8,0(r9) 852: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */
48 addi r9,r9,4 86 cmpwi r0,22 /* R_PPC_RELATIVE */
87 bne 3f
88 lwz r12,0(r9) /* reloc->r_offset */
89 lwz r0,8(r9) /* reloc->r_addend */
90 add r0,r0,r11
91 stwx r0,r11,r12
92 addi r9,r9,12
49 bdnz 2b 93 bdnz 2b
50 94
51 /* Do a cache flush for our text, in case the loader didn't */ 95 /* Do a cache flush for our text, in case the loader didn't */
523: lis r9,_start@ha 963: lwz r9,p_start-p_base(r10) /* note: these are relocated now */
53 addi r9,r9,_start@l 97 lwz r8,p_etext-p_base(r10)
54 add r9,r0,r9
55 lis r8,_etext@ha
56 addi r8,r8,_etext@l
57 add r8,r0,r8
584: dcbf r0,r9 984: dcbf r0,r9
59 icbi r0,r9 99 icbi r0,r9
60 addi r9,r9,0x20 100 addi r9,r9,0x20
@@ -64,27 +104,19 @@ _zimage_start_lib:
64 isync 104 isync
65 105
66 /* Clear the BSS */ 106 /* Clear the BSS */
67 lis r9,__bss_start@ha 107 lwz r9,p_bss_start-p_base(r10)
68 addi r9,r9,__bss_start@l 108 lwz r8,p_end-p_base(r10)
69 add r9,r0,r9 109 li r0,0
70 lis r8,_end@ha 1105: stw r0,0(r9)
71 addi r8,r8,_end@l
72 add r8,r0,r8
73 li r10,0
745: stw r10,0(r9)
75 addi r9,r9,4 111 addi r9,r9,4
76 cmplw cr0,r9,r8 112 cmplw cr0,r9,r8
77 blt 5b 113 blt 5b
78 114
79 /* Possibly set up a custom stack */ 115 /* Possibly set up a custom stack */
80.weak _platform_stack_top 116 lwz r8,p_pstack-p_base(r10)
81 lis r8,_platform_stack_top@ha
82 addi r8,r8,_platform_stack_top@l
83 cmpwi r8,0 117 cmpwi r8,0
84 beq 6f 118 beq 6f
85 add r8,r0,r8
86 lwz r1,0(r8) 119 lwz r1,0(r8)
87 add r1,r0,r1
88 li r0,0 120 li r0,0
89 stwu r0,-16(r1) /* establish a stack frame */ 121 stwu r0,-16(r1) /* establish a stack frame */
906: 1226:
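
As a reading aid for the crt0.S hunk above (the struct and function names here
are invented for illustration; this is not code from the tree): the new
position-independent startup path is equivalent to the following C. It walks
the dynamic section for the RELA table and RELACOUNT, then applies the
R_PPC_RELATIVE relocations, storing addend + load-offset into each relocated
word.

    #include <stdint.h>

    struct dyn32  { int32_t d_tag; uint32_t d_val; };                         /* like Elf32_Dyn  */
    struct rela32 { uint32_t r_offset; uint32_t r_info; int32_t r_addend; };  /* like Elf32_Rela */

    #define DT_RELA         7
    #define DT_RELACOUNT    0x6ffffff9
    #define R_PPC_RELATIVE  22

    /* 'offset' is the difference between run-time and link-time addresses. */
    static void apply_relative_relocs(struct dyn32 *dyn, uint32_t offset)
    {
            struct rela32 *rela = 0;
            uint32_t count = 0;

            /* The dynamic section is a list of tagged entries, terminated by tag 0. */
            for (; dyn->d_tag; dyn++) {
                    if (dyn->d_tag == DT_RELA)
                            rela = (struct rela32 *)(uintptr_t)(dyn->d_val + offset);
                    else if (dyn->d_tag == DT_RELACOUNT)
                            count = dyn->d_val;
            }
            if (!rela || !count)
                    return;         /* not linked -pie: nothing to relocate */

            /* The R_PPC_RELATIVE entries come first; stop at the first other type. */
            for (; count; count--, rela++) {
                    if ((rela->r_info & 0xff) != R_PPC_RELATIVE)    /* ELF32_R_TYPE */
                            break;
                    *(uint32_t *)(uintptr_t)(rela->r_offset + offset) =
                            rela->r_addend + offset;
            }
    }
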
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
index e0668f877794..d6a8ae458137 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dts
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -9,12 +9,11 @@
9 * option) any later version. 9 * option) any later version.
10 */ 10 */
11 11
12/dts-v1/; 12/include/ "p1020si.dtsi"
13
13/ { 14/ {
14 model = "fsl,P1020"; 15 model = "fsl,P1020RDB";
15 compatible = "fsl,P1020RDB"; 16 compatible = "fsl,P1020RDB";
16 #address-cells = <2>;
17 #size-cells = <2>;
18 17
19 aliases { 18 aliases {
20 serial0 = &serial0; 19 serial0 = &serial0;
@@ -26,34 +25,11 @@
26 pci1 = &pci1; 25 pci1 = &pci1;
27 }; 26 };
28 27
29 cpus {
30 #address-cells = <1>;
31 #size-cells = <0>;
32
33 PowerPC,P1020@0 {
34 device_type = "cpu";
35 reg = <0x0>;
36 next-level-cache = <&L2>;
37 };
38
39 PowerPC,P1020@1 {
40 device_type = "cpu";
41 reg = <0x1>;
42 next-level-cache = <&L2>;
43 };
44 };
45
46 memory { 28 memory {
47 device_type = "memory"; 29 device_type = "memory";
48 }; 30 };
49 31
50 localbus@ffe05000 { 32 localbus@ffe05000 {
51 #address-cells = <2>;
52 #size-cells = <1>;
53 compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
54 reg = <0 0xffe05000 0 0x1000>;
55 interrupts = <19 2>;
56 interrupt-parent = <&mpic>;
57 33
58 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */ 34 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
59 ranges = <0x0 0x0 0x0 0xef000000 0x01000000 35 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
@@ -165,88 +141,14 @@
165 }; 141 };
166 142
167 soc@ffe00000 { 143 soc@ffe00000 {
168 #address-cells = <1>;
169 #size-cells = <1>;
170 device_type = "soc";
171 compatible = "fsl,p1020-immr", "simple-bus";
172 ranges = <0x0 0x0 0xffe00000 0x100000>;
173 bus-frequency = <0>; // Filled out by uboot.
174
175 ecm-law@0 {
176 compatible = "fsl,ecm-law";
177 reg = <0x0 0x1000>;
178 fsl,num-laws = <12>;
179 };
180
181 ecm@1000 {
182 compatible = "fsl,p1020-ecm", "fsl,ecm";
183 reg = <0x1000 0x1000>;
184 interrupts = <16 2>;
185 interrupt-parent = <&mpic>;
186 };
187
188 memory-controller@2000 {
189 compatible = "fsl,p1020-memory-controller";
190 reg = <0x2000 0x1000>;
191 interrupt-parent = <&mpic>;
192 interrupts = <16 2>;
193 };
194
195 i2c@3000 { 144 i2c@3000 {
196 #address-cells = <1>;
197 #size-cells = <0>;
198 cell-index = <0>;
199 compatible = "fsl-i2c";
200 reg = <0x3000 0x100>;
201 interrupts = <43 2>;
202 interrupt-parent = <&mpic>;
203 dfsrr;
204 rtc@68 { 145 rtc@68 {
205 compatible = "dallas,ds1339"; 146 compatible = "dallas,ds1339";
206 reg = <0x68>; 147 reg = <0x68>;
207 }; 148 };
208 }; 149 };
209 150
210 i2c@3100 {
211 #address-cells = <1>;
212 #size-cells = <0>;
213 cell-index = <1>;
214 compatible = "fsl-i2c";
215 reg = <0x3100 0x100>;
216 interrupts = <43 2>;
217 interrupt-parent = <&mpic>;
218 dfsrr;
219 };
220
221 serial0: serial@4500 {
222 cell-index = <0>;
223 device_type = "serial";
224 compatible = "ns16550";
225 reg = <0x4500 0x100>;
226 clock-frequency = <0>;
227 interrupts = <42 2>;
228 interrupt-parent = <&mpic>;
229 };
230
231 serial1: serial@4600 {
232 cell-index = <1>;
233 device_type = "serial";
234 compatible = "ns16550";
235 reg = <0x4600 0x100>;
236 clock-frequency = <0>;
237 interrupts = <42 2>;
238 interrupt-parent = <&mpic>;
239 };
240
241 spi@7000 { 151 spi@7000 {
242 cell-index = <0>;
243 #address-cells = <1>;
244 #size-cells = <0>;
245 compatible = "fsl,espi";
246 reg = <0x7000 0x1000>;
247 interrupts = <59 0x2>;
248 interrupt-parent = <&mpic>;
249 mode = "cpu";
250 152
251 fsl_m25p80@0 { 153 fsl_m25p80@0 {
252 #address-cells = <1>; 154 #address-cells = <1>;
@@ -294,66 +196,7 @@
294 }; 196 };
295 }; 197 };
296 198
297 gpio: gpio-controller@f000 {
298 #gpio-cells = <2>;
299 compatible = "fsl,mpc8572-gpio";
300 reg = <0xf000 0x100>;
301 interrupts = <47 0x2>;
302 interrupt-parent = <&mpic>;
303 gpio-controller;
304 };
305
306 L2: l2-cache-controller@20000 {
307 compatible = "fsl,p1020-l2-cache-controller";
308 reg = <0x20000 0x1000>;
309 cache-line-size = <32>; // 32 bytes
310 cache-size = <0x40000>; // L2,256K
311 interrupt-parent = <&mpic>;
312 interrupts = <16 2>;
313 };
314
315 dma@21300 {
316 #address-cells = <1>;
317 #size-cells = <1>;
318 compatible = "fsl,eloplus-dma";
319 reg = <0x21300 0x4>;
320 ranges = <0x0 0x21100 0x200>;
321 cell-index = <0>;
322 dma-channel@0 {
323 compatible = "fsl,eloplus-dma-channel";
324 reg = <0x0 0x80>;
325 cell-index = <0>;
326 interrupt-parent = <&mpic>;
327 interrupts = <20 2>;
328 };
329 dma-channel@80 {
330 compatible = "fsl,eloplus-dma-channel";
331 reg = <0x80 0x80>;
332 cell-index = <1>;
333 interrupt-parent = <&mpic>;
334 interrupts = <21 2>;
335 };
336 dma-channel@100 {
337 compatible = "fsl,eloplus-dma-channel";
338 reg = <0x100 0x80>;
339 cell-index = <2>;
340 interrupt-parent = <&mpic>;
341 interrupts = <22 2>;
342 };
343 dma-channel@180 {
344 compatible = "fsl,eloplus-dma-channel";
345 reg = <0x180 0x80>;
346 cell-index = <3>;
347 interrupt-parent = <&mpic>;
348 interrupts = <23 2>;
349 };
350 };
351
352 mdio@24000 { 199 mdio@24000 {
353 #address-cells = <1>;
354 #size-cells = <0>;
355 compatible = "fsl,etsec2-mdio";
356 reg = <0x24000 0x1000 0xb0030 0x4>;
357 200
358 phy0: ethernet-phy@0 { 201 phy0: ethernet-phy@0 {
359 interrupt-parent = <&mpic>; 202 interrupt-parent = <&mpic>;
@@ -369,10 +212,6 @@
369 }; 212 };
370 213
371 mdio@25000 { 214 mdio@25000 {
372 #address-cells = <1>;
373 #size-cells = <0>;
374 compatible = "fsl,etsec2-tbi";
375 reg = <0x25000 0x1000 0xb1030 0x4>;
376 215
377 tbi0: tbi-phy@11 { 216 tbi0: tbi-phy@11 {
378 reg = <0x11>; 217 reg = <0x11>;
@@ -381,97 +220,25 @@
381 }; 220 };
382 221
383 enet0: ethernet@b0000 { 222 enet0: ethernet@b0000 {
384 #address-cells = <1>;
385 #size-cells = <1>;
386 device_type = "network";
387 model = "eTSEC";
388 compatible = "fsl,etsec2";
389 fsl,num_rx_queues = <0x8>;
390 fsl,num_tx_queues = <0x8>;
391 local-mac-address = [ 00 00 00 00 00 00 ];
392 interrupt-parent = <&mpic>;
393 fixed-link = <1 1 1000 0 0>; 223 fixed-link = <1 1 1000 0 0>;
394 phy-connection-type = "rgmii-id"; 224 phy-connection-type = "rgmii-id";
395 225
396 queue-group@0 {
397 #address-cells = <1>;
398 #size-cells = <1>;
399 reg = <0xb0000 0x1000>;
400 interrupts = <29 2 30 2 34 2>;
401 };
402
403 queue-group@1 {
404 #address-cells = <1>;
405 #size-cells = <1>;
406 reg = <0xb4000 0x1000>;
407 interrupts = <17 2 18 2 24 2>;
408 };
409 }; 226 };
410 227
411 enet1: ethernet@b1000 { 228 enet1: ethernet@b1000 {
412 #address-cells = <1>;
413 #size-cells = <1>;
414 device_type = "network";
415 model = "eTSEC";
416 compatible = "fsl,etsec2";
417 fsl,num_rx_queues = <0x8>;
418 fsl,num_tx_queues = <0x8>;
419 local-mac-address = [ 00 00 00 00 00 00 ];
420 interrupt-parent = <&mpic>;
421 phy-handle = <&phy0>; 229 phy-handle = <&phy0>;
422 tbi-handle = <&tbi0>; 230 tbi-handle = <&tbi0>;
423 phy-connection-type = "sgmii"; 231 phy-connection-type = "sgmii";
424 232
425 queue-group@0 {
426 #address-cells = <1>;
427 #size-cells = <1>;
428 reg = <0xb1000 0x1000>;
429 interrupts = <35 2 36 2 40 2>;
430 };
431
432 queue-group@1 {
433 #address-cells = <1>;
434 #size-cells = <1>;
435 reg = <0xb5000 0x1000>;
436 interrupts = <51 2 52 2 67 2>;
437 };
438 }; 233 };
439 234
440 enet2: ethernet@b2000 { 235 enet2: ethernet@b2000 {
441 #address-cells = <1>;
442 #size-cells = <1>;
443 device_type = "network";
444 model = "eTSEC";
445 compatible = "fsl,etsec2";
446 fsl,num_rx_queues = <0x8>;
447 fsl,num_tx_queues = <0x8>;
448 local-mac-address = [ 00 00 00 00 00 00 ];
449 interrupt-parent = <&mpic>;
450 phy-handle = <&phy1>; 236 phy-handle = <&phy1>;
451 phy-connection-type = "rgmii-id"; 237 phy-connection-type = "rgmii-id";
452 238
453 queue-group@0 {
454 #address-cells = <1>;
455 #size-cells = <1>;
456 reg = <0xb2000 0x1000>;
457 interrupts = <31 2 32 2 33 2>;
458 };
459
460 queue-group@1 {
461 #address-cells = <1>;
462 #size-cells = <1>;
463 reg = <0xb6000 0x1000>;
464 interrupts = <25 2 26 2 27 2>;
465 };
466 }; 239 };
467 240
468 usb@22000 { 241 usb@22000 {
469 #address-cells = <1>;
470 #size-cells = <0>;
471 compatible = "fsl-usb2-dr";
472 reg = <0x22000 0x1000>;
473 interrupt-parent = <&mpic>;
474 interrupts = <28 0x2>;
475 phy_type = "ulpi"; 242 phy_type = "ulpi";
476 }; 243 };
477 244
@@ -481,82 +248,23 @@
481 it enables USB2. OTOH, U-Boot does create a new node 248 it enables USB2. OTOH, U-Boot does create a new node
482 when there isn't any. So, just comment it out. 249 when there isn't any. So, just comment it out.
483 usb@23000 { 250 usb@23000 {
484 #address-cells = <1>;
485 #size-cells = <0>;
486 compatible = "fsl-usb2-dr";
487 reg = <0x23000 0x1000>;
488 interrupt-parent = <&mpic>;
489 interrupts = <46 0x2>;
490 phy_type = "ulpi"; 251 phy_type = "ulpi";
491 }; 252 };
492 */ 253 */
493 254
494 sdhci@2e000 {
495 compatible = "fsl,p1020-esdhc", "fsl,esdhc";
496 reg = <0x2e000 0x1000>;
497 interrupts = <72 0x2>;
498 interrupt-parent = <&mpic>;
499 /* Filled in by U-Boot */
500 clock-frequency = <0>;
501 };
502
503 crypto@30000 {
504 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
505 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
506 reg = <0x30000 0x10000>;
507 interrupts = <45 2 58 2>;
508 interrupt-parent = <&mpic>;
509 fsl,num-channels = <4>;
510 fsl,channel-fifo-len = <24>;
511 fsl,exec-units-mask = <0xbfe>;
512 fsl,descriptor-types-mask = <0x3ab0ebf>;
513 };
514
515 mpic: pic@40000 {
516 interrupt-controller;
517 #address-cells = <0>;
518 #interrupt-cells = <2>;
519 reg = <0x40000 0x40000>;
520 compatible = "chrp,open-pic";
521 device_type = "open-pic";
522 };
523
524 msi@41600 {
525 compatible = "fsl,p1020-msi", "fsl,mpic-msi";
526 reg = <0x41600 0x80>;
527 msi-available-ranges = <0 0x100>;
528 interrupts = <
529 0xe0 0
530 0xe1 0
531 0xe2 0
532 0xe3 0
533 0xe4 0
534 0xe5 0
535 0xe6 0
536 0xe7 0>;
537 interrupt-parent = <&mpic>;
538 };
539
540 global-utilities@e0000 { //global utilities block
541 compatible = "fsl,p1020-guts";
542 reg = <0xe0000 0x1000>;
543 fsl,has-rstcr;
544 };
545 }; 255 };
546 256
547 pci0: pcie@ffe09000 { 257 pci0: pcie@ffe09000 {
548 compatible = "fsl,mpc8548-pcie";
549 device_type = "pci";
550 #interrupt-cells = <1>;
551 #size-cells = <2>;
552 #address-cells = <3>;
553 reg = <0 0xffe09000 0 0x1000>;
554 bus-range = <0 255>;
555 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 258 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
556 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 259 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
557 clock-frequency = <33333333>; 260 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
558 interrupt-parent = <&mpic>; 261 interrupt-map = <
559 interrupts = <16 2>; 262 /* IDSEL 0x0 */
263 0000 0x0 0x0 0x1 &mpic 0x4 0x1
264 0000 0x0 0x0 0x2 &mpic 0x5 0x1
265 0000 0x0 0x0 0x3 &mpic 0x6 0x1
266 0000 0x0 0x0 0x4 &mpic 0x7 0x1
267 >;
560 pcie@0 { 268 pcie@0 {
561 reg = <0x0 0x0 0x0 0x0 0x0>; 269 reg = <0x0 0x0 0x0 0x0 0x0>;
562 #size-cells = <2>; 270 #size-cells = <2>;
@@ -573,18 +281,16 @@
573 }; 281 };
574 282
575 pci1: pcie@ffe0a000 { 283 pci1: pcie@ffe0a000 {
576 compatible = "fsl,mpc8548-pcie";
577 device_type = "pci";
578 #interrupt-cells = <1>;
579 #size-cells = <2>;
580 #address-cells = <3>;
581 reg = <0 0xffe0a000 0 0x1000>;
582 bus-range = <0 255>;
583 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 284 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
584 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 285 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
585 clock-frequency = <33333333>; 286 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
586 interrupt-parent = <&mpic>; 287 interrupt-map = <
587 interrupts = <16 2>; 288 /* IDSEL 0x0 */
289 0000 0x0 0x0 0x1 &mpic 0x0 0x1
290 0000 0x0 0x0 0x2 &mpic 0x1 0x1
291 0000 0x0 0x0 0x3 &mpic 0x2 0x1
292 0000 0x0 0x0 0x4 &mpic 0x3 0x1
293 >;
588 pcie@0 { 294 pcie@0 {
589 reg = <0x0 0x0 0x0 0x0 0x0>; 295 reg = <0x0 0x0 0x0 0x0 0x0>;
590 #size-cells = <2>; 296 #size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
new file mode 100644
index 000000000000..f0bf7f42f097
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
@@ -0,0 +1,213 @@
1/*
2 * P1020 RDB Core0 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared; all the other devices must be assigned to one core only.
6 * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb,
7 * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi.
8 *
9 * Please note that "-b 0" must be added when compiling core0's dts.
10 *
11 * Copyright 2011 Freescale Semiconductor Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19/include/ "p1020si.dtsi"
20
21/ {
22 model = "fsl,P1020RDB";
23 compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP";
24
25 aliases {
26 ethernet1 = &enet1;
27 ethernet2 = &enet2;
28 serial0 = &serial0;
29 pci0 = &pci0;
30 pci1 = &pci1;
31 };
32
33 cpus {
34 PowerPC,P1020@1 {
35 status = "disabled";
36 };
37 };
38
39 memory {
40 device_type = "memory";
41 };
42
43 localbus@ffe05000 {
44 status = "disabled";
45 };
46
47 soc@ffe00000 {
48 i2c@3000 {
49 rtc@68 {
50 compatible = "dallas,ds1339";
51 reg = <0x68>;
52 };
53 };
54
55 serial1: serial@4600 {
56 status = "disabled";
57 };
58
59 spi@7000 {
60 fsl_m25p80@0 {
61 #address-cells = <1>;
62 #size-cells = <1>;
63 compatible = "fsl,espi-flash";
64 reg = <0>;
65 linux,modalias = "fsl_m25p80";
66 spi-max-frequency = <40000000>;
67
68 partition@0 {
69 /* 512KB for u-boot Bootloader Image */
70 reg = <0x0 0x00080000>;
71 label = "SPI (RO) U-Boot Image";
72 read-only;
73 };
74
75 partition@80000 {
76 /* 512KB for DTB Image */
77 reg = <0x00080000 0x00080000>;
78 label = "SPI (RO) DTB Image";
79 read-only;
80 };
81
82 partition@100000 {
83 /* 4MB for Linux Kernel Image */
84 reg = <0x00100000 0x00400000>;
85 label = "SPI (RO) Linux Kernel Image";
86 read-only;
87 };
88
89 partition@500000 {
90 /* 4MB for Compressed RFS Image */
91 reg = <0x00500000 0x00400000>;
92 label = "SPI (RO) Compressed RFS Image";
93 read-only;
94 };
95
96 partition@900000 {
97 /* 7MB for JFFS2 based RFS */
98 reg = <0x00900000 0x00700000>;
99 label = "SPI (RW) JFFS2 RFS";
100 };
101 };
102 };
103
104 mdio@24000 {
105 phy0: ethernet-phy@0 {
106 interrupt-parent = <&mpic>;
107 interrupts = <3 1>;
108 reg = <0x0>;
109 };
110 phy1: ethernet-phy@1 {
111 interrupt-parent = <&mpic>;
112 interrupts = <2 1>;
113 reg = <0x1>;
114 };
115 };
116
117 mdio@25000 {
118 tbi0: tbi-phy@11 {
119 reg = <0x11>;
120 device_type = "tbi-phy";
121 };
122 };
123
124 enet0: ethernet@b0000 {
125 status = "disabled";
126 };
127
128 enet1: ethernet@b1000 {
129 phy-handle = <&phy0>;
130 tbi-handle = <&tbi0>;
131 phy-connection-type = "sgmii";
132 };
133
134 enet2: ethernet@b2000 {
135 phy-handle = <&phy1>;
136 phy-connection-type = "rgmii-id";
137 };
138
139 usb@22000 {
140 phy_type = "ulpi";
141 };
142
143 /* USB2 is shared with localbus, so it must be disabled
144 by default. We can't put 'status = "disabled";' here
145 since U-Boot doesn't clear the status property when
146 it enables USB2. OTOH, U-Boot does create a new node
147 when there isn't any. So, just comment it out.
148 usb@23000 {
149 phy_type = "ulpi";
150 };
151 */
152
153 mpic: pic@40000 {
154 protected-sources = <
155 42 29 30 34 /* serial1, enet0-queue-group0 */
156 17 18 24 45 /* enet0-queue-group1, crypto */
157 >;
158 };
159
160 };
161
162 pci0: pcie@ffe09000 {
163 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
164 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
165 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
166 interrupt-map = <
167 /* IDSEL 0x0 */
168 0000 0x0 0x0 0x1 &mpic 0x4 0x1
169 0000 0x0 0x0 0x2 &mpic 0x5 0x1
170 0000 0x0 0x0 0x3 &mpic 0x6 0x1
171 0000 0x0 0x0 0x4 &mpic 0x7 0x1
172 >;
173 pcie@0 {
174 reg = <0x0 0x0 0x0 0x0 0x0>;
175 #size-cells = <2>;
176 #address-cells = <3>;
177 device_type = "pci";
178 ranges = <0x2000000 0x0 0xa0000000
179 0x2000000 0x0 0xa0000000
180 0x0 0x20000000
181
182 0x1000000 0x0 0x0
183 0x1000000 0x0 0x0
184 0x0 0x100000>;
185 };
186 };
187
188 pci1: pcie@ffe0a000 {
189 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
190 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
191 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
192 interrupt-map = <
193 /* IDSEL 0x0 */
194 0000 0x0 0x0 0x1 &mpic 0x0 0x1
195 0000 0x0 0x0 0x2 &mpic 0x1 0x1
196 0000 0x0 0x0 0x3 &mpic 0x2 0x1
197 0000 0x0 0x0 0x4 &mpic 0x3 0x1
198 >;
199 pcie@0 {
200 reg = <0x0 0x0 0x0 0x0 0x0>;
201 #size-cells = <2>;
202 #address-cells = <3>;
203 device_type = "pci";
204 ranges = <0x2000000 0x0 0x80000000
205 0x2000000 0x0 0x80000000
206 0x0 0x20000000
207
208 0x1000000 0x0 0x0
209 0x1000000 0x0 0x0
210 0x0 0x100000>;
211 };
212 };
213};
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
new file mode 100644
index 000000000000..6ec02204a44e
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
@@ -0,0 +1,148 @@
1/*
2 * P1020 RDB Core1 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared; all the other devices must be assigned to one core only.
6 * This dts allows core1 to have l2, eth0, crypto.
7 *
8 * Please note that "-b 1" must be added when compiling core1's dts.
9 *
10 * Copyright 2011 Freescale Semiconductor Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18/include/ "p1020si.dtsi"
19
20/ {
21 model = "fsl,P1020RDB";
22 compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP";
23
24 aliases {
25 ethernet0 = &enet0;
26 serial0 = &serial1;
27 };
28
29 cpus {
30 PowerPC,P1020@0 {
31 status = "disabled";
32 };
33 };
34
35 memory {
36 device_type = "memory";
37 };
38
39 localbus@ffe05000 {
40 status = "disabled";
41 };
42
43 soc@ffe00000 {
44 ecm-law@0 {
45 status = "disabled";
46 };
47
48 ecm@1000 {
49 status = "disabled";
50 };
51
52 memory-controller@2000 {
53 status = "disabled";
54 };
55
56 i2c@3000 {
57 status = "disabled";
58 };
59
60 i2c@3100 {
61 status = "disabled";
62 };
63
64 serial0: serial@4500 {
65 status = "disabled";
66 };
67
68 spi@7000 {
69 status = "disabled";
70 };
71
72 gpio: gpio-controller@f000 {
73 status = "disabled";
74 };
75
76 dma@21300 {
77 status = "disabled";
78 };
79
80 mdio@24000 {
81 status = "disabled";
82 };
83
84 mdio@25000 {
85 status = "disabled";
86 };
87
88 enet0: ethernet@b0000 {
89 fixed-link = <1 1 1000 0 0>;
90 phy-connection-type = "rgmii-id";
91
92 };
93
94 enet1: ethernet@b1000 {
95 status = "disabled";
96 };
97
98 enet2: ethernet@b2000 {
99 status = "disabled";
100 };
101
102 usb@22000 {
103 status = "disabled";
104 };
105
106 sdhci@2e000 {
107 status = "disabled";
108 };
109
110 mpic: pic@40000 {
111 protected-sources = <
112 16 /* ecm, mem, L2, pci0, pci1 */
113 43 42 59 /* i2c, serial0, spi */
114 47 63 62 /* gpio, tdm */
115 20 21 22 23 /* dma */
116 03 02 /* mdio */
117 35 36 40 /* enet1-queue-group0 */
118 51 52 67 /* enet1-queue-group1 */
119 31 32 33 /* enet2-queue-group0 */
120 25 26 27 /* enet2-queue-group1 */
121 28 72 58 /* usb, sdhci, crypto */
122 0xb0 0xb1 0xb2 /* message */
123 0xb3 0xb4 0xb5
124 0xb6 0xb7
125 0xe0 0xe1 0xe2 /* msi */
126 0xe3 0xe4 0xe5
127 0xe6 0xe7 /* sdhci, crypto , pci */
128 >;
129 };
130
131 msi@41600 {
132 status = "disabled";
133 };
134
135 global-utilities@e0000 { //global utilities block
136 status = "disabled";
137 };
138
139 };
140
141 pci0: pcie@ffe09000 {
142 status = "disabled";
143 };
144
145 pci1: pcie@ffe0a000 {
146 status = "disabled";
147 };
148};
diff --git a/arch/powerpc/boot/dts/p1020si.dtsi b/arch/powerpc/boot/dts/p1020si.dtsi
new file mode 100644
index 000000000000..5c5acb66c3fc
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020si.dtsi
@@ -0,0 +1,377 @@
1/*
2 * P1020si Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/dts-v1/;
13/ {
14 compatible = "fsl,P1020";
15 #address-cells = <2>;
16 #size-cells = <2>;
17
18 cpus {
19 #address-cells = <1>;
20 #size-cells = <0>;
21
22 PowerPC,P1020@0 {
23 device_type = "cpu";
24 reg = <0x0>;
25 next-level-cache = <&L2>;
26 };
27
28 PowerPC,P1020@1 {
29 device_type = "cpu";
30 reg = <0x1>;
31 next-level-cache = <&L2>;
32 };
33 };
34
35 localbus@ffe05000 {
36 #address-cells = <2>;
37 #size-cells = <1>;
38 compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
39 reg = <0 0xffe05000 0 0x1000>;
40 interrupts = <19 2>;
41 interrupt-parent = <&mpic>;
42 };
43
44 soc@ffe00000 {
45 #address-cells = <1>;
46 #size-cells = <1>;
47 device_type = "soc";
48 compatible = "fsl,p1020-immr", "simple-bus";
49 ranges = <0x0 0x0 0xffe00000 0x100000>;
50 bus-frequency = <0>; // Filled out by uboot.
51
52 ecm-law@0 {
53 compatible = "fsl,ecm-law";
54 reg = <0x0 0x1000>;
55 fsl,num-laws = <12>;
56 };
57
58 ecm@1000 {
59 compatible = "fsl,p1020-ecm", "fsl,ecm";
60 reg = <0x1000 0x1000>;
61 interrupts = <16 2>;
62 interrupt-parent = <&mpic>;
63 };
64
65 memory-controller@2000 {
66 compatible = "fsl,p1020-memory-controller";
67 reg = <0x2000 0x1000>;
68 interrupt-parent = <&mpic>;
69 interrupts = <16 2>;
70 };
71
72 i2c@3000 {
73 #address-cells = <1>;
74 #size-cells = <0>;
75 cell-index = <0>;
76 compatible = "fsl-i2c";
77 reg = <0x3000 0x100>;
78 interrupts = <43 2>;
79 interrupt-parent = <&mpic>;
80 dfsrr;
81 };
82
83 i2c@3100 {
84 #address-cells = <1>;
85 #size-cells = <0>;
86 cell-index = <1>;
87 compatible = "fsl-i2c";
88 reg = <0x3100 0x100>;
89 interrupts = <43 2>;
90 interrupt-parent = <&mpic>;
91 dfsrr;
92 };
93
94 serial0: serial@4500 {
95 cell-index = <0>;
96 device_type = "serial";
97 compatible = "ns16550";
98 reg = <0x4500 0x100>;
99 clock-frequency = <0>;
100 interrupts = <42 2>;
101 interrupt-parent = <&mpic>;
102 };
103
104 serial1: serial@4600 {
105 cell-index = <1>;
106 device_type = "serial";
107 compatible = "ns16550";
108 reg = <0x4600 0x100>;
109 clock-frequency = <0>;
110 interrupts = <42 2>;
111 interrupt-parent = <&mpic>;
112 };
113
114 spi@7000 {
115 cell-index = <0>;
116 #address-cells = <1>;
117 #size-cells = <0>;
118 compatible = "fsl,espi";
119 reg = <0x7000 0x1000>;
120 interrupts = <59 0x2>;
121 interrupt-parent = <&mpic>;
122 mode = "cpu";
123 };
124
125 gpio: gpio-controller@f000 {
126 #gpio-cells = <2>;
127 compatible = "fsl,mpc8572-gpio";
128 reg = <0xf000 0x100>;
129 interrupts = <47 0x2>;
130 interrupt-parent = <&mpic>;
131 gpio-controller;
132 };
133
134 L2: l2-cache-controller@20000 {
135 compatible = "fsl,p1020-l2-cache-controller";
136 reg = <0x20000 0x1000>;
137 cache-line-size = <32>; // 32 bytes
138 cache-size = <0x40000>; // L2,256K
139 interrupt-parent = <&mpic>;
140 interrupts = <16 2>;
141 };
142
143 dma@21300 {
144 #address-cells = <1>;
145 #size-cells = <1>;
146 compatible = "fsl,eloplus-dma";
147 reg = <0x21300 0x4>;
148 ranges = <0x0 0x21100 0x200>;
149 cell-index = <0>;
150 dma-channel@0 {
151 compatible = "fsl,eloplus-dma-channel";
152 reg = <0x0 0x80>;
153 cell-index = <0>;
154 interrupt-parent = <&mpic>;
155 interrupts = <20 2>;
156 };
157 dma-channel@80 {
158 compatible = "fsl,eloplus-dma-channel";
159 reg = <0x80 0x80>;
160 cell-index = <1>;
161 interrupt-parent = <&mpic>;
162 interrupts = <21 2>;
163 };
164 dma-channel@100 {
165 compatible = "fsl,eloplus-dma-channel";
166 reg = <0x100 0x80>;
167 cell-index = <2>;
168 interrupt-parent = <&mpic>;
169 interrupts = <22 2>;
170 };
171 dma-channel@180 {
172 compatible = "fsl,eloplus-dma-channel";
173 reg = <0x180 0x80>;
174 cell-index = <3>;
175 interrupt-parent = <&mpic>;
176 interrupts = <23 2>;
177 };
178 };
179
180 mdio@24000 {
181 #address-cells = <1>;
182 #size-cells = <0>;
183 compatible = "fsl,etsec2-mdio";
184 reg = <0x24000 0x1000 0xb0030 0x4>;
185
186 };
187
188 mdio@25000 {
189 #address-cells = <1>;
190 #size-cells = <0>;
191 compatible = "fsl,etsec2-tbi";
192 reg = <0x25000 0x1000 0xb1030 0x4>;
193
194 };
195
196 enet0: ethernet@b0000 {
197 #address-cells = <1>;
198 #size-cells = <1>;
199 device_type = "network";
200 model = "eTSEC";
201 compatible = "fsl,etsec2";
202 fsl,num_rx_queues = <0x8>;
203 fsl,num_tx_queues = <0x8>;
204 local-mac-address = [ 00 00 00 00 00 00 ];
205 interrupt-parent = <&mpic>;
206
207 queue-group@0 {
208 #address-cells = <1>;
209 #size-cells = <1>;
210 reg = <0xb0000 0x1000>;
211 interrupts = <29 2 30 2 34 2>;
212 };
213
214 queue-group@1 {
215 #address-cells = <1>;
216 #size-cells = <1>;
217 reg = <0xb4000 0x1000>;
218 interrupts = <17 2 18 2 24 2>;
219 };
220 };
221
222 enet1: ethernet@b1000 {
223 #address-cells = <1>;
224 #size-cells = <1>;
225 device_type = "network";
226 model = "eTSEC";
227 compatible = "fsl,etsec2";
228 fsl,num_rx_queues = <0x8>;
229 fsl,num_tx_queues = <0x8>;
230 local-mac-address = [ 00 00 00 00 00 00 ];
231 interrupt-parent = <&mpic>;
232
233 queue-group@0 {
234 #address-cells = <1>;
235 #size-cells = <1>;
236 reg = <0xb1000 0x1000>;
237 interrupts = <35 2 36 2 40 2>;
238 };
239
240 queue-group@1 {
241 #address-cells = <1>;
242 #size-cells = <1>;
243 reg = <0xb5000 0x1000>;
244 interrupts = <51 2 52 2 67 2>;
245 };
246 };
247
248 enet2: ethernet@b2000 {
249 #address-cells = <1>;
250 #size-cells = <1>;
251 device_type = "network";
252 model = "eTSEC";
253 compatible = "fsl,etsec2";
254 fsl,num_rx_queues = <0x8>;
255 fsl,num_tx_queues = <0x8>;
256 local-mac-address = [ 00 00 00 00 00 00 ];
257 interrupt-parent = <&mpic>;
258
259 queue-group@0 {
260 #address-cells = <1>;
261 #size-cells = <1>;
262 reg = <0xb2000 0x1000>;
263 interrupts = <31 2 32 2 33 2>;
264 };
265
266 queue-group@1 {
267 #address-cells = <1>;
268 #size-cells = <1>;
269 reg = <0xb6000 0x1000>;
270 interrupts = <25 2 26 2 27 2>;
271 };
272 };
273
274 usb@22000 {
275 #address-cells = <1>;
276 #size-cells = <0>;
277 compatible = "fsl-usb2-dr";
278 reg = <0x22000 0x1000>;
279 interrupt-parent = <&mpic>;
280 interrupts = <28 0x2>;
281 };
282
283 /* USB2 is shared with localbus, so it must be disabled
284 by default. We can't put 'status = "disabled";' here
285 since U-Boot doesn't clear the status property when
286 it enables USB2. OTOH, U-Boot does create a new node
287 when there isn't any. So, just comment it out.
288 usb@23000 {
289 #address-cells = <1>;
290 #size-cells = <0>;
291 compatible = "fsl-usb2-dr";
292 reg = <0x23000 0x1000>;
293 interrupt-parent = <&mpic>;
294 interrupts = <46 0x2>;
295 phy_type = "ulpi";
296 };
297 */
298
299 sdhci@2e000 {
300 compatible = "fsl,p1020-esdhc", "fsl,esdhc";
301 reg = <0x2e000 0x1000>;
302 interrupts = <72 0x2>;
303 interrupt-parent = <&mpic>;
304 /* Filled in by U-Boot */
305 clock-frequency = <0>;
306 };
307
308 crypto@30000 {
309 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
310 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
311 reg = <0x30000 0x10000>;
312 interrupts = <45 2 58 2>;
313 interrupt-parent = <&mpic>;
314 fsl,num-channels = <4>;
315 fsl,channel-fifo-len = <24>;
316 fsl,exec-units-mask = <0xbfe>;
317 fsl,descriptor-types-mask = <0x3ab0ebf>;
318 };
319
320 mpic: pic@40000 {
321 interrupt-controller;
322 #address-cells = <0>;
323 #interrupt-cells = <2>;
324 reg = <0x40000 0x40000>;
325 compatible = "chrp,open-pic";
326 device_type = "open-pic";
327 };
328
329 msi@41600 {
330 compatible = "fsl,p1020-msi", "fsl,mpic-msi";
331 reg = <0x41600 0x80>;
332 msi-available-ranges = <0 0x100>;
333 interrupts = <
334 0xe0 0
335 0xe1 0
336 0xe2 0
337 0xe3 0
338 0xe4 0
339 0xe5 0
340 0xe6 0
341 0xe7 0>;
342 interrupt-parent = <&mpic>;
343 };
344
345 global-utilities@e0000 { //global utilities block
346 compatible = "fsl,p1020-guts","fsl,p2020-guts";
347 reg = <0xe0000 0x1000>;
348 fsl,has-rstcr;
349 };
350 };
351
352 pci0: pcie@ffe09000 {
353 compatible = "fsl,mpc8548-pcie";
354 device_type = "pci";
355 #interrupt-cells = <1>;
356 #size-cells = <2>;
357 #address-cells = <3>;
358 reg = <0 0xffe09000 0 0x1000>;
359 bus-range = <0 255>;
360 clock-frequency = <33333333>;
361 interrupt-parent = <&mpic>;
362 interrupts = <16 2>;
363 };
364
365 pci1: pcie@ffe0a000 {
366 compatible = "fsl,mpc8548-pcie";
367 device_type = "pci";
368 #interrupt-cells = <1>;
369 #size-cells = <2>;
370 #address-cells = <3>;
371 reg = <0 0xffe0a000 0 0x1000>;
372 bus-range = <0 255>;
373 clock-frequency = <33333333>;
374 interrupt-parent = <&mpic>;
375 interrupts = <16 2>;
376 };
377};
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 59ef405c1c91..4f685a779f4c 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -52,7 +52,7 @@
52 #size-cells = <1>; 52 #size-cells = <1>;
53 compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; 53 compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
54 reg = <0 0xffe05000 0 0x1000>; 54 reg = <0 0xffe05000 0 0x1000>;
55 interrupts = <19 2>; 55 interrupts = <19 2 0 0>;
56 56
57 ranges = <0x0 0x0 0xf 0xe8000000 0x08000000 57 ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
58 0x1 0x0 0xf 0xe0000000 0x08000000 58 0x1 0x0 0xf 0xe0000000 0x08000000
@@ -157,7 +157,7 @@
157 * IRQ8 is generated if the "EVENT" switch is pressed 157 * IRQ8 is generated if the "EVENT" switch is pressed
158 * and PX_CTL[EVESEL] is set to 00. 158 * and PX_CTL[EVESEL] is set to 00.
159 */ 159 */
160 interrupts = <8 8>; 160 interrupts = <8 8 0 0>;
161 }; 161 };
162 }; 162 };
163 163
@@ -178,13 +178,13 @@
178 ecm@1000 { 178 ecm@1000 {
179 compatible = "fsl,p1022-ecm", "fsl,ecm"; 179 compatible = "fsl,p1022-ecm", "fsl,ecm";
180 reg = <0x1000 0x1000>; 180 reg = <0x1000 0x1000>;
181 interrupts = <16 2>; 181 interrupts = <16 2 0 0>;
182 }; 182 };
183 183
184 memory-controller@2000 { 184 memory-controller@2000 {
185 compatible = "fsl,p1022-memory-controller"; 185 compatible = "fsl,p1022-memory-controller";
186 reg = <0x2000 0x1000>; 186 reg = <0x2000 0x1000>;
187 interrupts = <16 2>; 187 interrupts = <16 2 0 0>;
188 }; 188 };
189 189
190 i2c@3000 { 190 i2c@3000 {
@@ -193,7 +193,7 @@
193 cell-index = <0>; 193 cell-index = <0>;
194 compatible = "fsl-i2c"; 194 compatible = "fsl-i2c";
195 reg = <0x3000 0x100>; 195 reg = <0x3000 0x100>;
196 interrupts = <43 2>; 196 interrupts = <43 2 0 0>;
197 dfsrr; 197 dfsrr;
198 }; 198 };
199 199
@@ -203,7 +203,7 @@
203 cell-index = <1>; 203 cell-index = <1>;
204 compatible = "fsl-i2c"; 204 compatible = "fsl-i2c";
205 reg = <0x3100 0x100>; 205 reg = <0x3100 0x100>;
206 interrupts = <43 2>; 206 interrupts = <43 2 0 0>;
207 dfsrr; 207 dfsrr;
208 208
209 wm8776:codec@1a { 209 wm8776:codec@1a {
@@ -220,7 +220,7 @@
220 compatible = "ns16550"; 220 compatible = "ns16550";
221 reg = <0x4500 0x100>; 221 reg = <0x4500 0x100>;
222 clock-frequency = <0>; 222 clock-frequency = <0>;
223 interrupts = <42 2>; 223 interrupts = <42 2 0 0>;
224 }; 224 };
225 225
226 serial1: serial@4600 { 226 serial1: serial@4600 {
@@ -229,7 +229,7 @@
229 compatible = "ns16550"; 229 compatible = "ns16550";
230 reg = <0x4600 0x100>; 230 reg = <0x4600 0x100>;
231 clock-frequency = <0>; 231 clock-frequency = <0>;
232 interrupts = <42 2>; 232 interrupts = <42 2 0 0>;
233 }; 233 };
234 234
235 spi@7000 { 235 spi@7000 {
@@ -238,7 +238,7 @@
238 #size-cells = <0>; 238 #size-cells = <0>;
239 compatible = "fsl,espi"; 239 compatible = "fsl,espi";
240 reg = <0x7000 0x1000>; 240 reg = <0x7000 0x1000>;
241 interrupts = <59 0x2>; 241 interrupts = <59 0x2 0 0>;
242 espi,num-ss-bits = <4>; 242 espi,num-ss-bits = <4>;
243 mode = "cpu"; 243 mode = "cpu";
244 244
@@ -275,7 +275,7 @@
275 compatible = "fsl,mpc8610-ssi"; 275 compatible = "fsl,mpc8610-ssi";
276 cell-index = <0>; 276 cell-index = <0>;
277 reg = <0x15000 0x100>; 277 reg = <0x15000 0x100>;
278 interrupts = <75 2>; 278 interrupts = <75 2 0 0>;
279 fsl,mode = "i2s-slave"; 279 fsl,mode = "i2s-slave";
280 codec-handle = <&wm8776>; 280 codec-handle = <&wm8776>;
281 fsl,playback-dma = <&dma00>; 281 fsl,playback-dma = <&dma00>;
@@ -294,25 +294,25 @@
294 compatible = "fsl,ssi-dma-channel"; 294 compatible = "fsl,ssi-dma-channel";
295 reg = <0x0 0x80>; 295 reg = <0x0 0x80>;
296 cell-index = <0>; 296 cell-index = <0>;
297 interrupts = <76 2>; 297 interrupts = <76 2 0 0>;
298 }; 298 };
299 dma01: dma-channel@80 { 299 dma01: dma-channel@80 {
300 compatible = "fsl,ssi-dma-channel"; 300 compatible = "fsl,ssi-dma-channel";
301 reg = <0x80 0x80>; 301 reg = <0x80 0x80>;
302 cell-index = <1>; 302 cell-index = <1>;
303 interrupts = <77 2>; 303 interrupts = <77 2 0 0>;
304 }; 304 };
305 dma-channel@100 { 305 dma-channel@100 {
306 compatible = "fsl,eloplus-dma-channel"; 306 compatible = "fsl,eloplus-dma-channel";
307 reg = <0x100 0x80>; 307 reg = <0x100 0x80>;
308 cell-index = <2>; 308 cell-index = <2>;
309 interrupts = <78 2>; 309 interrupts = <78 2 0 0>;
310 }; 310 };
311 dma-channel@180 { 311 dma-channel@180 {
312 compatible = "fsl,eloplus-dma-channel"; 312 compatible = "fsl,eloplus-dma-channel";
313 reg = <0x180 0x80>; 313 reg = <0x180 0x80>;
314 cell-index = <3>; 314 cell-index = <3>;
315 interrupts = <79 2>; 315 interrupts = <79 2 0 0>;
316 }; 316 };
317 }; 317 };
318 318
@@ -320,7 +320,7 @@
320 #gpio-cells = <2>; 320 #gpio-cells = <2>;
321 compatible = "fsl,mpc8572-gpio"; 321 compatible = "fsl,mpc8572-gpio";
322 reg = <0xf000 0x100>; 322 reg = <0xf000 0x100>;
323 interrupts = <47 0x2>; 323 interrupts = <47 0x2 0 0>;
324 gpio-controller; 324 gpio-controller;
325 }; 325 };
326 326
@@ -329,7 +329,7 @@
329 reg = <0x20000 0x1000>; 329 reg = <0x20000 0x1000>;
330 cache-line-size = <32>; // 32 bytes 330 cache-line-size = <32>; // 32 bytes
331 cache-size = <0x40000>; // L2, 256K 331 cache-size = <0x40000>; // L2, 256K
332 interrupts = <16 2>; 332 interrupts = <16 2 0 0>;
333 }; 333 };
334 334
335 dma@21300 { 335 dma@21300 {
@@ -343,25 +343,25 @@
343 compatible = "fsl,eloplus-dma-channel"; 343 compatible = "fsl,eloplus-dma-channel";
344 reg = <0x0 0x80>; 344 reg = <0x0 0x80>;
345 cell-index = <0>; 345 cell-index = <0>;
346 interrupts = <20 2>; 346 interrupts = <20 2 0 0>;
347 }; 347 };
348 dma-channel@80 { 348 dma-channel@80 {
349 compatible = "fsl,eloplus-dma-channel"; 349 compatible = "fsl,eloplus-dma-channel";
350 reg = <0x80 0x80>; 350 reg = <0x80 0x80>;
351 cell-index = <1>; 351 cell-index = <1>;
352 interrupts = <21 2>; 352 interrupts = <21 2 0 0>;
353 }; 353 };
354 dma-channel@100 { 354 dma-channel@100 {
355 compatible = "fsl,eloplus-dma-channel"; 355 compatible = "fsl,eloplus-dma-channel";
356 reg = <0x100 0x80>; 356 reg = <0x100 0x80>;
357 cell-index = <2>; 357 cell-index = <2>;
358 interrupts = <22 2>; 358 interrupts = <22 2 0 0>;
359 }; 359 };
360 dma-channel@180 { 360 dma-channel@180 {
361 compatible = "fsl,eloplus-dma-channel"; 361 compatible = "fsl,eloplus-dma-channel";
362 reg = <0x180 0x80>; 362 reg = <0x180 0x80>;
363 cell-index = <3>; 363 cell-index = <3>;
364 interrupts = <23 2>; 364 interrupts = <23 2 0 0>;
365 }; 365 };
366 }; 366 };
367 367
@@ -370,7 +370,7 @@
370 #size-cells = <0>; 370 #size-cells = <0>;
371 compatible = "fsl-usb2-dr"; 371 compatible = "fsl-usb2-dr";
372 reg = <0x22000 0x1000>; 372 reg = <0x22000 0x1000>;
373 interrupts = <28 0x2>; 373 interrupts = <28 0x2 0 0>;
374 phy_type = "ulpi"; 374 phy_type = "ulpi";
375 }; 375 };
376 376
@@ -381,11 +381,11 @@
381 reg = <0x24000 0x1000 0xb0030 0x4>; 381 reg = <0x24000 0x1000 0xb0030 0x4>;
382 382
383 phy0: ethernet-phy@0 { 383 phy0: ethernet-phy@0 {
384 interrupts = <3 1>; 384 interrupts = <3 1 0 0>;
385 reg = <0x1>; 385 reg = <0x1>;
386 }; 386 };
387 phy1: ethernet-phy@1 { 387 phy1: ethernet-phy@1 {
388 interrupts = <9 1>; 388 interrupts = <9 1 0 0>;
389 reg = <0x2>; 389 reg = <0x2>;
390 }; 390 };
391 }; 391 };
@@ -416,13 +416,13 @@
416 #address-cells = <1>; 416 #address-cells = <1>;
417 #size-cells = <1>; 417 #size-cells = <1>;
418 reg = <0xB0000 0x1000>; 418 reg = <0xB0000 0x1000>;
419 interrupts = <29 2 30 2 34 2>; 419 interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
420 }; 420 };
421 queue-group@1{ 421 queue-group@1{
422 #address-cells = <1>; 422 #address-cells = <1>;
423 #size-cells = <1>; 423 #size-cells = <1>;
424 reg = <0xB4000 0x1000>; 424 reg = <0xB4000 0x1000>;
425 interrupts = <17 2 18 2 24 2>; 425 interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>;
426 }; 426 };
427 }; 427 };
428 428
@@ -443,20 +443,20 @@
443 #address-cells = <1>; 443 #address-cells = <1>;
444 #size-cells = <1>; 444 #size-cells = <1>;
445 reg = <0xB1000 0x1000>; 445 reg = <0xB1000 0x1000>;
446 interrupts = <35 2 36 2 40 2>; 446 interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
447 }; 447 };
448 queue-group@1{ 448 queue-group@1{
449 #address-cells = <1>; 449 #address-cells = <1>;
450 #size-cells = <1>; 450 #size-cells = <1>;
451 reg = <0xB5000 0x1000>; 451 reg = <0xB5000 0x1000>;
452 interrupts = <51 2 52 2 67 2>; 452 interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>;
453 }; 453 };
454 }; 454 };
455 455
456 sdhci@2e000 { 456 sdhci@2e000 {
457 compatible = "fsl,p1022-esdhc", "fsl,esdhc"; 457 compatible = "fsl,p1022-esdhc", "fsl,esdhc";
458 reg = <0x2e000 0x1000>; 458 reg = <0x2e000 0x1000>;
459 interrupts = <72 0x2>; 459 interrupts = <72 0x2 0 0>;
460 fsl,sdhci-auto-cmd12; 460 fsl,sdhci-auto-cmd12;
461 /* Filled in by U-Boot */ 461 /* Filled in by U-Boot */
462 clock-frequency = <0>; 462 clock-frequency = <0>;
@@ -467,7 +467,7 @@
467 "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", 467 "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
468 "fsl,sec2.0"; 468 "fsl,sec2.0";
469 reg = <0x30000 0x10000>; 469 reg = <0x30000 0x10000>;
470 interrupts = <45 2 58 2>; 470 interrupts = <45 2 0 0 58 2 0 0>;
471 fsl,num-channels = <4>; 471 fsl,num-channels = <4>;
472 fsl,channel-fifo-len = <24>; 472 fsl,channel-fifo-len = <24>;
473 fsl,exec-units-mask = <0x97c>; 473 fsl,exec-units-mask = <0x97c>;
@@ -478,14 +478,14 @@
478 compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; 478 compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
479 reg = <0x18000 0x1000>; 479 reg = <0x18000 0x1000>;
480 cell-index = <1>; 480 cell-index = <1>;
481 interrupts = <74 0x2>; 481 interrupts = <74 0x2 0 0>;
482 }; 482 };
483 483
484 sata@19000 { 484 sata@19000 {
485 compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; 485 compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
486 reg = <0x19000 0x1000>; 486 reg = <0x19000 0x1000>;
487 cell-index = <2>; 487 cell-index = <2>;
488 interrupts = <41 0x2>; 488 interrupts = <41 0x2 0 0>;
489 }; 489 };
490 490
491 power@e0070{ 491 power@e0070{
@@ -496,21 +496,33 @@
496 display@10000 { 496 display@10000 {
497 compatible = "fsl,diu", "fsl,p1022-diu"; 497 compatible = "fsl,diu", "fsl,p1022-diu";
498 reg = <0x10000 1000>; 498 reg = <0x10000 1000>;
499 interrupts = <64 2>; 499 interrupts = <64 2 0 0>;
500 }; 500 };
501 501
502 timer@41100 { 502 timer@41100 {
503 compatible = "fsl,mpic-global-timer"; 503 compatible = "fsl,mpic-global-timer";
504 reg = <0x41100 0x204>; 504 reg = <0x41100 0x100 0x41300 4>;
505 interrupts = <0xf7 0x2>; 505 interrupts = <0 0 3 0
506 1 0 3 0
507 2 0 3 0
508 3 0 3 0>;
509 };
510
511 timer@42100 {
512 compatible = "fsl,mpic-global-timer";
513 reg = <0x42100 0x100 0x42300 4>;
514 interrupts = <4 0 3 0
515 5 0 3 0
516 6 0 3 0
517 7 0 3 0>;
506 }; 518 };
507 519
508 mpic: pic@40000 { 520 mpic: pic@40000 {
509 interrupt-controller; 521 interrupt-controller;
510 #address-cells = <0>; 522 #address-cells = <0>;
511 #interrupt-cells = <2>; 523 #interrupt-cells = <4>;
512 reg = <0x40000 0x40000>; 524 reg = <0x40000 0x40000>;
513 compatible = "chrp,open-pic"; 525 compatible = "fsl,mpic";
514 device_type = "open-pic"; 526 device_type = "open-pic";
515 }; 527 };
516 528
@@ -519,14 +531,14 @@
519 reg = <0x41600 0x80>; 531 reg = <0x41600 0x80>;
520 msi-available-ranges = <0 0x100>; 532 msi-available-ranges = <0 0x100>;
521 interrupts = < 533 interrupts = <
522 0xe0 0 534 0xe0 0 0 0
523 0xe1 0 535 0xe1 0 0 0
524 0xe2 0 536 0xe2 0 0 0
525 0xe3 0 537 0xe3 0 0 0
526 0xe4 0 538 0xe4 0 0 0
527 0xe5 0 539 0xe5 0 0 0
528 0xe6 0 540 0xe6 0 0 0
529 0xe7 0>; 541 0xe7 0 0 0>;
530 }; 542 };
531 543
532 global-utilities@e0000 { //global utilities block 544 global-utilities@e0000 { //global utilities block
@@ -547,7 +559,7 @@
547 ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000 559 ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000
548 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; 560 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
549 clock-frequency = <33333333>; 561 clock-frequency = <33333333>;
550 interrupts = <16 2>; 562 interrupts = <16 2 0 0>;
551 interrupt-map-mask = <0xf800 0 0 7>; 563 interrupt-map-mask = <0xf800 0 0 7>;
552 interrupt-map = < 564 interrupt-map = <
553 /* IDSEL 0x0 */ 565 /* IDSEL 0x0 */
@@ -582,7 +594,7 @@
582 ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000 594 ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000
583 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>; 595 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
584 clock-frequency = <33333333>; 596 clock-frequency = <33333333>;
585 interrupts = <16 2>; 597 interrupts = <16 2 0 0>;
586 interrupt-map-mask = <0xf800 0 0 7>; 598 interrupt-map-mask = <0xf800 0 0 7>;
587 interrupt-map = < 599 interrupt-map = <
588 /* IDSEL 0x0 */ 600 /* IDSEL 0x0 */
@@ -618,7 +630,7 @@
618 ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000 630 ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
619 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>; 631 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
620 clock-frequency = <33333333>; 632 clock-frequency = <33333333>;
621 interrupts = <16 2>; 633 interrupts = <16 2 0 0>;
622 interrupt-map-mask = <0xf800 0 0 7>; 634 interrupt-map-mask = <0xf800 0 0 7>;
623 interrupt-map = < 635 interrupt-map = <
624 /* IDSEL 0x0 */ 636 /* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts
index 11019142813c..2bcf3683d223 100644
--- a/arch/powerpc/boot/dts/p2020ds.dts
+++ b/arch/powerpc/boot/dts/p2020ds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P2020 DS Device Tree Source 2 * P2020 DS Device Tree Source
3 * 3 *
4 * Copyright 2009 Freescale Semiconductor Inc. 4 * Copyright 2009-2011 Freescale Semiconductor Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -9,12 +9,11 @@
9 * option) any later version. 9 * option) any later version.
10 */ 10 */
11 11
12/dts-v1/; 12/include/ "p2020si.dtsi"
13
13/ { 14/ {
14 model = "fsl,P2020"; 15 model = "fsl,P2020DS";
15 compatible = "fsl,P2020DS"; 16 compatible = "fsl,P2020DS";
16 #address-cells = <2>;
17 #size-cells = <2>;
18 17
19 aliases { 18 aliases {
20 ethernet0 = &enet0; 19 ethernet0 = &enet0;
@@ -27,35 +26,13 @@
27 pci2 = &pci2; 26 pci2 = &pci2;
28 }; 27 };
29 28
30 cpus {
31 #address-cells = <1>;
32 #size-cells = <0>;
33
34 PowerPC,P2020@0 {
35 device_type = "cpu";
36 reg = <0x0>;
37 next-level-cache = <&L2>;
38 };
39
40 PowerPC,P2020@1 {
41 device_type = "cpu";
42 reg = <0x1>;
43 next-level-cache = <&L2>;
44 };
45 };
46 29
47 memory { 30 memory {
48 device_type = "memory"; 31 device_type = "memory";
49 }; 32 };
50 33
51 localbus@ffe05000 { 34 localbus@ffe05000 {
52 #address-cells = <2>;
53 #size-cells = <1>;
54 compatible = "fsl,elbc", "simple-bus"; 35 compatible = "fsl,elbc", "simple-bus";
55 reg = <0 0xffe05000 0 0x1000>;
56 interrupts = <19 2>;
57 interrupt-parent = <&mpic>;
58
59 ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 36 ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
60 0x1 0x0 0x0 0xe0000000 0x08000000 37 0x1 0x0 0x0 0xe0000000 0x08000000
61 0x2 0x0 0x0 0xffa00000 0x00040000 38 0x2 0x0 0x0 0xffa00000 0x00040000
@@ -158,352 +135,77 @@
158 }; 135 };
159 136
160 soc@ffe00000 { 137 soc@ffe00000 {
161 #address-cells = <1>;
162 #size-cells = <1>;
163 device_type = "soc";
164 compatible = "fsl,p2020-immr", "simple-bus";
165 ranges = <0x0 0 0xffe00000 0x100000>;
166 bus-frequency = <0>; // Filled out by uboot.
167
168 ecm-law@0 {
169 compatible = "fsl,ecm-law";
170 reg = <0x0 0x1000>;
171 fsl,num-laws = <12>;
172 };
173
174 ecm@1000 {
175 compatible = "fsl,p2020-ecm", "fsl,ecm";
176 reg = <0x1000 0x1000>;
177 interrupts = <17 2>;
178 interrupt-parent = <&mpic>;
179 };
180
181 memory-controller@2000 {
182 compatible = "fsl,p2020-memory-controller";
183 reg = <0x2000 0x1000>;
184 interrupt-parent = <&mpic>;
185 interrupts = <18 2>;
186 };
187
188 i2c@3000 {
189 #address-cells = <1>;
190 #size-cells = <0>;
191 cell-index = <0>;
192 compatible = "fsl-i2c";
193 reg = <0x3000 0x100>;
194 interrupts = <43 2>;
195 interrupt-parent = <&mpic>;
196 dfsrr;
197 };
198
199 i2c@3100 {
200 #address-cells = <1>;
201 #size-cells = <0>;
202 cell-index = <1>;
203 compatible = "fsl-i2c";
204 reg = <0x3100 0x100>;
205 interrupts = <43 2>;
206 interrupt-parent = <&mpic>;
207 dfsrr;
208 };
209 138
210 serial0: serial@4500 { 139 usb@22000 {
211 cell-index = <0>; 140 phy_type = "ulpi";
212 device_type = "serial";
213 compatible = "ns16550";
214 reg = <0x4500 0x100>;
215 clock-frequency = <0>;
216 interrupts = <42 2>;
217 interrupt-parent = <&mpic>;
218 };
219
220 serial1: serial@4600 {
221 cell-index = <1>;
222 device_type = "serial";
223 compatible = "ns16550";
224 reg = <0x4600 0x100>;
225 clock-frequency = <0>;
226 interrupts = <42 2>;
227 interrupt-parent = <&mpic>;
228 };
229
230 spi@7000 {
231 compatible = "fsl,espi";
232 reg = <0x7000 0x1000>;
233 interrupts = <59 0x2>;
234 interrupt-parent = <&mpic>;
235 }; 141 };
236 142
237 dma@c300 { 143 mdio@24520 {
238 #address-cells = <1>; 144 phy0: ethernet-phy@0 {
239 #size-cells = <1>;
240 compatible = "fsl,eloplus-dma";
241 reg = <0xc300 0x4>;
242 ranges = <0x0 0xc100 0x200>;
243 cell-index = <1>;
244 dma-channel@0 {
245 compatible = "fsl,eloplus-dma-channel";
246 reg = <0x0 0x80>;
247 cell-index = <0>;
248 interrupt-parent = <&mpic>; 145 interrupt-parent = <&mpic>;
249 interrupts = <76 2>; 146 interrupts = <3 1>;
147 reg = <0x0>;
250 }; 148 };
251 dma-channel@80 { 149 phy1: ethernet-phy@1 {
252 compatible = "fsl,eloplus-dma-channel";
253 reg = <0x80 0x80>;
254 cell-index = <1>;
255 interrupt-parent = <&mpic>; 150 interrupt-parent = <&mpic>;
256 interrupts = <77 2>; 151 interrupts = <3 1>;
152 reg = <0x1>;
257 }; 153 };
258 dma-channel@100 { 154 phy2: ethernet-phy@2 {
259 compatible = "fsl,eloplus-dma-channel";
260 reg = <0x100 0x80>;
261 cell-index = <2>;
262 interrupt-parent = <&mpic>; 155 interrupt-parent = <&mpic>;
263 interrupts = <78 2>; 156 interrupts = <3 1>;
157 reg = <0x2>;
264 }; 158 };
265 dma-channel@180 { 159 tbi0: tbi-phy@11 {
266 compatible = "fsl,eloplus-dma-channel"; 160 reg = <0x11>;
267 reg = <0x180 0x80>; 161 device_type = "tbi-phy";
268 cell-index = <3>;
269 interrupt-parent = <&mpic>;
270 interrupts = <79 2>;
271 }; 162 };
272 };
273 163
274 gpio: gpio-controller@f000 {
275 #gpio-cells = <2>;
276 compatible = "fsl,mpc8572-gpio";
277 reg = <0xf000 0x100>;
278 interrupts = <47 0x2>;
279 interrupt-parent = <&mpic>;
280 gpio-controller;
281 }; 164 };
282 165
283 L2: l2-cache-controller@20000 { 166 mdio@25520 {
284 compatible = "fsl,p2020-l2-cache-controller"; 167 tbi1: tbi-phy@11 {
285 reg = <0x20000 0x1000>; 168 reg = <0x11>;
286 cache-line-size = <32>; // 32 bytes 169 device_type = "tbi-phy";
287 cache-size = <0x80000>; // L2, 512k 170 };
288 interrupt-parent = <&mpic>;
289 interrupts = <16 2>;
290 }; 171 };
291 172
292 dma@21300 { 173 mdio@26520 {
293 #address-cells = <1>; 174 tbi2: tbi-phy@11 {
294 #size-cells = <1>; 175 reg = <0x11>;
295 compatible = "fsl,eloplus-dma"; 176 device_type = "tbi-phy";
296 reg = <0x21300 0x4>;
297 ranges = <0x0 0x21100 0x200>;
298 cell-index = <0>;
299 dma-channel@0 {
300 compatible = "fsl,eloplus-dma-channel";
301 reg = <0x0 0x80>;
302 cell-index = <0>;
303 interrupt-parent = <&mpic>;
304 interrupts = <20 2>;
305 };
306 dma-channel@80 {
307 compatible = "fsl,eloplus-dma-channel";
308 reg = <0x80 0x80>;
309 cell-index = <1>;
310 interrupt-parent = <&mpic>;
311 interrupts = <21 2>;
312 }; 177 };
313 dma-channel@100 {
314 compatible = "fsl,eloplus-dma-channel";
315 reg = <0x100 0x80>;
316 cell-index = <2>;
317 interrupt-parent = <&mpic>;
318 interrupts = <22 2>;
319 };
320 dma-channel@180 {
321 compatible = "fsl,eloplus-dma-channel";
322 reg = <0x180 0x80>;
323 cell-index = <3>;
324 interrupt-parent = <&mpic>;
325 interrupts = <23 2>;
326 };
327 };
328 178
329 usb@22000 {
330 #address-cells = <1>;
331 #size-cells = <0>;
332 compatible = "fsl-usb2-dr";
333 reg = <0x22000 0x1000>;
334 interrupt-parent = <&mpic>;
335 interrupts = <28 0x2>;
336 phy_type = "ulpi";
337 }; 179 };
338 180
339 enet0: ethernet@24000 { 181 enet0: ethernet@24000 {
340 #address-cells = <1>;
341 #size-cells = <1>;
342 cell-index = <0>;
343 device_type = "network";
344 model = "eTSEC";
345 compatible = "gianfar";
346 reg = <0x24000 0x1000>;
347 ranges = <0x0 0x24000 0x1000>;
348 local-mac-address = [ 00 00 00 00 00 00 ];
349 interrupts = <29 2 30 2 34 2>;
350 interrupt-parent = <&mpic>;
351 tbi-handle = <&tbi0>; 182 tbi-handle = <&tbi0>;
352 phy-handle = <&phy0>; 183 phy-handle = <&phy0>;
353 phy-connection-type = "rgmii-id"; 184 phy-connection-type = "rgmii-id";
354
355 mdio@520 {
356 #address-cells = <1>;
357 #size-cells = <0>;
358 compatible = "fsl,gianfar-mdio";
359 reg = <0x520 0x20>;
360
361 phy0: ethernet-phy@0 {
362 interrupt-parent = <&mpic>;
363 interrupts = <3 1>;
364 reg = <0x0>;
365 };
366 phy1: ethernet-phy@1 {
367 interrupt-parent = <&mpic>;
368 interrupts = <3 1>;
369 reg = <0x1>;
370 };
371 phy2: ethernet-phy@2 {
372 interrupt-parent = <&mpic>;
373 interrupts = <3 1>;
374 reg = <0x2>;
375 };
376 tbi0: tbi-phy@11 {
377 reg = <0x11>;
378 device_type = "tbi-phy";
379 };
380 };
381 }; 185 };
382 186
383 enet1: ethernet@25000 { 187 enet1: ethernet@25000 {
384 #address-cells = <1>;
385 #size-cells = <1>;
386 cell-index = <1>;
387 device_type = "network";
388 model = "eTSEC";
389 compatible = "gianfar";
390 reg = <0x25000 0x1000>;
391 ranges = <0x0 0x25000 0x1000>;
392 local-mac-address = [ 00 00 00 00 00 00 ];
393 interrupts = <35 2 36 2 40 2>;
394 interrupt-parent = <&mpic>;
395 tbi-handle = <&tbi1>; 188 tbi-handle = <&tbi1>;
396 phy-handle = <&phy1>; 189 phy-handle = <&phy1>;
397 phy-connection-type = "rgmii-id"; 190 phy-connection-type = "rgmii-id";
398 191
399 mdio@520 {
400 #address-cells = <1>;
401 #size-cells = <0>;
402 compatible = "fsl,gianfar-tbi";
403 reg = <0x520 0x20>;
404
405 tbi1: tbi-phy@11 {
406 reg = <0x11>;
407 device_type = "tbi-phy";
408 };
409 };
410 }; 192 };
411 193
412 enet2: ethernet@26000 { 194 enet2: ethernet@26000 {
413 #address-cells = <1>;
414 #size-cells = <1>;
415 cell-index = <2>;
416 device_type = "network";
417 model = "eTSEC";
418 compatible = "gianfar";
419 reg = <0x26000 0x1000>;
420 ranges = <0x0 0x26000 0x1000>;
421 local-mac-address = [ 00 00 00 00 00 00 ];
422 interrupts = <31 2 32 2 33 2>;
423 interrupt-parent = <&mpic>;
424 tbi-handle = <&tbi2>; 195 tbi-handle = <&tbi2>;
425 phy-handle = <&phy2>; 196 phy-handle = <&phy2>;
426 phy-connection-type = "rgmii-id"; 197 phy-connection-type = "rgmii-id";
427
428 mdio@520 {
429 #address-cells = <1>;
430 #size-cells = <0>;
431 compatible = "fsl,gianfar-tbi";
432 reg = <0x520 0x20>;
433
434 tbi2: tbi-phy@11 {
435 reg = <0x11>;
436 device_type = "tbi-phy";
437 };
438 };
439 };
440
441 sdhci@2e000 {
442 compatible = "fsl,p2020-esdhc", "fsl,esdhc";
443 reg = <0x2e000 0x1000>;
444 interrupts = <72 0x2>;
445 interrupt-parent = <&mpic>;
446 /* Filled in by U-Boot */
447 clock-frequency = <0>;
448 };
449
450 crypto@30000 {
451 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
452 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
453 reg = <0x30000 0x10000>;
454 interrupts = <45 2 58 2>;
455 interrupt-parent = <&mpic>;
456 fsl,num-channels = <4>;
457 fsl,channel-fifo-len = <24>;
458 fsl,exec-units-mask = <0xbfe>;
459 fsl,descriptor-types-mask = <0x3ab0ebf>;
460 }; 198 };
461 199
462 mpic: pic@40000 {
463 interrupt-controller;
464 #address-cells = <0>;
465 #interrupt-cells = <2>;
466 reg = <0x40000 0x40000>;
467 compatible = "chrp,open-pic";
468 device_type = "open-pic";
469 };
470 200
471 msi@41600 { 201 msi@41600 {
472 compatible = "fsl,mpic-msi"; 202 compatible = "fsl,mpic-msi";
473 reg = <0x41600 0x80>;
474 msi-available-ranges = <0 0x100>;
475 interrupts = <
476 0xe0 0
477 0xe1 0
478 0xe2 0
479 0xe3 0
480 0xe4 0
481 0xe5 0
482 0xe6 0
483 0xe7 0>;
484 interrupt-parent = <&mpic>;
485 };
486
487 global-utilities@e0000 { //global utilities block
488 compatible = "fsl,p2020-guts";
489 reg = <0xe0000 0x1000>;
490 fsl,has-rstcr;
491 }; 203 };
492 }; 204 };
493 205
494 pci0: pcie@ffe08000 { 206 pci0: pcie@ffe08000 {
495 compatible = "fsl,mpc8548-pcie";
496 device_type = "pci";
497 #interrupt-cells = <1>;
498 #size-cells = <2>;
499 #address-cells = <3>;
500 reg = <0 0xffe08000 0 0x1000>;
501 bus-range = <0 255>;
502 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 207 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
503 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 208 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
504 clock-frequency = <33333333>;
505 interrupt-parent = <&mpic>;
506 interrupts = <24 2>;
507 interrupt-map-mask = <0xf800 0x0 0x0 0x7>; 209 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
508 interrupt-map = < 210 interrupt-map = <
509 /* IDSEL 0x0 */ 211 /* IDSEL 0x0 */
@@ -528,18 +230,8 @@
528 }; 230 };
529 231
530 pci1: pcie@ffe09000 { 232 pci1: pcie@ffe09000 {
531 compatible = "fsl,mpc8548-pcie";
532 device_type = "pci";
533 #interrupt-cells = <1>;
534 #size-cells = <2>;
535 #address-cells = <3>;
536 reg = <0 0xffe09000 0 0x1000>;
537 bus-range = <0 255>;
538 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 233 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
539 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 234 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
540 clock-frequency = <33333333>;
541 interrupt-parent = <&mpic>;
542 interrupts = <25 2>;
543 interrupt-map-mask = <0xff00 0x0 0x0 0x7>; 235 interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
544 interrupt-map = < 236 interrupt-map = <
545 237
@@ -667,18 +359,8 @@
667 }; 359 };
668 360
669 pci2: pcie@ffe0a000 { 361 pci2: pcie@ffe0a000 {
670 compatible = "fsl,mpc8548-pcie";
671 device_type = "pci";
672 #interrupt-cells = <1>;
673 #size-cells = <2>;
674 #address-cells = <3>;
675 reg = <0 0xffe0a000 0 0x1000>;
676 bus-range = <0 255>;
677 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 362 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
678 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; 363 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
679 clock-frequency = <33333333>;
680 interrupt-parent = <&mpic>;
681 interrupts = <26 2>;
682 interrupt-map-mask = <0xf800 0x0 0x0 0x7>; 364 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
683 interrupt-map = < 365 interrupt-map = <
684 /* IDSEL 0x0 */ 366 /* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index e2d48fd4416e..3782a58f13be 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -9,12 +9,11 @@
9 * option) any later version. 9 * option) any later version.
10 */ 10 */
11 11
12/dts-v1/; 12/include/ "p2020si.dtsi"
13
13/ { 14/ {
14 model = "fsl,P2020"; 15 model = "fsl,P2020RDB";
15 compatible = "fsl,P2020RDB"; 16 compatible = "fsl,P2020RDB";
16 #address-cells = <2>;
17 #size-cells = <2>;
18 17
19 aliases { 18 aliases {
20 ethernet0 = &enet0; 19 ethernet0 = &enet0;
@@ -26,34 +25,11 @@
26 pci1 = &pci1; 25 pci1 = &pci1;
27 }; 26 };
28 27
29 cpus {
30 #address-cells = <1>;
31 #size-cells = <0>;
32
33 PowerPC,P2020@0 {
34 device_type = "cpu";
35 reg = <0x0>;
36 next-level-cache = <&L2>;
37 };
38
39 PowerPC,P2020@1 {
40 device_type = "cpu";
41 reg = <0x1>;
42 next-level-cache = <&L2>;
43 };
44 };
45
46 memory { 28 memory {
47 device_type = "memory"; 29 device_type = "memory";
48 }; 30 };
49 31
50 localbus@ffe05000 { 32 localbus@ffe05000 {
51 #address-cells = <2>;
52 #size-cells = <1>;
53 compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
54 reg = <0 0xffe05000 0 0x1000>;
55 interrupts = <19 2>;
56 interrupt-parent = <&mpic>;
57 33
58 /* NOR and NAND Flashes */ 34 /* NOR and NAND Flashes */
59 ranges = <0x0 0x0 0x0 0xef000000 0x01000000 35 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
@@ -165,90 +141,16 @@
165 }; 141 };
166 142
167 soc@ffe00000 { 143 soc@ffe00000 {
168 #address-cells = <1>;
169 #size-cells = <1>;
170 device_type = "soc";
171 compatible = "fsl,p2020-immr", "simple-bus";
172 ranges = <0x0 0x0 0xffe00000 0x100000>;
173 bus-frequency = <0>; // Filled out by uboot.
174
175 ecm-law@0 {
176 compatible = "fsl,ecm-law";
177 reg = <0x0 0x1000>;
178 fsl,num-laws = <12>;
179 };
180
181 ecm@1000 {
182 compatible = "fsl,p2020-ecm", "fsl,ecm";
183 reg = <0x1000 0x1000>;
184 interrupts = <17 2>;
185 interrupt-parent = <&mpic>;
186 };
187
188 memory-controller@2000 {
189 compatible = "fsl,p2020-memory-controller";
190 reg = <0x2000 0x1000>;
191 interrupt-parent = <&mpic>;
192 interrupts = <18 2>;
193 };
194
195 i2c@3000 { 144 i2c@3000 {
196 #address-cells = <1>;
197 #size-cells = <0>;
198 cell-index = <0>;
199 compatible = "fsl-i2c";
200 reg = <0x3000 0x100>;
201 interrupts = <43 2>;
202 interrupt-parent = <&mpic>;
203 dfsrr;
204 rtc@68 { 145 rtc@68 {
205 compatible = "dallas,ds1339"; 146 compatible = "dallas,ds1339";
206 reg = <0x68>; 147 reg = <0x68>;
207 }; 148 };
208 }; 149 };
209 150
210 i2c@3100 { 151 spi@7000 {
211 #address-cells = <1>;
212 #size-cells = <0>;
213 cell-index = <1>;
214 compatible = "fsl-i2c";
215 reg = <0x3100 0x100>;
216 interrupts = <43 2>;
217 interrupt-parent = <&mpic>;
218 dfsrr;
219 };
220
221 serial0: serial@4500 {
222 cell-index = <0>;
223 device_type = "serial";
224 compatible = "ns16550";
225 reg = <0x4500 0x100>;
226 clock-frequency = <0>;
227 interrupts = <42 2>;
228 interrupt-parent = <&mpic>;
229 };
230
231 serial1: serial@4600 {
232 cell-index = <1>;
233 device_type = "serial";
234 compatible = "ns16550";
235 reg = <0x4600 0x100>;
236 clock-frequency = <0>;
237 interrupts = <42 2>;
238 interrupt-parent = <&mpic>;
239 };
240 152
241 spi@7000 { 153 fsl_m25p80@0 {
242 cell-index = <0>;
243 #address-cells = <1>;
244 #size-cells = <0>;
245 compatible = "fsl,espi";
246 reg = <0x7000 0x1000>;
247 interrupts = <59 0x2>;
248 interrupt-parent = <&mpic>;
249 mode = "cpu";
250
251 fsl_m25p80@0 {
252 #address-cells = <1>; 154 #address-cells = <1>;
253 #size-cells = <1>; 155 #size-cells = <1>;
254 compatible = "fsl,espi-flash"; 156 compatible = "fsl,espi-flash";
@@ -294,254 +196,68 @@
294 }; 196 };
295 }; 197 };
296 198
297 dma@c300 { 199 usb@22000 {
298 #address-cells = <1>; 200 phy_type = "ulpi";
299 #size-cells = <1>;
300 compatible = "fsl,eloplus-dma";
301 reg = <0xc300 0x4>;
302 ranges = <0x0 0xc100 0x200>;
303 cell-index = <1>;
304 dma-channel@0 {
305 compatible = "fsl,eloplus-dma-channel";
306 reg = <0x0 0x80>;
307 cell-index = <0>;
308 interrupt-parent = <&mpic>;
309 interrupts = <76 2>;
310 };
311 dma-channel@80 {
312 compatible = "fsl,eloplus-dma-channel";
313 reg = <0x80 0x80>;
314 cell-index = <1>;
315 interrupt-parent = <&mpic>;
316 interrupts = <77 2>;
317 };
318 dma-channel@100 {
319 compatible = "fsl,eloplus-dma-channel";
320 reg = <0x100 0x80>;
321 cell-index = <2>;
322 interrupt-parent = <&mpic>;
323 interrupts = <78 2>;
324 };
325 dma-channel@180 {
326 compatible = "fsl,eloplus-dma-channel";
327 reg = <0x180 0x80>;
328 cell-index = <3>;
329 interrupt-parent = <&mpic>;
330 interrupts = <79 2>;
331 };
332 };
333
334 gpio: gpio-controller@f000 {
335 #gpio-cells = <2>;
336 compatible = "fsl,mpc8572-gpio";
337 reg = <0xf000 0x100>;
338 interrupts = <47 0x2>;
339 interrupt-parent = <&mpic>;
340 gpio-controller;
341 };
342
343 L2: l2-cache-controller@20000 {
344 compatible = "fsl,p2020-l2-cache-controller";
345 reg = <0x20000 0x1000>;
346 cache-line-size = <32>; // 32 bytes
347 cache-size = <0x80000>; // L2,512K
348 interrupt-parent = <&mpic>;
349 interrupts = <16 2>;
350 }; 201 };
351 202
352 dma@21300 { 203 mdio@24520 {
353 #address-cells = <1>; 204 phy0: ethernet-phy@0 {
354 #size-cells = <1>;
355 compatible = "fsl,eloplus-dma";
356 reg = <0x21300 0x4>;
357 ranges = <0x0 0x21100 0x200>;
358 cell-index = <0>;
359 dma-channel@0 {
360 compatible = "fsl,eloplus-dma-channel";
361 reg = <0x0 0x80>;
362 cell-index = <0>;
363 interrupt-parent = <&mpic>; 205 interrupt-parent = <&mpic>;
364 interrupts = <20 2>; 206 interrupts = <3 1>;
365 }; 207 reg = <0x0>;
366 dma-channel@80 { 208 };
367 compatible = "fsl,eloplus-dma-channel"; 209 phy1: ethernet-phy@1 {
368 reg = <0x80 0x80>;
369 cell-index = <1>;
370 interrupt-parent = <&mpic>;
371 interrupts = <21 2>;
372 };
373 dma-channel@100 {
374 compatible = "fsl,eloplus-dma-channel";
375 reg = <0x100 0x80>;
376 cell-index = <2>;
377 interrupt-parent = <&mpic>;
378 interrupts = <22 2>;
379 };
380 dma-channel@180 {
381 compatible = "fsl,eloplus-dma-channel";
382 reg = <0x180 0x80>;
383 cell-index = <3>;
384 interrupt-parent = <&mpic>; 210 interrupt-parent = <&mpic>;
385 interrupts = <23 2>; 211 interrupts = <3 1>;
212 reg = <0x1>;
213 };
214 };
215
216 mdio@25520 {
217 tbi0: tbi-phy@11 {
218 reg = <0x11>;
219 device_type = "tbi-phy";
386 }; 220 };
387 }; 221 };
388 222
389 usb@22000 { 223 mdio@26520 {
390 #address-cells = <1>; 224 status = "disabled";
391 #size-cells = <0>;
392 compatible = "fsl-usb2-dr";
393 reg = <0x22000 0x1000>;
394 interrupt-parent = <&mpic>;
395 interrupts = <28 0x2>;
396 phy_type = "ulpi";
397 }; 225 };
398 226
399 enet0: ethernet@24000 { 227 enet0: ethernet@24000 {
400 #address-cells = <1>;
401 #size-cells = <1>;
402 cell-index = <0>;
403 device_type = "network";
404 model = "eTSEC";
405 compatible = "gianfar";
406 reg = <0x24000 0x1000>;
407 ranges = <0x0 0x24000 0x1000>;
408 local-mac-address = [ 00 00 00 00 00 00 ];
409 interrupts = <29 2 30 2 34 2>;
410 interrupt-parent = <&mpic>;
411 fixed-link = <1 1 1000 0 0>; 228 fixed-link = <1 1 1000 0 0>;
412 phy-connection-type = "rgmii-id"; 229 phy-connection-type = "rgmii-id";
413
414 mdio@520 {
415 #address-cells = <1>;
416 #size-cells = <0>;
417 compatible = "fsl,gianfar-mdio";
418 reg = <0x520 0x20>;
419
420 phy0: ethernet-phy@0 {
421 interrupt-parent = <&mpic>;
422 interrupts = <3 1>;
423 reg = <0x0>;
424 };
425 phy1: ethernet-phy@1 {
426 interrupt-parent = <&mpic>;
427 interrupts = <3 1>;
428 reg = <0x1>;
429 };
430 };
431 }; 230 };
432 231
433 enet1: ethernet@25000 { 232 enet1: ethernet@25000 {
434 #address-cells = <1>;
435 #size-cells = <1>;
436 cell-index = <1>;
437 device_type = "network";
438 model = "eTSEC";
439 compatible = "gianfar";
440 reg = <0x25000 0x1000>;
441 ranges = <0x0 0x25000 0x1000>;
442 local-mac-address = [ 00 00 00 00 00 00 ];
443 interrupts = <35 2 36 2 40 2>;
444 interrupt-parent = <&mpic>;
445 tbi-handle = <&tbi0>; 233 tbi-handle = <&tbi0>;
446 phy-handle = <&phy0>; 234 phy-handle = <&phy0>;
447 phy-connection-type = "sgmii"; 235 phy-connection-type = "sgmii";
448
449 mdio@520 {
450 #address-cells = <1>;
451 #size-cells = <0>;
452 compatible = "fsl,gianfar-tbi";
453 reg = <0x520 0x20>;
454
455 tbi0: tbi-phy@11 {
456 reg = <0x11>;
457 device_type = "tbi-phy";
458 };
459 };
460 }; 236 };
461 237
462 enet2: ethernet@26000 { 238 enet2: ethernet@26000 {
463 #address-cells = <1>;
464 #size-cells = <1>;
465 cell-index = <2>;
466 device_type = "network";
467 model = "eTSEC";
468 compatible = "gianfar";
469 reg = <0x26000 0x1000>;
470 ranges = <0x0 0x26000 0x1000>;
471 local-mac-address = [ 00 00 00 00 00 00 ];
472 interrupts = <31 2 32 2 33 2>;
473 interrupt-parent = <&mpic>;
474 phy-handle = <&phy1>; 239 phy-handle = <&phy1>;
475 phy-connection-type = "rgmii-id"; 240 phy-connection-type = "rgmii-id";
476 }; 241 };
477 242
478 sdhci@2e000 { 243 };
479 compatible = "fsl,p2020-esdhc", "fsl,esdhc";
480 reg = <0x2e000 0x1000>;
481 interrupts = <72 0x2>;
482 interrupt-parent = <&mpic>;
483 /* Filled in by U-Boot */
484 clock-frequency = <0>;
485 };
486
487 crypto@30000 {
488 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
489 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
490 reg = <0x30000 0x10000>;
491 interrupts = <45 2 58 2>;
492 interrupt-parent = <&mpic>;
493 fsl,num-channels = <4>;
494 fsl,channel-fifo-len = <24>;
495 fsl,exec-units-mask = <0xbfe>;
496 fsl,descriptor-types-mask = <0x3ab0ebf>;
497 };
498
499 mpic: pic@40000 {
500 interrupt-controller;
501 #address-cells = <0>;
502 #interrupt-cells = <2>;
503 reg = <0x40000 0x40000>;
504 compatible = "chrp,open-pic";
505 device_type = "open-pic";
506 };
507
508 msi@41600 {
509 compatible = "fsl,p2020-msi", "fsl,mpic-msi";
510 reg = <0x41600 0x80>;
511 msi-available-ranges = <0 0x100>;
512 interrupts = <
513 0xe0 0
514 0xe1 0
515 0xe2 0
516 0xe3 0
517 0xe4 0
518 0xe5 0
519 0xe6 0
520 0xe7 0>;
521 interrupt-parent = <&mpic>;
522 };
523 244
524 global-utilities@e0000 { //global utilities block 245 pci0: pcie@ffe08000 {
525 compatible = "fsl,p2020-guts"; 246 status = "disabled";
526 reg = <0xe0000 0x1000>;
527 fsl,has-rstcr;
528 };
529 }; 247 };
530 248
531 pci0: pcie@ffe09000 { 249 pci1: pcie@ffe09000 {
532 compatible = "fsl,mpc8548-pcie";
533 device_type = "pci";
534 #interrupt-cells = <1>;
535 #size-cells = <2>;
536 #address-cells = <3>;
537 reg = <0 0xffe09000 0 0x1000>;
538 bus-range = <0 255>;
539 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 250 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
540 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 251 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
541 clock-frequency = <33333333>; 252 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
542 interrupt-parent = <&mpic>; 253 interrupt-map = <
543 interrupts = <25 2>; 254 /* IDSEL 0x0 */
544 pcie@0 { 255 0000 0x0 0x0 0x1 &mpic 0x4 0x1
256 0000 0x0 0x0 0x2 &mpic 0x5 0x1
257 0000 0x0 0x0 0x3 &mpic 0x6 0x1
258 0000 0x0 0x0 0x4 &mpic 0x7 0x1
259 >;
260 pcie@0 {
545 reg = <0x0 0x0 0x0 0x0 0x0>; 261 reg = <0x0 0x0 0x0 0x0 0x0>;
546 #size-cells = <2>; 262 #size-cells = <2>;
547 #address-cells = <3>; 263 #address-cells = <3>;
@@ -556,19 +272,17 @@
556 }; 272 };
557 }; 273 };
558 274
559 pci1: pcie@ffe0a000 { 275 pci2: pcie@ffe0a000 {
560 compatible = "fsl,mpc8548-pcie";
561 device_type = "pci";
562 #interrupt-cells = <1>;
563 #size-cells = <2>;
564 #address-cells = <3>;
565 reg = <0 0xffe0a000 0 0x1000>;
566 bus-range = <0 255>;
567 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 276 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
568 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 277 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
569 clock-frequency = <33333333>; 278 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
570 interrupt-parent = <&mpic>; 279 interrupt-map = <
571 interrupts = <26 2>; 280 /* IDSEL 0x0 */
281 0000 0x0 0x0 0x1 &mpic 0x0 0x1
282 0000 0x0 0x0 0x2 &mpic 0x1 0x1
283 0000 0x0 0x0 0x3 &mpic 0x2 0x1
284 0000 0x0 0x0 0x4 &mpic 0x3 0x1
285 >;
572 pcie@0 { 286 pcie@0 {
573 reg = <0x0 0x0 0x0 0x0 0x0>; 287 reg = <0x0 0x0 0x0 0x0 0x0>;
574 #size-cells = <2>; 288 #size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
index b69c3a5dc858..fc8ddddfccb6 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
@@ -14,12 +14,11 @@
14 * option) any later version. 14 * option) any later version.
15 */ 15 */
16 16
17/dts-v1/; 17/include/ "p2020si.dtsi"
18
18/ { 19/ {
19 model = "fsl,P2020"; 20 model = "fsl,P2020RDB";
20 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; 21 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
21 #address-cells = <2>;
22 #size-cells = <2>;
23 22
24 aliases { 23 aliases {
25 ethernet1 = &enet1; 24 ethernet1 = &enet1;
@@ -29,91 +28,33 @@
29 }; 28 };
30 29
31 cpus { 30 cpus {
32 #address-cells = <1>; 31 PowerPC,P2020@1 {
33 #size-cells = <0>; 32 status = "disabled";
34
35 PowerPC,P2020@0 {
36 device_type = "cpu";
37 reg = <0x0>;
38 next-level-cache = <&L2>;
39 }; 33 };
34
40 }; 35 };
41 36
42 memory { 37 memory {
43 device_type = "memory"; 38 device_type = "memory";
44 }; 39 };
45 40
46 soc@ffe00000 { 41 localbus@ffe05000 {
47 #address-cells = <1>; 42 status = "disabled";
48 #size-cells = <1>; 43 };
49 device_type = "soc";
50 compatible = "fsl,p2020-immr", "simple-bus";
51 ranges = <0x0 0x0 0xffe00000 0x100000>;
52 bus-frequency = <0>; // Filled out by uboot.
53
54 ecm-law@0 {
55 compatible = "fsl,ecm-law";
56 reg = <0x0 0x1000>;
57 fsl,num-laws = <12>;
58 };
59
60 ecm@1000 {
61 compatible = "fsl,p2020-ecm", "fsl,ecm";
62 reg = <0x1000 0x1000>;
63 interrupts = <17 2>;
64 interrupt-parent = <&mpic>;
65 };
66
67 memory-controller@2000 {
68 compatible = "fsl,p2020-memory-controller";
69 reg = <0x2000 0x1000>;
70 interrupt-parent = <&mpic>;
71 interrupts = <18 2>;
72 };
73 44
45 soc@ffe00000 {
74 i2c@3000 { 46 i2c@3000 {
75 #address-cells = <1>;
76 #size-cells = <0>;
77 cell-index = <0>;
78 compatible = "fsl-i2c";
79 reg = <0x3000 0x100>;
80 interrupts = <43 2>;
81 interrupt-parent = <&mpic>;
82 dfsrr;
83 rtc@68 { 47 rtc@68 {
84 compatible = "dallas,ds1339"; 48 compatible = "dallas,ds1339";
85 reg = <0x68>; 49 reg = <0x68>;
86 }; 50 };
87 }; 51 };
88 52
89 i2c@3100 { 53 serial1: serial@4600 {
90 #address-cells = <1>; 54 status = "disabled";
91 #size-cells = <0>;
92 cell-index = <1>;
93 compatible = "fsl-i2c";
94 reg = <0x3100 0x100>;
95 interrupts = <43 2>;
96 interrupt-parent = <&mpic>;
97 dfsrr;
98 };
99
100 serial0: serial@4500 {
101 cell-index = <0>;
102 device_type = "serial";
103 compatible = "ns16550";
104 reg = <0x4500 0x100>;
105 clock-frequency = <0>;
106 }; 55 };
107 56
108 spi@7000 { 57 spi@7000 {
109 cell-index = <0>;
110 #address-cells = <1>;
111 #size-cells = <0>;
112 compatible = "fsl,espi";
113 reg = <0x7000 0x1000>;
114 interrupts = <59 0x2>;
115 interrupt-parent = <&mpic>;
116 mode = "cpu";
117 58
118 fsl_m25p80@0 { 59 fsl_m25p80@0 {
119 #address-cells = <1>; 60 #address-cells = <1>;
@@ -161,76 +102,15 @@
161 }; 102 };
162 }; 103 };
163 104
164 gpio: gpio-controller@f000 { 105 dma@c300 {
165 #gpio-cells = <2>; 106 status = "disabled";
166 compatible = "fsl,mpc8572-gpio";
167 reg = <0xf000 0x100>;
168 interrupts = <47 0x2>;
169 interrupt-parent = <&mpic>;
170 gpio-controller;
171 };
172
173 L2: l2-cache-controller@20000 {
174 compatible = "fsl,p2020-l2-cache-controller";
175 reg = <0x20000 0x1000>;
176 cache-line-size = <32>; // 32 bytes
177 cache-size = <0x80000>; // L2,512K
178 interrupt-parent = <&mpic>;
179 interrupts = <16 2>;
180 };
181
182 dma@21300 {
183 #address-cells = <1>;
184 #size-cells = <1>;
185 compatible = "fsl,eloplus-dma";
186 reg = <0x21300 0x4>;
187 ranges = <0x0 0x21100 0x200>;
188 cell-index = <0>;
189 dma-channel@0 {
190 compatible = "fsl,eloplus-dma-channel";
191 reg = <0x0 0x80>;
192 cell-index = <0>;
193 interrupt-parent = <&mpic>;
194 interrupts = <20 2>;
195 };
196 dma-channel@80 {
197 compatible = "fsl,eloplus-dma-channel";
198 reg = <0x80 0x80>;
199 cell-index = <1>;
200 interrupt-parent = <&mpic>;
201 interrupts = <21 2>;
202 };
203 dma-channel@100 {
204 compatible = "fsl,eloplus-dma-channel";
205 reg = <0x100 0x80>;
206 cell-index = <2>;
207 interrupt-parent = <&mpic>;
208 interrupts = <22 2>;
209 };
210 dma-channel@180 {
211 compatible = "fsl,eloplus-dma-channel";
212 reg = <0x180 0x80>;
213 cell-index = <3>;
214 interrupt-parent = <&mpic>;
215 interrupts = <23 2>;
216 };
217 }; 107 };
218 108
219 usb@22000 { 109 usb@22000 {
220 #address-cells = <1>;
221 #size-cells = <0>;
222 compatible = "fsl-usb2-dr";
223 reg = <0x22000 0x1000>;
224 interrupt-parent = <&mpic>;
225 interrupts = <28 0x2>;
226 phy_type = "ulpi"; 110 phy_type = "ulpi";
227 }; 111 };
228 112
229 mdio@24520 { 113 mdio@24520 {
230 #address-cells = <1>;
231 #size-cells = <0>;
232 compatible = "fsl,gianfar-mdio";
233 reg = <0x24520 0x20>;
234 114
235 phy0: ethernet-phy@0 { 115 phy0: ethernet-phy@0 {
236 interrupt-parent = <&mpic>; 116 interrupt-parent = <&mpic>;
@@ -245,29 +125,21 @@
245 }; 125 };
246 126
247 mdio@25520 { 127 mdio@25520 {
248 #address-cells = <1>;
249 #size-cells = <0>;
250 compatible = "fsl,gianfar-tbi";
251 reg = <0x26520 0x20>;
252
253 tbi0: tbi-phy@11 { 128 tbi0: tbi-phy@11 {
254 reg = <0x11>; 129 reg = <0x11>;
255 device_type = "tbi-phy"; 130 device_type = "tbi-phy";
256 }; 131 };
257 }; 132 };
258 133
134 mdio@26520 {
135 status = "disabled";
136 };
137
138 enet0: ethernet@24000 {
139 status = "disabled";
140 };
141
259 enet1: ethernet@25000 { 142 enet1: ethernet@25000 {
260 #address-cells = <1>;
261 #size-cells = <1>;
262 cell-index = <1>;
263 device_type = "network";
264 model = "eTSEC";
265 compatible = "gianfar";
266 reg = <0x25000 0x1000>;
267 ranges = <0x0 0x25000 0x1000>;
268 local-mac-address = [ 00 00 00 00 00 00 ];
269 interrupts = <35 2 36 2 40 2>;
270 interrupt-parent = <&mpic>;
271 tbi-handle = <&tbi0>; 143 tbi-handle = <&tbi0>;
272 phy-handle = <&phy0>; 144 phy-handle = <&phy0>;
273 phy-connection-type = "sgmii"; 145 phy-connection-type = "sgmii";
@@ -275,49 +147,12 @@
275 }; 147 };
276 148
277 enet2: ethernet@26000 { 149 enet2: ethernet@26000 {
278 #address-cells = <1>;
279 #size-cells = <1>;
280 cell-index = <2>;
281 device_type = "network";
282 model = "eTSEC";
283 compatible = "gianfar";
284 reg = <0x26000 0x1000>;
285 ranges = <0x0 0x26000 0x1000>;
286 local-mac-address = [ 00 00 00 00 00 00 ];
287 interrupts = <31 2 32 2 33 2>;
288 interrupt-parent = <&mpic>;
289 phy-handle = <&phy1>; 150 phy-handle = <&phy1>;
290 phy-connection-type = "rgmii-id"; 151 phy-connection-type = "rgmii-id";
291 }; 152 };
292 153
293 sdhci@2e000 {
294 compatible = "fsl,p2020-esdhc", "fsl,esdhc";
295 reg = <0x2e000 0x1000>;
296 interrupts = <72 0x2>;
297 interrupt-parent = <&mpic>;
298 /* Filled in by U-Boot */
299 clock-frequency = <0>;
300 };
301
302 crypto@30000 {
303 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
304 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
305 reg = <0x30000 0x10000>;
306 interrupts = <45 2 58 2>;
307 interrupt-parent = <&mpic>;
308 fsl,num-channels = <4>;
309 fsl,channel-fifo-len = <24>;
310 fsl,exec-units-mask = <0xbfe>;
311 fsl,descriptor-types-mask = <0x3ab0ebf>;
312 };
313 154
314 mpic: pic@40000 { 155 mpic: pic@40000 {
315 interrupt-controller;
316 #address-cells = <0>;
317 #interrupt-cells = <2>;
318 reg = <0x40000 0x40000>;
319 compatible = "chrp,open-pic";
320 device_type = "open-pic";
321 protected-sources = < 156 protected-sources = <
322 42 76 77 78 79 /* serial1 , dma2 */ 157 42 76 77 78 79 /* serial1 , dma2 */
323 29 30 34 26 /* enet0, pci1 */ 158 29 30 34 26 /* enet0, pci1 */
@@ -326,26 +161,28 @@
326 >; 161 >;
327 }; 162 };
328 163
329 global-utilities@e0000 { 164 msi@41600 {
330 compatible = "fsl,p2020-guts"; 165 status = "disabled";
331 reg = <0xe0000 0x1000>;
332 fsl,has-rstcr;
333 }; 166 };
167
168
334 }; 169 };
335 170
336 pci0: pcie@ffe09000 { 171 pci0: pcie@ffe08000 {
337 compatible = "fsl,mpc8548-pcie"; 172 status = "disabled";
338 device_type = "pci"; 173 };
339 #interrupt-cells = <1>; 174
340 #size-cells = <2>; 175 pci1: pcie@ffe09000 {
341 #address-cells = <3>;
342 reg = <0 0xffe09000 0 0x1000>;
343 bus-range = <0 255>;
344 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 176 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
345 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; 177 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
346 clock-frequency = <33333333>; 178 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
347 interrupt-parent = <&mpic>; 179 interrupt-map = <
348 interrupts = <25 2>; 180 /* IDSEL 0x0 */
181 0000 0x0 0x0 0x1 &mpic 0x4 0x1
182 0000 0x0 0x0 0x2 &mpic 0x5 0x1
183 0000 0x0 0x0 0x3 &mpic 0x6 0x1
184 0000 0x0 0x0 0x4 &mpic 0x7 0x1
185 >;
349 pcie@0 { 186 pcie@0 {
350 reg = <0x0 0x0 0x0 0x0 0x0>; 187 reg = <0x0 0x0 0x0 0x0 0x0>;
351 #size-cells = <2>; 188 #size-cells = <2>;
@@ -360,4 +197,8 @@
360 0x0 0x100000>; 197 0x0 0x100000>;
361 }; 198 };
362 }; 199 };
200
201 pci2: pcie@ffe0a000 {
202 status = "disabled";
203 };
363}; 204};
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
index 7a31d46c01b0..261c34ba45ec 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
@@ -15,27 +15,21 @@
15 * option) any later version. 15 * option) any later version.
16 */ 16 */
17 17
18/dts-v1/; 18/include/ "p2020si.dtsi"
19
19/ { 20/ {
20 model = "fsl,P2020"; 21 model = "fsl,P2020RDB";
21 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; 22 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
22 #address-cells = <2>;
23 #size-cells = <2>;
24 23
25 aliases { 24 aliases {
26 ethernet0 = &enet0; 25 ethernet0 = &enet0;
27 serial0 = &serial0; 26 serial0 = &serial1;
28 pci1 = &pci1; 27 pci1 = &pci1;
29 }; 28 };
30 29
31 cpus { 30 cpus {
32 #address-cells = <1>; 31 PowerPC,P2020@0 {
33 #size-cells = <0>; 32 status = "disabled";
34
35 PowerPC,P2020@1 {
36 device_type = "cpu";
37 reg = <0x1>;
38 next-level-cache = <&L2>;
39 }; 33 };
40 }; 34 };
41 35
@@ -43,20 +37,37 @@
43 device_type = "memory"; 37 device_type = "memory";
44 }; 38 };
45 39
40 localbus@ffe05000 {
41 status = "disabled";
42 };
43
46 soc@ffe00000 { 44 soc@ffe00000 {
47 #address-cells = <1>; 45 ecm-law@0 {
48 #size-cells = <1>; 46 status = "disabled";
49 device_type = "soc"; 47 };
50 compatible = "fsl,p2020-immr", "simple-bus"; 48
51 ranges = <0x0 0x0 0xffe00000 0x100000>; 49 ecm@1000 {
52 bus-frequency = <0>; // Filled out by uboot. 50 status = "disabled";
53 51 };
54 serial0: serial@4600 { 52
55 cell-index = <1>; 53 memory-controller@2000 {
56 device_type = "serial"; 54 status = "disabled";
57 compatible = "ns16550"; 55 };
58 reg = <0x4600 0x100>; 56
59 clock-frequency = <0>; 57 i2c@3000 {
58 status = "disabled";
59 };
60
61 i2c@3100 {
62 status = "disabled";
63 };
64
65 serial0: serial@4500 {
66 status = "disabled";
67 };
68
69 spi@7000 {
70 status = "disabled";
60 }; 71 };
61 72
62 dma@c300 { 73 dma@c300 {
@@ -96,6 +107,10 @@
96 }; 107 };
97 }; 108 };
98 109
110 gpio: gpio-controller@f000 {
111 status = "disabled";
112 };
113
99 L2: l2-cache-controller@20000 { 114 L2: l2-cache-controller@20000 {
100 compatible = "fsl,p2020-l2-cache-controller"; 115 compatible = "fsl,p2020-l2-cache-controller";
101 reg = <0x20000 0x1000>; 116 reg = <0x20000 0x1000>;
@@ -104,31 +119,49 @@
104 interrupt-parent = <&mpic>; 119 interrupt-parent = <&mpic>;
105 }; 120 };
106 121
122 dma@21300 {
123 status = "disabled";
124 };
125
126 usb@22000 {
127 status = "disabled";
128 };
129
130 mdio@24520 {
131 status = "disabled";
132 };
133
134 mdio@25520 {
135 status = "disabled";
136 };
137
138 mdio@26520 {
139 status = "disabled";
140 };
107 141
108 enet0: ethernet@24000 { 142 enet0: ethernet@24000 {
109 #address-cells = <1>;
110 #size-cells = <1>;
111 cell-index = <0>;
112 device_type = "network";
113 model = "eTSEC";
114 compatible = "gianfar";
115 reg = <0x24000 0x1000>;
116 ranges = <0x0 0x24000 0x1000>;
117 local-mac-address = [ 00 00 00 00 00 00 ];
118 interrupts = <29 2 30 2 34 2>;
119 interrupt-parent = <&mpic>;
120 fixed-link = <1 1 1000 0 0>; 143 fixed-link = <1 1 1000 0 0>;
121 phy-connection-type = "rgmii-id"; 144 phy-connection-type = "rgmii-id";
122 145
123 }; 146 };
124 147
148 enet1: ethernet@25000 {
149 status = "disabled";
150 };
151
152 enet2: ethernet@26000 {
153 status = "disabled";
154 };
155
156 sdhci@2e000 {
157 status = "disabled";
158 };
159
160 crypto@30000 {
161 status = "disabled";
162 };
163
125 mpic: pic@40000 { 164 mpic: pic@40000 {
126 interrupt-controller;
127 #address-cells = <0>;
128 #interrupt-cells = <2>;
129 reg = <0x40000 0x40000>;
130 compatible = "chrp,open-pic";
131 device_type = "open-pic";
132 protected-sources = < 165 protected-sources = <
133 17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */ 166 17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */
134 16 20 21 22 23 28 /* L2, dma1, USB */ 167 16 20 21 22 23 28 /* L2, dma1, USB */
@@ -152,21 +185,32 @@
152 0xe7 0>; 185 0xe7 0>;
153 interrupt-parent = <&mpic>; 186 interrupt-parent = <&mpic>;
154 }; 187 };
188
189 global-utilities@e0000 { //global utilities block
190 status = "disabled";
191 };
192
155 }; 193 };
156 194
157 pci1: pcie@ffe0a000 { 195 pci0: pcie@ffe08000 {
158 compatible = "fsl,mpc8548-pcie"; 196 status = "disabled";
159 device_type = "pci"; 197 };
160 #interrupt-cells = <1>; 198
161 #size-cells = <2>; 199 pci1: pcie@ffe09000 {
162 #address-cells = <3>; 200 status = "disabled";
163 reg = <0 0xffe0a000 0 0x1000>; 201 };
164 bus-range = <0 255>; 202
203 pci2: pcie@ffe0a000 {
165 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 204 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
166 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; 205 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
167 clock-frequency = <33333333>; 206 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
168 interrupt-parent = <&mpic>; 207 interrupt-map = <
169 interrupts = <26 2>; 208 /* IDSEL 0x0 */
209 0000 0x0 0x0 0x1 &mpic 0x0 0x1
210 0000 0x0 0x0 0x2 &mpic 0x1 0x1
211 0000 0x0 0x0 0x3 &mpic 0x2 0x1
212 0000 0x0 0x0 0x4 &mpic 0x3 0x1
213 >;
170 pcie@0 { 214 pcie@0 {
171 reg = <0x0 0x0 0x0 0x0 0x0>; 215 reg = <0x0 0x0 0x0 0x0 0x0>;
172 #size-cells = <2>; 216 #size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/p2020si.dtsi b/arch/powerpc/boot/dts/p2020si.dtsi
new file mode 100644
index 000000000000..6def17f265d3
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020si.dtsi
@@ -0,0 +1,382 @@
1/*
2 * P2020 Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/dts-v1/;
13/ {
14 compatible = "fsl,P2020";
15 #address-cells = <2>;
16 #size-cells = <2>;
17
18 cpus {
19 #address-cells = <1>;
20 #size-cells = <0>;
21
22 PowerPC,P2020@0 {
23 device_type = "cpu";
24 reg = <0x0>;
25 next-level-cache = <&L2>;
26 };
27
28 PowerPC,P2020@1 {
29 device_type = "cpu";
30 reg = <0x1>;
31 next-level-cache = <&L2>;
32 };
33 };
34
35 localbus@ffe05000 {
36 #address-cells = <2>;
37 #size-cells = <1>;
38 compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
39 reg = <0 0xffe05000 0 0x1000>;
40 interrupts = <19 2>;
41 interrupt-parent = <&mpic>;
42 };
43
44 soc@ffe00000 {
45 #address-cells = <1>;
46 #size-cells = <1>;
47 device_type = "soc";
48 compatible = "fsl,p2020-immr", "simple-bus";
49 ranges = <0x0 0x0 0xffe00000 0x100000>;
50 bus-frequency = <0>; // Filled out by uboot.
51
52 ecm-law@0 {
53 compatible = "fsl,ecm-law";
54 reg = <0x0 0x1000>;
55 fsl,num-laws = <12>;
56 };
57
58 ecm@1000 {
59 compatible = "fsl,p2020-ecm", "fsl,ecm";
60 reg = <0x1000 0x1000>;
61 interrupts = <17 2>;
62 interrupt-parent = <&mpic>;
63 };
64
65 memory-controller@2000 {
66 compatible = "fsl,p2020-memory-controller";
67 reg = <0x2000 0x1000>;
68 interrupt-parent = <&mpic>;
69 interrupts = <18 2>;
70 };
71
72 i2c@3000 {
73 #address-cells = <1>;
74 #size-cells = <0>;
75 cell-index = <0>;
76 compatible = "fsl-i2c";
77 reg = <0x3000 0x100>;
78 interrupts = <43 2>;
79 interrupt-parent = <&mpic>;
80 dfsrr;
81 };
82
83 i2c@3100 {
84 #address-cells = <1>;
85 #size-cells = <0>;
86 cell-index = <1>;
87 compatible = "fsl-i2c";
88 reg = <0x3100 0x100>;
89 interrupts = <43 2>;
90 interrupt-parent = <&mpic>;
91 dfsrr;
92 };
93
94 serial0: serial@4500 {
95 cell-index = <0>;
96 device_type = "serial";
97 compatible = "ns16550";
98 reg = <0x4500 0x100>;
99 clock-frequency = <0>;
100 interrupts = <42 2>;
101 interrupt-parent = <&mpic>;
102 };
103
104 serial1: serial@4600 {
105 cell-index = <1>;
106 device_type = "serial";
107 compatible = "ns16550";
108 reg = <0x4600 0x100>;
109 clock-frequency = <0>;
110 interrupts = <42 2>;
111 interrupt-parent = <&mpic>;
112 };
113
114 spi@7000 {
115 cell-index = <0>;
116 #address-cells = <1>;
117 #size-cells = <0>;
118 compatible = "fsl,espi";
119 reg = <0x7000 0x1000>;
120 interrupts = <59 0x2>;
121 interrupt-parent = <&mpic>;
122 mode = "cpu";
123 };
124
125 dma@c300 {
126 #address-cells = <1>;
127 #size-cells = <1>;
128 compatible = "fsl,eloplus-dma";
129 reg = <0xc300 0x4>;
130 ranges = <0x0 0xc100 0x200>;
131 cell-index = <1>;
132 dma-channel@0 {
133 compatible = "fsl,eloplus-dma-channel";
134 reg = <0x0 0x80>;
135 cell-index = <0>;
136 interrupt-parent = <&mpic>;
137 interrupts = <76 2>;
138 };
139 dma-channel@80 {
140 compatible = "fsl,eloplus-dma-channel";
141 reg = <0x80 0x80>;
142 cell-index = <1>;
143 interrupt-parent = <&mpic>;
144 interrupts = <77 2>;
145 };
146 dma-channel@100 {
147 compatible = "fsl,eloplus-dma-channel";
148 reg = <0x100 0x80>;
149 cell-index = <2>;
150 interrupt-parent = <&mpic>;
151 interrupts = <78 2>;
152 };
153 dma-channel@180 {
154 compatible = "fsl,eloplus-dma-channel";
155 reg = <0x180 0x80>;
156 cell-index = <3>;
157 interrupt-parent = <&mpic>;
158 interrupts = <79 2>;
159 };
160 };
161
162 gpio: gpio-controller@f000 {
163 #gpio-cells = <2>;
164 compatible = "fsl,mpc8572-gpio";
165 reg = <0xf000 0x100>;
166 interrupts = <47 0x2>;
167 interrupt-parent = <&mpic>;
168 gpio-controller;
169 };
170
171 L2: l2-cache-controller@20000 {
172 compatible = "fsl,p2020-l2-cache-controller";
173 reg = <0x20000 0x1000>;
174 cache-line-size = <32>; // 32 bytes
175 cache-size = <0x80000>; // L2,512K
176 interrupt-parent = <&mpic>;
177 interrupts = <16 2>;
178 };
179
180 dma@21300 {
181 #address-cells = <1>;
182 #size-cells = <1>;
183 compatible = "fsl,eloplus-dma";
184 reg = <0x21300 0x4>;
185 ranges = <0x0 0x21100 0x200>;
186 cell-index = <0>;
187 dma-channel@0 {
188 compatible = "fsl,eloplus-dma-channel";
189 reg = <0x0 0x80>;
190 cell-index = <0>;
191 interrupt-parent = <&mpic>;
192 interrupts = <20 2>;
193 };
194 dma-channel@80 {
195 compatible = "fsl,eloplus-dma-channel";
196 reg = <0x80 0x80>;
197 cell-index = <1>;
198 interrupt-parent = <&mpic>;
199 interrupts = <21 2>;
200 };
201 dma-channel@100 {
202 compatible = "fsl,eloplus-dma-channel";
203 reg = <0x100 0x80>;
204 cell-index = <2>;
205 interrupt-parent = <&mpic>;
206 interrupts = <22 2>;
207 };
208 dma-channel@180 {
209 compatible = "fsl,eloplus-dma-channel";
210 reg = <0x180 0x80>;
211 cell-index = <3>;
212 interrupt-parent = <&mpic>;
213 interrupts = <23 2>;
214 };
215 };
216
217 usb@22000 {
218 #address-cells = <1>;
219 #size-cells = <0>;
220 compatible = "fsl-usb2-dr";
221 reg = <0x22000 0x1000>;
222 interrupt-parent = <&mpic>;
223 interrupts = <28 0x2>;
224 };
225
226 mdio@24520 {
227 #address-cells = <1>;
228 #size-cells = <0>;
229 compatible = "fsl,gianfar-mdio";
230 reg = <0x24520 0x20>;
231 };
232
233 mdio@25520 {
234 #address-cells = <1>;
235 #size-cells = <0>;
236 compatible = "fsl,gianfar-tbi";
237 reg = <0x26520 0x20>;
238 };
239
240 mdio@26520 {
241 #address-cells = <1>;
242 #size-cells = <0>;
243 compatible = "fsl,gianfar-tbi";
244 reg = <0x520 0x20>;
245 };
246
247 enet0: ethernet@24000 {
248 #address-cells = <1>;
249 #size-cells = <1>;
250 cell-index = <0>;
251 device_type = "network";
252 model = "eTSEC";
253 compatible = "gianfar";
254 reg = <0x24000 0x1000>;
255 ranges = <0x0 0x24000 0x1000>;
256 local-mac-address = [ 00 00 00 00 00 00 ];
257 interrupts = <29 2 30 2 34 2>;
258 interrupt-parent = <&mpic>;
259 };
260
261 enet1: ethernet@25000 {
262 #address-cells = <1>;
263 #size-cells = <1>;
264 cell-index = <1>;
265 device_type = "network";
266 model = "eTSEC";
267 compatible = "gianfar";
268 reg = <0x25000 0x1000>;
269 ranges = <0x0 0x25000 0x1000>;
270 local-mac-address = [ 00 00 00 00 00 00 ];
271 interrupts = <35 2 36 2 40 2>;
272 interrupt-parent = <&mpic>;
273
274 };
275
276 enet2: ethernet@26000 {
277 #address-cells = <1>;
278 #size-cells = <1>;
279 cell-index = <2>;
280 device_type = "network";
281 model = "eTSEC";
282 compatible = "gianfar";
283 reg = <0x26000 0x1000>;
284 ranges = <0x0 0x26000 0x1000>;
285 local-mac-address = [ 00 00 00 00 00 00 ];
286 interrupts = <31 2 32 2 33 2>;
287 interrupt-parent = <&mpic>;
288
289 };
290
291 sdhci@2e000 {
292 compatible = "fsl,p2020-esdhc", "fsl,esdhc";
293 reg = <0x2e000 0x1000>;
294 interrupts = <72 0x2>;
295 interrupt-parent = <&mpic>;
296 /* Filled in by U-Boot */
297 clock-frequency = <0>;
298 };
299
300 crypto@30000 {
301 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
302 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
303 reg = <0x30000 0x10000>;
304 interrupts = <45 2 58 2>;
305 interrupt-parent = <&mpic>;
306 fsl,num-channels = <4>;
307 fsl,channel-fifo-len = <24>;
308 fsl,exec-units-mask = <0xbfe>;
309 fsl,descriptor-types-mask = <0x3ab0ebf>;
310 };
311
312 mpic: pic@40000 {
313 interrupt-controller;
314 #address-cells = <0>;
315 #interrupt-cells = <2>;
316 reg = <0x40000 0x40000>;
317 compatible = "chrp,open-pic";
318 device_type = "open-pic";
319 };
320
321 msi@41600 {
322 compatible = "fsl,p2020-msi", "fsl,mpic-msi";
323 reg = <0x41600 0x80>;
324 msi-available-ranges = <0 0x100>;
325 interrupts = <
326 0xe0 0
327 0xe1 0
328 0xe2 0
329 0xe3 0
330 0xe4 0
331 0xe5 0
332 0xe6 0
333 0xe7 0>;
334 interrupt-parent = <&mpic>;
335 };
336
337 global-utilities@e0000 { //global utilities block
338 compatible = "fsl,p2020-guts";
339 reg = <0xe0000 0x1000>;
340 fsl,has-rstcr;
341 };
342 };
343
344 pci0: pcie@ffe08000 {
345 compatible = "fsl,mpc8548-pcie";
346 device_type = "pci";
347 #interrupt-cells = <1>;
348 #size-cells = <2>;
349 #address-cells = <3>;
350 reg = <0 0xffe08000 0 0x1000>;
351 bus-range = <0 255>;
352 clock-frequency = <33333333>;
353 interrupt-parent = <&mpic>;
354 interrupts = <24 2>;
355 };
356
357 pci1: pcie@ffe09000 {
358 compatible = "fsl,mpc8548-pcie";
359 device_type = "pci";
360 #interrupt-cells = <1>;
361 #size-cells = <2>;
362 #address-cells = <3>;
363 reg = <0 0xffe09000 0 0x1000>;
364 bus-range = <0 255>;
365 clock-frequency = <33333333>;
366 interrupt-parent = <&mpic>;
367 interrupts = <25 2>;
368 };
369
370 pci2: pcie@ffe0a000 {
371 compatible = "fsl,mpc8548-pcie";
372 device_type = "pci";
373 #interrupt-cells = <1>;
374 #size-cells = <2>;
375 #address-cells = <3>;
376 reg = <0 0xffe0a000 0 0x1000>;
377 bus-range = <0 255>;
378 clock-frequency = <33333333>;
379 interrupt-parent = <&mpic>;
380 interrupts = <26 2>;
381 };
382};
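The nodes above are consumed by platform and driver code through the usual OF helpers. As a minimal sketch (not part of this patch; the compatible string and property name are taken from the global-utilities node above), platform code could test for the reset control register like this:

#include <linux/of.h>
#include <linux/types.h>

/* Sketch: look up the global-utilities node declared in the tree above
 * and check whether it advertises the reset control register. */
static bool p2020_has_rstcr(void)
{
	struct device_node *np;
	bool has_rstcr;

	np = of_find_compatible_node(NULL, NULL, "fsl,p2020-guts");
	has_rstcr = np && of_get_property(np, "fsl,has-rstcr", NULL);
	of_node_put(np);	/* safe on NULL */
	return has_rstcr;
}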
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
new file mode 100644
index 000000000000..06c1961bd124
--- /dev/null
+++ b/arch/powerpc/boot/epapr.c
@@ -0,0 +1,66 @@
1/*
2 * Bootwrapper for ePAPR compliant firmwares
3 *
4 * Copyright 2010 David Gibson <david@gibson.dropbear.id.au>, IBM Corporation.
5 *
6 * Based on earlier bootwrappers by:
7 * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\
8 * and
9 * Scott Wood <scottwood@freescale.com>
10 * Copyright (c) 2007 Freescale Semiconductor, Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License version 2 as published
14 * by the Free Software Foundation.
15 */
16
17#include "ops.h"
18#include "stdio.h"
19#include "io.h"
20#include <libfdt.h>
21
22BSS_STACK(4096);
23
24#define EPAPR_SMAGIC 0x65504150
25#define EPAPR_EMAGIC 0x45504150
26
27static unsigned epapr_magic;
28static unsigned long ima_size;
29static unsigned long fdt_addr;
30
31static void platform_fixups(void)
32{
33 if ((epapr_magic != EPAPR_EMAGIC)
34 && (epapr_magic != EPAPR_SMAGIC))
35 fatal("r6 contained 0x%08x instead of ePAPR magic number\n",
36 epapr_magic);
37
38 if (ima_size < (unsigned long)_end)
39 printf("WARNING: Image loaded outside IMA!"
40 " (_end=%p, ima_size=0x%lx)\n", _end, ima_size);
41 if (ima_size < fdt_addr)
42 printf("WARNING: Device tree address is outside IMA!"
43 "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr,
44 ima_size);
45 if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr))
46 printf("WARNING: Device tree extends outside IMA!"
47 " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n",
48 fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
49}
50
51void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
52 unsigned long r6, unsigned long r7)
53{
54 epapr_magic = r6;
55 ima_size = r7;
56 fdt_addr = r3;
57
58 /* FIXME: we should process reserve entries */
59
60 simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64);
61
62 fdt_init((void *)fdt_addr);
63
64 serial_console_init();
65 platform_ops.fixups = platform_fixups;
66}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index cb97e7511d7e..c74531af72c0 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -39,6 +39,7 @@ dts=
39cacheit= 39cacheit=
40binary= 40binary=
41gzip=.gz 41gzip=.gz
42pie=
42 43
43# cross-compilation prefix 44# cross-compilation prefix
44CROSS= 45CROSS=
@@ -157,9 +158,10 @@ pmac|chrp)
157 platformo=$object/of.o 158 platformo=$object/of.o
158 ;; 159 ;;
159coff) 160coff)
160 platformo=$object/of.o 161 platformo="$object/crt0.o $object/of.o"
161 lds=$object/zImage.coff.lds 162 lds=$object/zImage.coff.lds
162 link_address='0x500000' 163 link_address='0x500000'
164 pie=
163 ;; 165 ;;
164miboot|uboot) 166miboot|uboot)
165 # miboot and U-boot want just the bare bits, not an ELF binary 167 # miboot and U-boot want just the bare bits, not an ELF binary
@@ -208,6 +210,7 @@ ps3)
208 ksection=.kernel:vmlinux.bin 210 ksection=.kernel:vmlinux.bin
209 isection=.kernel:initrd 211 isection=.kernel:initrd
210 link_address='' 212 link_address=''
213 pie=
211 ;; 214 ;;
212ep88xc|ep405|ep8248e) 215ep88xc|ep405|ep8248e)
213 platformo="$object/fixed-head.o $object/$platform.o" 216 platformo="$object/fixed-head.o $object/$platform.o"
@@ -244,6 +247,10 @@ gamecube|wii)
244treeboot-iss4xx-mpic) 247treeboot-iss4xx-mpic)
245 platformo="$object/treeboot-iss4xx.o" 248 platformo="$object/treeboot-iss4xx.o"
246 ;; 249 ;;
250epapr)
251 link_address='0x20000000'
252 pie=-pie
253 ;;
247esac 254esac
248 255
249vmz="$tmpdir/`basename \"$kernel\"`.$ext" 256vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -251,7 +258,7 @@ if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
251 ${CROSS}objcopy $objflags "$kernel" "$vmz.$$" 258 ${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
252 259
253 if [ -n "$gzip" ]; then 260 if [ -n "$gzip" ]; then
254 gzip -f -9 "$vmz.$$" 261 gzip -n -f -9 "$vmz.$$"
255 fi 262 fi
256 263
257 if [ -n "$cacheit" ]; then 264 if [ -n "$cacheit" ]; then
@@ -310,9 +317,9 @@ fi
310 317
311if [ "$platform" != "miboot" ]; then 318if [ "$platform" != "miboot" ]; then
312 if [ -n "$link_address" ] ; then 319 if [ -n "$link_address" ] ; then
313 text_start="-Ttext $link_address --defsym _start=$link_address" 320 text_start="-Ttext $link_address"
314 fi 321 fi
315 ${CROSS}ld -m elf32ppc -T $lds $text_start -o "$ofile" \ 322 ${CROSS}ld -m elf32ppc -T $lds $text_start $pie -o "$ofile" \
316 $platformo $tmp $object/wrapper.a 323 $platformo $tmp $object/wrapper.a
317 rm $tmp 324 rm $tmp
318fi 325fi
@@ -336,7 +343,7 @@ coff)
336 $objbin/hack-coff "$ofile" 343 $objbin/hack-coff "$ofile"
337 ;; 344 ;;
338cuboot*) 345cuboot*)
339 gzip -f -9 "$ofile" 346 gzip -n -f -9 "$ofile"
340 ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \ 347 ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \
341 $uboot_version -d "$ofile".gz "$ofile" 348 $uboot_version -d "$ofile".gz "$ofile"
342 ;; 349 ;;
@@ -383,6 +390,6 @@ ps3)
383 390
384 odir="$(dirname "$ofile.bin")" 391 odir="$(dirname "$ofile.bin")"
385 rm -f "$odir/otheros.bld" 392 rm -f "$odir/otheros.bld"
386 gzip --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld" 393 gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
387 ;; 394 ;;
388esac 395esac
diff --git a/arch/powerpc/boot/zImage.coff.lds.S b/arch/powerpc/boot/zImage.coff.lds.S
index 856dc78b14ef..de4c9e3c9344 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -3,13 +3,13 @@ ENTRY(_zimage_start_opd)
3EXTERN(_zimage_start_opd) 3EXTERN(_zimage_start_opd)
4SECTIONS 4SECTIONS
5{ 5{
6 _start = .;
7 .text : 6 .text :
8 { 7 {
8 _start = .;
9 *(.text) 9 *(.text)
10 *(.fixup) 10 *(.fixup)
11 _etext = .;
11 } 12 }
12 _etext = .;
13 . = ALIGN(4096); 13 . = ALIGN(4096);
14 .data : 14 .data :
15 { 15 {
@@ -17,9 +17,7 @@ SECTIONS
17 *(.data*) 17 *(.data*)
18 *(__builtin_*) 18 *(__builtin_*)
19 *(.sdata*) 19 *(.sdata*)
20 __got2_start = .;
21 *(.got2) 20 *(.got2)
22 __got2_end = .;
23 21
24 _dtb_start = .; 22 _dtb_start = .;
25 *(.kernel:dtb) 23 *(.kernel:dtb)
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 0962d62bdb50..2bd8731f1365 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -3,49 +3,64 @@ ENTRY(_zimage_start)
3EXTERN(_zimage_start) 3EXTERN(_zimage_start)
4SECTIONS 4SECTIONS
5{ 5{
6 _start = .;
7 .text : 6 .text :
8 { 7 {
8 _start = .;
9 *(.text) 9 *(.text)
10 *(.fixup) 10 *(.fixup)
11 _etext = .;
11 } 12 }
12 _etext = .;
13 . = ALIGN(4096); 13 . = ALIGN(4096);
14 .data : 14 .data :
15 { 15 {
16 *(.rodata*) 16 *(.rodata*)
17 *(.data*) 17 *(.data*)
18 *(.sdata*) 18 *(.sdata*)
19 __got2_start = .;
20 *(.got2) 19 *(.got2)
21 __got2_end = .;
22 } 20 }
21 .dynsym : { *(.dynsym) }
22 .dynstr : { *(.dynstr) }
23 .dynamic :
24 {
25 __dynamic_start = .;
26 *(.dynamic)
27 }
28 .hash : { *(.hash) }
29 .interp : { *(.interp) }
30 .rela.dyn : { *(.rela*) }
23 31
24 . = ALIGN(8); 32 . = ALIGN(8);
25 _dtb_start = .; 33 .kernel:dtb :
26 .kernel:dtb : { *(.kernel:dtb) } 34 {
27 _dtb_end = .; 35 _dtb_start = .;
28 36 *(.kernel:dtb)
29 . = ALIGN(4096); 37 _dtb_end = .;
30 _vmlinux_start = .; 38 }
31 .kernel:vmlinux.strip : { *(.kernel:vmlinux.strip) }
32 _vmlinux_end = .;
33 39
34 . = ALIGN(4096); 40 . = ALIGN(4096);
35 _initrd_start = .; 41 .kernel:vmlinux.strip :
36 .kernel:initrd : { *(.kernel:initrd) } 42 {
37 _initrd_end = .; 43 _vmlinux_start = .;
44 *(.kernel:vmlinux.strip)
45 _vmlinux_end = .;
46 }
38 47
39 . = ALIGN(4096); 48 . = ALIGN(4096);
40 _edata = .; 49 .kernel:initrd :
50 {
51 _initrd_start = .;
52 *(.kernel:initrd)
53 _initrd_end = .;
54 }
41 55
42 . = ALIGN(4096); 56 . = ALIGN(4096);
43 __bss_start = .;
44 .bss : 57 .bss :
45 { 58 {
46 *(.sbss) 59 _edata = .;
47 *(.bss) 60 __bss_start = .;
61 *(.sbss)
62 *(.bss)
63 *(COMMON)
64 _end = . ;
48 } 65 }
49 . = ALIGN(4096);
50 _end = . ;
51} 66}
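The new .dynsym/.dynamic/.rela.dyn output sections exist because the wrapper can now link the "epapr" zImage with -pie, so the boot code has to apply its own R_PPC_RELATIVE fixups before running from an arbitrary load address. A rough C rendering of that fixup loop (a sketch based on standard ELF handling, not a transcription of the accompanying crt0.S changes; the struct and constant are shown only for illustration):

#include <stdint.h>

#define R_PPC_RELATIVE	22		/* standard ELF relocation type */

struct elf32_rela {
	uint32_t r_offset;
	uint32_t r_info;
	int32_t  r_addend;
};

/* Apply R_PPC_RELATIVE entries: 'delta' is the difference between the
 * address the image runs at and the address it was linked at. */
static void apply_relative_relocs(struct elf32_rela *rela,
				  unsigned long count, unsigned long delta)
{
	unsigned long i;

	for (i = 0; i < count; i++)
		if ((rela[i].r_info & 0xff) == R_PPC_RELATIVE)
			*(uint32_t *)(rela[i].r_offset + delta) =
				rela[i].r_addend + delta;
}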
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index c683bce4c26e..126ef1b08a01 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -104,7 +104,6 @@ CONFIG_ROOT_NFS=y
104CONFIG_PARTITION_ADVANCED=y 104CONFIG_PARTITION_ADVANCED=y
105CONFIG_DEBUG_KERNEL=y 105CONFIG_DEBUG_KERNEL=y
106CONFIG_DETECT_HUNG_TASK=y 106CONFIG_DETECT_HUNG_TASK=y
107# CONFIG_DEBUG_BUGVERBOSE is not set
108# CONFIG_RCU_CPU_STALL_DETECTOR is not set 107# CONFIG_RCU_CPU_STALL_DETECTOR is not set
109CONFIG_SYSCTL_SYSCALL_CHECK=y 108CONFIG_SYSCTL_SYSCALL_CHECK=y
110CONFIG_CRYPTO_PCBC=m 109CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index a721cd3d793f..abcf00ad939e 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -101,7 +101,6 @@ CONFIG_ROOT_NFS=y
101CONFIG_PARTITION_ADVANCED=y 101CONFIG_PARTITION_ADVANCED=y
102CONFIG_DEBUG_KERNEL=y 102CONFIG_DEBUG_KERNEL=y
103CONFIG_DETECT_HUNG_TASK=y 103CONFIG_DETECT_HUNG_TASK=y
104# CONFIG_DEBUG_BUGVERBOSE is not set
105# CONFIG_RCU_CPU_STALL_DETECTOR is not set 104# CONFIG_RCU_CPU_STALL_DETECTOR is not set
106CONFIG_SYSCTL_SYSCALL_CHECK=y 105CONFIG_SYSCTL_SYSCALL_CHECK=y
107CONFIG_CRYPTO_PCBC=m 106CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 55e0725500dc..11662c217ac0 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -58,7 +58,6 @@ CONFIG_PARTITION_ADVANCED=y
58CONFIG_DEBUG_KERNEL=y 58CONFIG_DEBUG_KERNEL=y
59CONFIG_DETECT_HUNG_TASK=y 59CONFIG_DETECT_HUNG_TASK=y
60CONFIG_DEBUG_MUTEXES=y 60CONFIG_DEBUG_MUTEXES=y
61# CONFIG_DEBUG_BUGVERBOSE is not set
62# CONFIG_RCU_CPU_STALL_DETECTOR is not set 61# CONFIG_RCU_CPU_STALL_DETECTOR is not set
63CONFIG_SYSCTL_SYSCALL_CHECK=y 62CONFIG_SYSCTL_SYSCALL_CHECK=y
64# CONFIG_CRYPTO_ANSI_CPRNG is not set 63# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index d724095530a6..ebe9b30b0721 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -59,7 +59,6 @@ CONFIG_PARTITION_ADVANCED=y
59CONFIG_DEBUG_KERNEL=y 59CONFIG_DEBUG_KERNEL=y
60CONFIG_DETECT_HUNG_TASK=y 60CONFIG_DETECT_HUNG_TASK=y
61CONFIG_DEBUG_MUTEXES=y 61CONFIG_DEBUG_MUTEXES=y
62# CONFIG_DEBUG_BUGVERBOSE is not set
63# CONFIG_RCU_CPU_STALL_DETECTOR is not set 62# CONFIG_RCU_CPU_STALL_DETECTOR is not set
64CONFIG_SYSCTL_SYSCALL_CHECK=y 63CONFIG_SYSCTL_SYSCALL_CHECK=y
65# CONFIG_CRYPTO_ANSI_CPRNG is not set 64# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 4b44beaa21ae..eb25229b387a 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -63,7 +63,6 @@ CONFIG_PARTITION_ADVANCED=y
63CONFIG_DEBUG_KERNEL=y 63CONFIG_DEBUG_KERNEL=y
64CONFIG_DETECT_HUNG_TASK=y 64CONFIG_DETECT_HUNG_TASK=y
65CONFIG_DEBUG_MUTEXES=y 65CONFIG_DEBUG_MUTEXES=y
66# CONFIG_DEBUG_BUGVERBOSE is not set
67# CONFIG_RCU_CPU_STALL_DETECTOR is not set 66# CONFIG_RCU_CPU_STALL_DETECTOR is not set
68CONFIG_SYSCTL_SYSCALL_CHECK=y 67CONFIG_SYSCTL_SYSCALL_CHECK=y
69# CONFIG_CRYPTO_ANSI_CPRNG is not set 68# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index b614508d6fd2..f51c7ebc181e 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -168,7 +168,6 @@ CONFIG_MAC_PARTITION=y
168CONFIG_CRC_T10DIF=y 168CONFIG_CRC_T10DIF=y
169CONFIG_DEBUG_KERNEL=y 169CONFIG_DEBUG_KERNEL=y
170CONFIG_DETECT_HUNG_TASK=y 170CONFIG_DETECT_HUNG_TASK=y
171# CONFIG_DEBUG_BUGVERBOSE is not set
172CONFIG_DEBUG_INFO=y 171CONFIG_DEBUG_INFO=y
173# CONFIG_RCU_CPU_STALL_DETECTOR is not set 172# CONFIG_RCU_CPU_STALL_DETECTOR is not set
174CONFIG_SYSCTL_SYSCALL_CHECK=y 173CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index f9e6a3ea5a64..2a84fd7f631c 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -132,8 +132,8 @@ CONFIG_NET_CLS_RSVP=m
132CONFIG_NET_CLS_RSVP6=m 132CONFIG_NET_CLS_RSVP6=m
133CONFIG_NET_CLS_IND=y 133CONFIG_NET_CLS_IND=y
134CONFIG_BT=m 134CONFIG_BT=m
135CONFIG_BT_L2CAP=m 135CONFIG_BT_L2CAP=y
136CONFIG_BT_SCO=m 136CONFIG_BT_SCO=y
137CONFIG_BT_RFCOMM=m 137CONFIG_BT_RFCOMM=m
138CONFIG_BT_RFCOMM_TTY=y 138CONFIG_BT_RFCOMM_TTY=y
139CONFIG_BT_BNEP=m 139CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig
index 9fa1613e5e2b..d32283555b53 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/e55xx_smp_defconfig
@@ -6,10 +6,10 @@ CONFIG_NR_CPUS=2
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
7CONFIG_SYSVIPC=y 7CONFIG_SYSVIPC=y
8CONFIG_BSD_PROCESS_ACCT=y 8CONFIG_BSD_PROCESS_ACCT=y
9CONFIG_SPARSE_IRQ=y
9CONFIG_IKCONFIG=y 10CONFIG_IKCONFIG=y
10CONFIG_IKCONFIG_PROC=y 11CONFIG_IKCONFIG_PROC=y
11CONFIG_LOG_BUF_SHIFT=14 12CONFIG_LOG_BUF_SHIFT=14
12CONFIG_SYSFS_DEPRECATED_V2=y
13CONFIG_BLK_DEV_INITRD=y 13CONFIG_BLK_DEV_INITRD=y
14# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 14# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
15CONFIG_EXPERT=y 15CONFIG_EXPERT=y
@@ -25,8 +25,32 @@ CONFIG_P5020_DS=y
25CONFIG_NO_HZ=y 25CONFIG_NO_HZ=y
26CONFIG_HIGH_RES_TIMERS=y 26CONFIG_HIGH_RES_TIMERS=y
27CONFIG_BINFMT_MISC=m 27CONFIG_BINFMT_MISC=m
28CONFIG_SPARSE_IRQ=y
29# CONFIG_PCI is not set 28# CONFIG_PCI is not set
29CONFIG_NET=y
30CONFIG_PACKET=y
31CONFIG_UNIX=y
32CONFIG_XFRM_USER=y
33CONFIG_NET_KEY=y
34CONFIG_INET=y
35CONFIG_IP_MULTICAST=y
36CONFIG_IP_ADVANCED_ROUTER=y
37CONFIG_IP_MULTIPLE_TABLES=y
38CONFIG_IP_ROUTE_MULTIPATH=y
39CONFIG_IP_ROUTE_VERBOSE=y
40CONFIG_IP_PNP=y
41CONFIG_IP_PNP_DHCP=y
42CONFIG_IP_PNP_BOOTP=y
43CONFIG_IP_PNP_RARP=y
44CONFIG_NET_IPIP=y
45CONFIG_IP_MROUTE=y
46CONFIG_IP_PIMSM_V1=y
47CONFIG_IP_PIMSM_V2=y
48CONFIG_ARPD=y
49CONFIG_INET_ESP=y
50# CONFIG_INET_XFRM_MODE_BEET is not set
51# CONFIG_INET_LRO is not set
52CONFIG_IPV6=y
53CONFIG_IP_SCTP=m
30CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 54CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
31CONFIG_PROC_DEVICETREE=y 55CONFIG_PROC_DEVICETREE=y
32CONFIG_BLK_DEV_LOOP=y 56CONFIG_BLK_DEV_LOOP=y
@@ -34,6 +58,9 @@ CONFIG_BLK_DEV_RAM=y
34CONFIG_BLK_DEV_RAM_SIZE=131072 58CONFIG_BLK_DEV_RAM_SIZE=131072
35CONFIG_MISC_DEVICES=y 59CONFIG_MISC_DEVICES=y
36CONFIG_EEPROM_LEGACY=y 60CONFIG_EEPROM_LEGACY=y
61CONFIG_NETDEVICES=y
62CONFIG_DUMMY=y
63CONFIG_NET_ETHERNET=y
37CONFIG_INPUT_FF_MEMLESS=m 64CONFIG_INPUT_FF_MEMLESS=m
38# CONFIG_INPUT_MOUSEDEV is not set 65# CONFIG_INPUT_MOUSEDEV is not set
39# CONFIG_INPUT_KEYBOARD is not set 66# CONFIG_INPUT_KEYBOARD is not set
@@ -64,22 +91,14 @@ CONFIG_NLS=y
64CONFIG_NLS_UTF8=m 91CONFIG_NLS_UTF8=m
65CONFIG_CRC_T10DIF=y 92CONFIG_CRC_T10DIF=y
66CONFIG_CRC_ITU_T=m 93CONFIG_CRC_ITU_T=m
67CONFIG_LIBCRC32C=m
68CONFIG_FRAME_WARN=1024 94CONFIG_FRAME_WARN=1024
69CONFIG_DEBUG_FS=y 95CONFIG_DEBUG_FS=y
70CONFIG_DEBUG_KERNEL=y 96CONFIG_DEBUG_KERNEL=y
71CONFIG_DETECT_HUNG_TASK=y 97CONFIG_DETECT_HUNG_TASK=y
72# CONFIG_DEBUG_BUGVERBOSE is not set
73CONFIG_DEBUG_INFO=y 98CONFIG_DEBUG_INFO=y
74# CONFIG_RCU_CPU_STALL_DETECTOR is not set 99# CONFIG_RCU_CPU_STALL_DETECTOR is not set
75CONFIG_SYSCTL_SYSCALL_CHECK=y 100CONFIG_SYSCTL_SYSCALL_CHECK=y
76CONFIG_VIRQ_DEBUG=y 101CONFIG_VIRQ_DEBUG=y
77CONFIG_CRYPTO=y
78CONFIG_CRYPTO_CBC=y
79CONFIG_CRYPTO_PCBC=m 102CONFIG_CRYPTO_PCBC=m
80CONFIG_CRYPTO_HMAC=y
81CONFIG_CRYPTO_MD5=y
82CONFIG_CRYPTO_SHA1=m
83CONFIG_CRYPTO_DES=y
84# CONFIG_CRYPTO_ANSI_CPRNG is not set 103# CONFIG_CRYPTO_ANSI_CPRNG is not set
85CONFIG_CRYPTO_DEV_TALITOS=y 104CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index c06a86c33098..96b89df7752a 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -204,7 +204,6 @@ CONFIG_CRC_T10DIF=y
204CONFIG_DEBUG_FS=y 204CONFIG_DEBUG_FS=y
205CONFIG_DEBUG_KERNEL=y 205CONFIG_DEBUG_KERNEL=y
206CONFIG_DETECT_HUNG_TASK=y 206CONFIG_DETECT_HUNG_TASK=y
207# CONFIG_DEBUG_BUGVERBOSE is not set
208CONFIG_DEBUG_INFO=y 207CONFIG_DEBUG_INFO=y
209# CONFIG_RCU_CPU_STALL_DETECTOR is not set 208# CONFIG_RCU_CPU_STALL_DETECTOR is not set
210CONFIG_SYSCTL_SYSCALL_CHECK=y 209CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 942ced90557c..de65841aa04e 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -206,7 +206,6 @@ CONFIG_CRC_T10DIF=y
206CONFIG_DEBUG_FS=y 206CONFIG_DEBUG_FS=y
207CONFIG_DEBUG_KERNEL=y 207CONFIG_DEBUG_KERNEL=y
208CONFIG_DETECT_HUNG_TASK=y 208CONFIG_DETECT_HUNG_TASK=y
209# CONFIG_DEBUG_BUGVERBOSE is not set
210CONFIG_DEBUG_INFO=y 209CONFIG_DEBUG_INFO=y
211# CONFIG_RCU_CPU_STALL_DETECTOR is not set 210# CONFIG_RCU_CPU_STALL_DETECTOR is not set
212CONFIG_SYSCTL_SYSCALL_CHECK=y 211CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 038a308cbfc4..a1cc8179e9fd 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -171,7 +171,6 @@ CONFIG_MAC_PARTITION=y
171CONFIG_CRC_T10DIF=y 171CONFIG_CRC_T10DIF=y
172CONFIG_DEBUG_KERNEL=y 172CONFIG_DEBUG_KERNEL=y
173CONFIG_DETECT_HUNG_TASK=y 173CONFIG_DETECT_HUNG_TASK=y
174# CONFIG_DEBUG_BUGVERBOSE is not set
175CONFIG_DEBUG_INFO=y 174CONFIG_DEBUG_INFO=y
176# CONFIG_RCU_CPU_STALL_DETECTOR is not set 175# CONFIG_RCU_CPU_STALL_DETECTOR is not set
177CONFIG_SYSCTL_SYSCALL_CHECK=y 176CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index ac4fc41035f6..f8b394a76ac3 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -112,8 +112,8 @@ CONFIG_IRDA_CACHE_LAST_LSAP=y
112CONFIG_IRDA_FAST_RR=y 112CONFIG_IRDA_FAST_RR=y
113CONFIG_IRTTY_SIR=m 113CONFIG_IRTTY_SIR=m
114CONFIG_BT=m 114CONFIG_BT=m
115CONFIG_BT_L2CAP=m 115CONFIG_BT_L2CAP=y
116CONFIG_BT_SCO=m 116CONFIG_BT_SCO=y
117CONFIG_BT_RFCOMM=m 117CONFIG_BT_RFCOMM=m
118CONFIG_BT_RFCOMM_TTY=y 118CONFIG_BT_RFCOMM_TTY=y
119CONFIG_BT_BNEP=m 119CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 0a10fb009ef7..214208924a9c 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -351,8 +351,8 @@ CONFIG_VLSI_FIR=m
351CONFIG_VIA_FIR=m 351CONFIG_VIA_FIR=m
352CONFIG_MCS_FIR=m 352CONFIG_MCS_FIR=m
353CONFIG_BT=m 353CONFIG_BT=m
354CONFIG_BT_L2CAP=m 354CONFIG_BT_L2CAP=y
355CONFIG_BT_SCO=m 355CONFIG_BT_SCO=y
356CONFIG_BT_RFCOMM=m 356CONFIG_BT_RFCOMM=m
357CONFIG_BT_RFCOMM_TTY=y 357CONFIG_BT_RFCOMM_TTY=y
358CONFIG_BT_BNEP=m 358CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index caba919f65d8..6472322bf13b 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -52,8 +52,8 @@ CONFIG_IP_PNP_DHCP=y
52# CONFIG_INET_DIAG is not set 52# CONFIG_INET_DIAG is not set
53CONFIG_IPV6=y 53CONFIG_IPV6=y
54CONFIG_BT=m 54CONFIG_BT=m
55CONFIG_BT_L2CAP=m 55CONFIG_BT_L2CAP=y
56CONFIG_BT_SCO=m 56CONFIG_BT_SCO=y
57CONFIG_BT_RFCOMM=m 57CONFIG_BT_RFCOMM=m
58CONFIG_BT_RFCOMM_TTY=y 58CONFIG_BT_RFCOMM_TTY=y
59CONFIG_BT_BNEP=m 59CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 249ddd0a27cd..7de13865508c 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -146,12 +146,18 @@ CONFIG_SCSI_MULTI_LUN=y
146CONFIG_SCSI_CONSTANTS=y 146CONFIG_SCSI_CONSTANTS=y
147CONFIG_SCSI_FC_ATTRS=y 147CONFIG_SCSI_FC_ATTRS=y
148CONFIG_SCSI_SAS_ATTRS=m 148CONFIG_SCSI_SAS_ATTRS=m
149CONFIG_SCSI_CXGB3_ISCSI=m
150CONFIG_SCSI_CXGB4_ISCSI=m
151CONFIG_SCSI_BNX2_ISCSI=m
152CONFIG_SCSI_BNX2_ISCSI=m
153CONFIG_BE2ISCSI=m
149CONFIG_SCSI_IBMVSCSI=y 154CONFIG_SCSI_IBMVSCSI=y
150CONFIG_SCSI_IBMVFC=m 155CONFIG_SCSI_IBMVFC=m
151CONFIG_SCSI_SYM53C8XX_2=y 156CONFIG_SCSI_SYM53C8XX_2=y
152CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 157CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
153CONFIG_SCSI_IPR=y 158CONFIG_SCSI_IPR=y
154CONFIG_SCSI_QLA_FC=m 159CONFIG_SCSI_QLA_FC=m
160CONFIG_SCSI_QLA_ISCSI=m
155CONFIG_SCSI_LPFC=m 161CONFIG_SCSI_LPFC=m
156CONFIG_ATA=y 162CONFIG_ATA=y
157# CONFIG_ATA_SFF is not set 163# CONFIG_ATA_SFF is not set
@@ -197,6 +203,8 @@ CONFIG_S2IO=m
197CONFIG_MYRI10GE=m 203CONFIG_MYRI10GE=m
198CONFIG_NETXEN_NIC=m 204CONFIG_NETXEN_NIC=m
199CONFIG_MLX4_EN=m 205CONFIG_MLX4_EN=m
206CONFIG_QLGE=m
207CONFIG_BE2NET=m
200CONFIG_PPP=m 208CONFIG_PPP=m
201CONFIG_PPP_ASYNC=m 209CONFIG_PPP_ASYNC=m
202CONFIG_PPP_SYNC_TTY=m 210CONFIG_PPP_SYNC_TTY=m
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1833d1a07e79..c0d842cfd012 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -157,6 +157,7 @@ extern const char *powerpc_base_platform;
157#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000) 157#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000)
158#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) 158#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
159#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) 159#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
160#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000)
160#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) 161#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
161#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) 162#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
162#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) 163#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
@@ -178,22 +179,18 @@ extern const char *powerpc_base_platform;
178#define LONG_ASM_CONST(x) 0 179#define LONG_ASM_CONST(x) 0
179#endif 180#endif
180 181
181#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000) 182
182#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000) 183#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000)
183#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000) 184#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
184#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) 185#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
185#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) 186#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
186#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) 187#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
187#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000) 188#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000)
188#define CPU_FTR_LOCKLESS_TLBIE LONG_ASM_CONST(0x0000040000000000)
189#define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000)
190#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) 189#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
191#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) 190#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
192#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) 191#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
193#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) 192#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
194#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) 193#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
195#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000)
196#define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000)
197#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) 194#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
198#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) 195#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
199#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) 196#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
@@ -202,12 +199,14 @@ extern const char *powerpc_base_platform;
202#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000) 199#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
203#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000) 200#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
204#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000) 201#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
202#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000)
205 203
206#ifndef __ASSEMBLY__ 204#ifndef __ASSEMBLY__
207 205
208#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | \ 206#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
209 CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \ 207
210 CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE) 208#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \
209 MMU_FTR_16M_PAGE)
211 210
212/* We only set the altivec features if the kernel was compiled with altivec 211/* We only set the altivec features if the kernel was compiled with altivec
213 * support 212 * support
@@ -387,7 +386,8 @@ extern const char *powerpc_base_platform;
387 CPU_FTR_DBELL) 386 CPU_FTR_DBELL)
388#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ 387#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
389 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ 388 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
390 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD) 389 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
390 CPU_FTR_DEBUG_LVL_EXC)
391#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) 391#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
392 392
393/* 64-bit CPUs */ 393/* 64-bit CPUs */
@@ -407,44 +407,45 @@ extern const char *powerpc_base_platform;
407#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 407#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
408 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 408 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
409 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 409 CPU_FTR_MMCRA | CPU_FTR_SMT | \
410 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 410 CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
411 CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \ 411 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
412 CPU_FTR_POPCNTB)
413#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 412#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
414 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 413 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
415 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 414 CPU_FTR_MMCRA | CPU_FTR_SMT | \
416 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 415 CPU_FTR_COHERENT_ICACHE | \
417 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 416 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
418 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ 417 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
419 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) 418 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
420#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 419#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
421 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 420 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\
422 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 421 CPU_FTR_MMCRA | CPU_FTR_SMT | \
423 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 422 CPU_FTR_COHERENT_ICACHE | \
424 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 423 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
425 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 424 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
426 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD) 425 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
426 CPU_FTR_ICSWX | CPU_FTR_CFAR)
427#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 427#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
428 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 428 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
429 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 429 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
430 CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ 430 CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
431 CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
432 CPU_FTR_UNALIGNED_LD_STD) 431 CPU_FTR_UNALIGNED_LD_STD)
433#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 432#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
434 CPU_FTR_PPCAS_ARCH_V2 | \ 433 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
435 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ 434 CPU_FTR_PURR | CPU_FTR_REAL_LE)
436 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
437#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) 435#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
438 436
437#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
438 CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
439
439#ifdef __powerpc64__ 440#ifdef __powerpc64__
440#ifdef CONFIG_PPC_BOOK3E 441#ifdef CONFIG_PPC_BOOK3E
441#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500) 442#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2)
442#else 443#else
443#define CPU_FTRS_POSSIBLE \ 444#define CPU_FTRS_POSSIBLE \
444 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ 445 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
445 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ 446 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
446 CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ 447 CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
447 CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) 448 CPU_FTR_VSX)
448#endif 449#endif
449#else 450#else
450enum { 451enum {
@@ -487,7 +488,7 @@ enum {
487 488
488#ifdef __powerpc64__ 489#ifdef __powerpc64__
489#ifdef CONFIG_PPC_BOOK3E 490#ifdef CONFIG_PPC_BOOK3E
490#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500) 491#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2)
491#else 492#else
492#define CPU_FTRS_ALWAYS \ 493#define CPU_FTRS_ALWAYS \
493 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ 494 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
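CPU_FTR_CFAR, CPU_FTR_HVMODE_206 and CPU_FTR_ICSWX are ordinary dynamically-tested feature bits, so C code outside of feature sections can gate on them with cpu_has_feature(). A minimal sketch (SPRN_CFAR is assumed to come from the reg.h changes elsewhere in this series):

#include <asm/cputable.h>
#include <asm/reg.h>

/* Sketch: read the Come-From Address Register only on CPUs whose
 * feature table above sets CPU_FTR_CFAR. */
static unsigned long read_cfar_or_zero(void)
{
	if (cpu_has_feature(CPU_FTR_CFAR))
		return mfspr(SPRN_CFAR);
	return 0;
}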
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index f71bb4c118b4..ce516e5eb0d3 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask;
37 * This can typically be used for things like IPI for tlb invalidations 37 * This can typically be used for things like IPI for tlb invalidations
38 * since those need to be done only once per core/TLB 38 * since those need to be done only once per core/TLB
39 */ 39 */
40static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads) 40static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
41{ 41{
42 cpumask_t tmp, res; 42 cpumask_t tmp, res;
43 int i; 43 int i;
44 44
45 res = CPU_MASK_NONE; 45 cpumask_clear(&res);
46 for (i = 0; i < NR_CPUS; i += threads_per_core) { 46 for (i = 0; i < NR_CPUS; i += threads_per_core) {
47 cpus_shift_left(tmp, threads_core_mask, i); 47 cpumask_shift_left(&tmp, &threads_core_mask, i);
48 if (cpus_intersects(threads, tmp)) 48 if (cpumask_intersects(threads, &tmp))
49 cpu_set(i, res); 49 cpumask_set_cpu(i, &res);
50 } 50 }
51 return res; 51 return res;
52} 52}
@@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void)
58 58
59static inline cpumask_t cpu_online_cores_map(void) 59static inline cpumask_t cpu_online_cores_map(void)
60{ 60{
61 return cpu_thread_mask_to_cores(cpu_online_map); 61 return cpu_thread_mask_to_cores(cpu_online_mask);
62} 62}
63 63
64#ifdef CONFIG_SMP 64#ifdef CONFIG_SMP
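With cpu_thread_mask_to_cores() now taking a const struct cpumask * and the body converted to the pointer-based cpumask API, callers follow the same style. A small usage sketch (illustrative only, not from the patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <asm/cputhreads.h>

/* Sketch: cpu_online_cores_map() yields one set bit per online core
 * (the first thread of each core); walk it with for_each_cpu(). */
static void log_online_cores(void)
{
	cpumask_t cores = cpu_online_cores_map();
	int cpu;

	for_each_cpu(cpu, &cores)
		pr_info("first thread of an online core: cpu %d\n", cpu);
}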
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 0893ab9343a6..9c70d0ca96d4 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -27,9 +27,8 @@ enum ppc_dbell {
27 PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ 27 PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
28}; 28};
29 29
30extern void doorbell_message_pass(int target, int msg); 30extern void doorbell_cause_ipi(int cpu, unsigned long data);
31extern void doorbell_exception(struct pt_regs *regs); 31extern void doorbell_exception(struct pt_regs *regs);
32extern void doorbell_check_self(void);
33extern void doorbell_setup_this_cpu(void); 32extern void doorbell_setup_this_cpu(void);
34 33
35static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) 34static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index f0fb4fc1f6e6..45921672b97a 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -52,6 +52,10 @@ extern struct ppc_emulated {
52#ifdef CONFIG_VSX 52#ifdef CONFIG_VSX
53 struct ppc_emulated_entry vsx; 53 struct ppc_emulated_entry vsx;
54#endif 54#endif
55#ifdef CONFIG_PPC64
56 struct ppc_emulated_entry mfdscr;
57 struct ppc_emulated_entry mtdscr;
58#endif
55} ppc_emulated; 59} ppc_emulated;
56 60
57extern u32 ppc_warn_emulated; 61extern u32 ppc_warn_emulated;
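The mfdscr/mtdscr entries give the DSCR emulation path somewhere to account its hits. A hedged sketch of the accounting call (PPC_WARN_EMULATED already exists in this header; the surrounding call site is an assumption, not code from this patch):

#include <asm/emulated_ops.h>
#include <asm/ptrace.h>

#ifdef CONFIG_PPC64
/* Sketch: bump the new counter when user space traps on mtspr(DSCR). */
static void account_mtdscr_emulation(struct pt_regs *regs)
{
	PPC_WARN_EMULATED(mtdscr, regs);
}
#endif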
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 7778d6f0c878..f5dfe3411f64 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -46,6 +46,7 @@
46#define EX_CCR 60 46#define EX_CCR 60
47#define EX_R3 64 47#define EX_R3 64
48#define EX_LR 72 48#define EX_LR 72
49#define EX_CFAR 80
49 50
50/* 51/*
51 * We're short on space and time in the exception prolog, so we can't 52 * We're short on space and time in the exception prolog, so we can't
@@ -56,30 +57,40 @@
56#define LOAD_HANDLER(reg, label) \ 57#define LOAD_HANDLER(reg, label) \
57 addi reg,reg,(label)-_stext; /* virt addr of handler ... */ 58 addi reg,reg,(label)-_stext; /* virt addr of handler ... */
58 59
59#define EXCEPTION_PROLOG_1(area) \ 60/* Exception register prefixes */
60 mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ 61#define EXC_HV H
62#define EXC_STD
63
64#define EXCEPTION_PROLOG_1(area) \
65 GET_PACA(r13); \
61 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 66 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
62 std r10,area+EX_R10(r13); \ 67 std r10,area+EX_R10(r13); \
63 std r11,area+EX_R11(r13); \ 68 std r11,area+EX_R11(r13); \
64 std r12,area+EX_R12(r13); \ 69 std r12,area+EX_R12(r13); \
65 mfspr r9,SPRN_SPRG_SCRATCH0; \ 70 BEGIN_FTR_SECTION_NESTED(66); \
71 mfspr r10,SPRN_CFAR; \
72 std r10,area+EX_CFAR(r13); \
73 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
74 GET_SCRATCH0(r9); \
66 std r9,area+EX_R13(r13); \ 75 std r9,area+EX_R13(r13); \
67 mfcr r9 76 mfcr r9
68 77
69#define EXCEPTION_PROLOG_PSERIES_1(label) \ 78#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
70 ld r12,PACAKBASE(r13); /* get high part of &label */ \ 79 ld r12,PACAKBASE(r13); /* get high part of &label */ \
71 ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ 80 ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
72 mfspr r11,SPRN_SRR0; /* save SRR0 */ \ 81 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
73 LOAD_HANDLER(r12,label) \ 82 LOAD_HANDLER(r12,label) \
74 mtspr SPRN_SRR0,r12; \ 83 mtspr SPRN_##h##SRR0,r12; \
75 mfspr r12,SPRN_SRR1; /* and SRR1 */ \ 84 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
76 mtspr SPRN_SRR1,r10; \ 85 mtspr SPRN_##h##SRR1,r10; \
77 rfid; \ 86 h##rfid; \
78 b . /* prevent speculative execution */ 87 b . /* prevent speculative execution */
88#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
89 __EXCEPTION_PROLOG_PSERIES_1(label, h)
79 90
80#define EXCEPTION_PROLOG_PSERIES(area, label) \ 91#define EXCEPTION_PROLOG_PSERIES(area, label, h) \
81 EXCEPTION_PROLOG_1(area); \ 92 EXCEPTION_PROLOG_1(area); \
82 EXCEPTION_PROLOG_PSERIES_1(label); 93 EXCEPTION_PROLOG_PSERIES_1(label, h);
83 94
84/* 95/*
85 * The common exception prolog is used for all except a few exceptions 96 * The common exception prolog is used for all except a few exceptions
@@ -98,10 +109,11 @@
98 beq- 1f; \ 109 beq- 1f; \
99 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ 110 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
1001: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ 1111: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
101 bge- cr1,2f; /* abort if it is */ \ 112 blt+ cr1,3f; /* abort if it is */ \
102 b 3f; \ 113 li r1,(n); /* will be reloaded later */ \
1032: li r1,(n); /* will be reloaded later */ \
104 sth r1,PACA_TRAP_SAVE(r13); \ 114 sth r1,PACA_TRAP_SAVE(r13); \
115 std r3,area+EX_R3(r13); \
116 addi r3,r13,area; /* r3 -> where regs are saved*/ \
105 b bad_stack; \ 117 b bad_stack; \
1063: std r9,_CCR(r1); /* save CR in stackframe */ \ 1183: std r9,_CCR(r1); /* save CR in stackframe */ \
107 std r11,_NIP(r1); /* save SRR0 in stackframe */ \ 119 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
@@ -123,6 +135,10 @@
123 std r9,GPR11(r1); \ 135 std r9,GPR11(r1); \
124 std r10,GPR12(r1); \ 136 std r10,GPR12(r1); \
125 std r11,GPR13(r1); \ 137 std r11,GPR13(r1); \
138 BEGIN_FTR_SECTION_NESTED(66); \
139 ld r10,area+EX_CFAR(r13); \
140 std r10,ORIG_GPR3(r1); \
141 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
126 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ 142 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
127 mflr r9; /* save LR in stackframe */ \ 143 mflr r9; /* save LR in stackframe */ \
128 std r9,_LINK(r1); \ 144 std r9,_LINK(r1); \
@@ -143,57 +159,62 @@
143/* 159/*
144 * Exception vectors. 160 * Exception vectors.
145 */ 161 */
146#define STD_EXCEPTION_PSERIES(n, label) \ 162#define STD_EXCEPTION_PSERIES(loc, vec, label) \
147 . = n; \ 163 . = loc; \
148 .globl label##_pSeries; \ 164 .globl label##_pSeries; \
149label##_pSeries: \ 165label##_pSeries: \
150 HMT_MEDIUM; \ 166 HMT_MEDIUM; \
151 DO_KVM n; \ 167 DO_KVM vec; \
152 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ 168 SET_SCRATCH0(r13); /* save r13 */ \
153 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 169 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD)
154 170
155#define HSTD_EXCEPTION_PSERIES(n, label) \ 171#define STD_EXCEPTION_HV(loc, vec, label) \
156 . = n; \ 172 . = loc; \
157 .globl label##_pSeries; \ 173 .globl label##_hv; \
158label##_pSeries: \ 174label##_hv: \
159 HMT_MEDIUM; \ 175 HMT_MEDIUM; \
160 mtspr SPRN_SPRG_SCRATCH0,r20; /* save r20 */ \ 176 DO_KVM vec; \
161 mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \ 177 SET_SCRATCH0(r13); /* save r13 */ \
162 mtspr SPRN_SRR0,r20; \ 178 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV)
163 mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \
164 mtspr SPRN_SRR1,r20; \
165 mfspr r20,SPRN_SPRG_SCRATCH0; /* restore r20 */ \
166 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
167 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
168 179
169 180#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
170#define MASKABLE_EXCEPTION_PSERIES(n, label) \
171 . = n; \
172 .globl label##_pSeries; \
173label##_pSeries: \
174 HMT_MEDIUM; \ 181 HMT_MEDIUM; \
175 DO_KVM n; \ 182 DO_KVM vec; \
176 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ 183 SET_SCRATCH0(r13); /* save r13 */ \
177 mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ 184 GET_PACA(r13); \
178 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ 185 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
179 std r10,PACA_EXGEN+EX_R10(r13); \ 186 std r10,PACA_EXGEN+EX_R10(r13); \
180 lbz r10,PACASOFTIRQEN(r13); \ 187 lbz r10,PACASOFTIRQEN(r13); \
181 mfcr r9; \ 188 mfcr r9; \
182 cmpwi r10,0; \ 189 cmpwi r10,0; \
183 beq masked_interrupt; \ 190 beq masked_##h##interrupt; \
184 mfspr r10,SPRN_SPRG_SCRATCH0; \ 191 GET_SCRATCH0(r10); \
185 std r10,PACA_EXGEN+EX_R13(r13); \ 192 std r10,PACA_EXGEN+EX_R13(r13); \
186 std r11,PACA_EXGEN+EX_R11(r13); \ 193 std r11,PACA_EXGEN+EX_R11(r13); \
187 std r12,PACA_EXGEN+EX_R12(r13); \ 194 std r12,PACA_EXGEN+EX_R12(r13); \
188 ld r12,PACAKBASE(r13); /* get high part of &label */ \ 195 ld r12,PACAKBASE(r13); /* get high part of &label */ \
189 ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ 196 ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
190 mfspr r11,SPRN_SRR0; /* save SRR0 */ \ 197 mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
191 LOAD_HANDLER(r12,label##_common) \ 198 LOAD_HANDLER(r12,label##_common) \
192 mtspr SPRN_SRR0,r12; \ 199 mtspr SPRN_##h##SRR0,r12; \
193 mfspr r12,SPRN_SRR1; /* and SRR1 */ \ 200 mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
194 mtspr SPRN_SRR1,r10; \ 201 mtspr SPRN_##h##SRR1,r10; \
195 rfid; \ 202 h##rfid; \
196 b . /* prevent speculative execution */ 203 b . /* prevent speculative execution */
204#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
205 __MASKABLE_EXCEPTION_PSERIES(vec, label, h)
206
207#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
208 . = loc; \
209 .globl label##_pSeries; \
210label##_pSeries: \
211 _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD)
212
213#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
214 . = loc; \
215 .globl label##_hv; \
216label##_hv: \
217 _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV)
197 218
198#ifdef CONFIG_PPC_ISERIES 219#ifdef CONFIG_PPC_ISERIES
199#define DISABLE_INTS \ 220#define DISABLE_INTS \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 921a8470e18a..9a67a38bf7b9 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -49,7 +49,7 @@ label##5: \
49 FTR_ENTRY_OFFSET label##2b-label##5b; \ 49 FTR_ENTRY_OFFSET label##2b-label##5b; \
50 FTR_ENTRY_OFFSET label##3b-label##5b; \ 50 FTR_ENTRY_OFFSET label##3b-label##5b; \
51 FTR_ENTRY_OFFSET label##4b-label##5b; \ 51 FTR_ENTRY_OFFSET label##4b-label##5b; \
52 .ifgt (label##4b-label##3b)-(label##2b-label##1b); \ 52 .ifgt (label##4b- label##3b)-(label##2b- label##1b); \
53 .error "Feature section else case larger than body"; \ 53 .error "Feature section else case larger than body"; \
54 .endif; \ 54 .endif; \
55 .popsection; 55 .popsection;
@@ -146,6 +146,19 @@ label##5: \
146 146
147#ifndef __ASSEMBLY__ 147#ifndef __ASSEMBLY__
148 148
149#define ASM_FTR_IF(section_if, section_else, msk, val) \
150 stringify_in_c(BEGIN_FTR_SECTION) \
151 section_if "; " \
152 stringify_in_c(FTR_SECTION_ELSE) \
153 section_else "; " \
154 stringify_in_c(ALT_FTR_SECTION_END((msk), (val)))
155
156#define ASM_FTR_IFSET(section_if, section_else, msk) \
157 ASM_FTR_IF(section_if, section_else, (msk), (msk))
158
159#define ASM_FTR_IFCLR(section_if, section_else, msk) \
160 ASM_FTR_IF(section_if, section_else, (msk), 0)
161
149#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ 162#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \
150 stringify_in_c(BEGIN_MMU_FTR_SECTION) \ 163 stringify_in_c(BEGIN_MMU_FTR_SECTION) \
151 section_if "; " \ 164 section_if "; " \
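ASM_FTR_IFSET/ASM_FTR_IFCLR mirror the ASM_MMU_FTR_* helpers directly below them: they build an inline-asm template string that the feature-fixup code patches at boot. A hedged illustration of the intended pattern (the operand wiring through an "i" constraint is an assumption; the macro names and argument order come from the hunk above):

#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/* Sketch: emit mfspr rX,309 (PURR) on CPUs with CPU_FTR_PURR and fall
 * back to mftb elsewhere; %1 carries the feature mask. */
static unsigned long read_purr_or_timebase(void)
{
	unsigned long val;

	asm volatile(ASM_FTR_IFSET("mfspr %0,309", "mftb %0", %1)
		     : "=r" (val) : "i" (CPU_FTR_PURR));
	return val;
}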
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 4ef662e4a31d..3a6c586c4e40 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -47,6 +47,7 @@
47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) 47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
48#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) 48#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
49#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) 49#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
50#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
50 51
51#ifndef __ASSEMBLY__ 52#ifndef __ASSEMBLY__
52 53
@@ -60,7 +61,7 @@ enum {
60 FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | 61 FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
61 FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | 62 FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
62 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | 63 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
63 FW_FEATURE_CMO | FW_FEATURE_VPHN, 64 FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO,
64 FW_FEATURE_PSERIES_ALWAYS = 0, 65 FW_FEATURE_PSERIES_ALWAYS = 0,
65 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 66 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
66 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 67 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 8edec710cc6d..852b8c1c09db 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -102,6 +102,7 @@
102#define H_ANDCOND (1UL<<(63-33)) 102#define H_ANDCOND (1UL<<(63-33))
103#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ 103#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
104#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ 104#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
105#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
105#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ 106#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
106#define H_COPY_PAGE (1UL<<(63-49)) 107#define H_COPY_PAGE (1UL<<(63-49))
107#define H_N (1UL<<(63-61)) 108#define H_N (1UL<<(63-61))
@@ -234,6 +235,7 @@
234#define H_GET_MPP 0x2D4 235#define H_GET_MPP 0x2D4
235#define H_HOME_NODE_ASSOCIATIVITY 0x2EC 236#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
236#define H_BEST_ENERGY 0x2F4 237#define H_BEST_ENERGY 0x2F4
238#define H_GET_MPP_X 0x314
237#define MAX_HCALL_OPCODE H_BEST_ENERGY 239#define MAX_HCALL_OPCODE H_BEST_ENERGY
238 240
239#ifndef __ASSEMBLY__ 241#ifndef __ASSEMBLY__
@@ -312,6 +314,16 @@ struct hvcall_mpp_data {
312 314
313int h_get_mpp(struct hvcall_mpp_data *); 315int h_get_mpp(struct hvcall_mpp_data *);
314 316
317struct hvcall_mpp_x_data {
318 unsigned long coalesced_bytes;
319 unsigned long pool_coalesced_bytes;
320 unsigned long pool_purr_cycles;
321 unsigned long pool_spurr_cycles;
322 unsigned long reserved[3];
323};
324
325int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data);
326
315#ifdef CONFIG_PPC_PSERIES 327#ifdef CONFIG_PPC_PSERIES
316extern int CMO_PrPSP; 328extern int CMO_PrPSP;
317extern int CMO_SecPSP; 329extern int CMO_SecPSP;
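H_GET_MPP_X extends H_GET_MPP with page-coalescing statistics (see the new H_COALESCE_CAND flag above). A usage sketch of the accessor (treating any non-zero return as failure is an assumption; the structure layout is taken from the hunk above):

#include <asm/hvcall.h>

/* Sketch: report how many bytes of this partition's memory the
 * hypervisor currently has coalesced. */
static unsigned long partition_coalesced_bytes(void)
{
	struct hvcall_mpp_x_data mpp_x;

	if (h_get_mpp_x(&mpp_x) != 0)
		return 0;
	return mpp_x.coalesced_bytes;
}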
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
index 6efc7782ebf2..fbae49286926 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.h
+++ b/arch/powerpc/include/asm/io-workarounds.h
@@ -31,7 +31,6 @@ struct iowa_bus {
31 void *private; 31 void *private;
32}; 32};
33 33
34void __devinit io_workaround_init(void);
35void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, 34void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
36 int (*)(struct iowa_bus *, void *), void *); 35 int (*)(struct iowa_bus *, void *), void *);
37struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); 36struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 001f2f11c19b..45698d55cd6a 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -2,6 +2,8 @@
2#define _ASM_POWERPC_IO_H 2#define _ASM_POWERPC_IO_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#define ARCH_HAS_IOREMAP_WC
6
5/* 7/*
6 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -481,10 +483,16 @@ __do_out_asm(_rec_outl, "stwbrx")
481 _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) 483 _memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
482#endif /* !CONFIG_EEH */ 484#endif /* !CONFIG_EEH */
483 485
484#ifdef CONFIG_PPC_INDIRECT_IO 486#ifdef CONFIG_PPC_INDIRECT_PIO
485#define DEF_PCI_HOOK(x) x 487#define DEF_PCI_HOOK_pio(x) x
488#else
489#define DEF_PCI_HOOK_pio(x) NULL
490#endif
491
492#ifdef CONFIG_PPC_INDIRECT_MMIO
493#define DEF_PCI_HOOK_mem(x) x
486#else 494#else
487#define DEF_PCI_HOOK(x) NULL 495#define DEF_PCI_HOOK_mem(x) NULL
488#endif 496#endif
489 497
490/* Structure containing all the hooks */ 498/* Structure containing all the hooks */
@@ -504,7 +512,7 @@ extern struct ppc_pci_io {
504#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ 512#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
505static inline ret name at \ 513static inline ret name at \
506{ \ 514{ \
507 if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ 515 if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
508 return ppc_pci_io.name al; \ 516 return ppc_pci_io.name al; \
509 return __do_##name al; \ 517 return __do_##name al; \
510} 518}
@@ -512,7 +520,7 @@ static inline ret name at \
512#define DEF_PCI_AC_NORET(name, at, al, space, aa) \ 520#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
513static inline void name at \ 521static inline void name at \
514{ \ 522{ \
515 if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ 523 if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
516 ppc_pci_io.name al; \ 524 ppc_pci_io.name al; \
517 else \ 525 else \
518 __do_##name al; \ 526 __do_##name al; \
@@ -616,12 +624,13 @@ static inline void iosync(void)
616 * * ioremap is the standard one and provides non-cacheable guarded mappings 624 * * ioremap is the standard one and provides non-cacheable guarded mappings
617 * and can be hooked by the platform via ppc_md 625 * and can be hooked by the platform via ppc_md
618 * 626 *
619 * * ioremap_flags allows to specify the page flags as an argument and can 627 * * ioremap_prot allows to specify the page flags as an argument and can
620 * also be hooked by the platform via ppc_md. ioremap_prot is the exact 628 * also be hooked by the platform via ppc_md.
621 * same thing as ioremap_flags.
622 * 629 *
623 * * ioremap_nocache is identical to ioremap 630 * * ioremap_nocache is identical to ioremap
624 * 631 *
632 * * ioremap_wc enables write combining
633 *
625 * * iounmap undoes such a mapping and can be hooked 634 * * iounmap undoes such a mapping and can be hooked
626 * 635 *
627 * * __ioremap_at (and the pending __iounmap_at) are low level functions to 636 * * __ioremap_at (and the pending __iounmap_at) are low level functions to
@@ -629,7 +638,7 @@ static inline void iosync(void)
629 * currently be hooked. Must be page aligned. 638 * currently be hooked. Must be page aligned.
630 * 639 *
631 * * __ioremap is the low level implementation used by ioremap and 640 * * __ioremap is the low level implementation used by ioremap and
632 * ioremap_flags and cannot be hooked (but can be used by a hook on one 641 * ioremap_prot and cannot be hooked (but can be used by a hook on one
633 * of the previous ones) 642 * of the previous ones)
634 * 643 *
635 * * __ioremap_caller is the same as above but takes an explicit caller 644 * * __ioremap_caller is the same as above but takes an explicit caller
@@ -640,10 +649,10 @@ static inline void iosync(void)
640 * 649 *
641 */ 650 */
642extern void __iomem *ioremap(phys_addr_t address, unsigned long size); 651extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
643extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size, 652extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
644 unsigned long flags); 653 unsigned long flags);
654extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
645#define ioremap_nocache(addr, size) ioremap((addr), (size)) 655#define ioremap_nocache(addr, size) ioremap((addr), (size))
646#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
647 656
648extern void iounmap(volatile void __iomem *addr); 657extern void iounmap(volatile void __iomem *addr);
649 658
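The comment block above settles the ioremap family: plain ioremap() for non-cacheable guarded mappings, ioremap_prot() for caller-supplied page flags, ioremap_wc() for write combining, and iounmap() to tear a mapping down. A minimal sketch of a driver picking between two of these variants; the device, addresses and sizes are hypothetical:

#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *example_regs;	/* guarded register window */
static void __iomem *example_fb;	/* write-combined frame buffer */

static int example_map(phys_addr_t reg_base, phys_addr_t fb_base)
{
	/* non-cacheable, guarded mapping for device registers */
	example_regs = ioremap(reg_base, 0x1000);
	if (!example_regs)
		return -ENOMEM;

	/* write-combining mapping for a frame buffer */
	example_fb = ioremap_wc(fb_base, 0x100000);
	if (!example_fb) {
		iounmap(example_regs);
		return -ENOMEM;
	}
	return 0;
}

static void example_unmap(void)
{
	iounmap(example_fb);
	iounmap(example_regs);
}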
diff --git a/arch/powerpc/include/asm/io_event_irq.h b/arch/powerpc/include/asm/io_event_irq.h
new file mode 100644
index 000000000000..b1a9a1be3c21
--- /dev/null
+++ b/arch/powerpc/include/asm/io_event_irq.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef _ASM_POWERPC_IO_EVENT_IRQ_H
11#define _ASM_POWERPC_IO_EVENT_IRQ_H
12
13#include <linux/types.h>
14#include <linux/notifier.h>
15
16#define PSERIES_IOEI_RPC_MAX_LEN 216
17
18#define PSERIES_IOEI_TYPE_ERR_DETECTED 0x01
19#define PSERIES_IOEI_TYPE_ERR_RECOVERED 0x02
20#define PSERIES_IOEI_TYPE_EVENT 0x03
21#define PSERIES_IOEI_TYPE_RPC_PASS_THRU 0x04
22
23#define PSERIES_IOEI_SUBTYPE_NOT_APP 0x00
24#define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ 0x01
25#define PSERIES_IOEI_SUBTYPE_NODE_ONLINE 0x03
26#define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE 0x04
27#define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE 0x05
28#define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE 0x06
29#define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED 0x07
30
31#define PSERIES_IOEI_SCOPE_NOT_APP 0x00
32#define PSERIES_IOEI_SCOPE_RIO_HUB 0x36
33#define PSERIES_IOEI_SCOPE_RIO_BRIDGE 0x37
34#define PSERIES_IOEI_SCOPE_PHB 0x38
35#define PSERIES_IOEI_SCOPE_EADS_GLOBAL 0x39
36#define PSERIES_IOEI_SCOPE_EADS_SLOT 0x3A
37#define PSERIES_IOEI_SCOPE_TORRENT_HUB 0x3B
38#define PSERIES_IOEI_SCOPE_SERVICE_PROC 0x51
39
40/* Platform Event Log Format, Version 6, data portion of IO event section */
41struct pseries_io_event {
42 uint8_t event_type; /* 0x00 IO-Event Type */
43 uint8_t rpc_data_len; /* 0x01 RPC data length */
44 uint8_t scope; /* 0x02 Error/Event Scope */
45 uint8_t event_subtype; /* 0x03 I/O-Event Sub-Type */
46 uint32_t drc_index; /* 0x04 DRC Index */
47 uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN];
48 /* 0x08 RPC Data (0-216 bytes, */
49 /* padded to 4 bytes alignment) */
50};
51
52extern struct atomic_notifier_head pseries_ioei_notifier_list;
53
54#endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */
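The new header exports pseries_ioei_notifier_list so other code can subscribe to pSeries I/O events. A hedged sketch of a consumer, assuming the notifier data pointer is the incoming struct pseries_io_event (the callback and init names are made up):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/io_event_irq.h>

static int example_ioei_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pseries_io_event *ev = data;	/* assumed convention */

	if (ev->event_type == PSERIES_IOEI_TYPE_ERR_DETECTED &&
	    ev->scope == PSERIES_IOEI_SCOPE_PHB)
		pr_info("I/O error detected on a PHB, DRC index 0x%x\n",
			ev->drc_index);
	return NOTIFY_OK;
}

static struct notifier_block example_ioei_nb = {
	.notifier_call = example_ioei_notify,
};

static int __init example_ioei_init(void)
{
	return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
					      &example_ioei_nb);
}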
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 67ab5fb7d153..1bff591f7f72 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -88,9 +88,6 @@ struct irq_host_ops {
88 /* Dispose of such a mapping */ 88 /* Dispose of such a mapping */
89 void (*unmap)(struct irq_host *h, unsigned int virq); 89 void (*unmap)(struct irq_host *h, unsigned int virq);
90 90
91 /* Update of such a mapping */
92 void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
93
94 /* Translate device-tree interrupt specifier from raw format coming 91 /* Translate device-tree interrupt specifier from raw format coming
95 * from the firmware to a irq_hw_number_t (interrupt line number) and 92 * from the firmware to a irq_hw_number_t (interrupt line number) and
96 * type (sense) that can be passed to set_irq_type(). In the absence 93 * type (sense) that can be passed to set_irq_type(). In the absence
@@ -128,19 +125,10 @@ struct irq_host {
128 struct device_node *of_node; 125 struct device_node *of_node;
129}; 126};
130 127
131/* The main irq map itself is an array of NR_IRQ entries containing the 128struct irq_data;
132 * associate host and irq number. An entry with a host of NULL is free. 129extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
133 * An entry can be allocated if it's free, the allocator always then sets
134 * hwirq first to the host's invalid irq number and then fills ops.
135 */
136struct irq_map_entry {
137 irq_hw_number_t hwirq;
138 struct irq_host *host;
139};
140
141extern struct irq_map_entry irq_map[NR_IRQS];
142
143extern irq_hw_number_t virq_to_hw(unsigned int virq); 130extern irq_hw_number_t virq_to_hw(unsigned int virq);
131extern bool virq_is_host(unsigned int virq, struct irq_host *host);
144 132
145/** 133/**
146 * irq_alloc_host - Allocate a new irq_host data structure 134 * irq_alloc_host - Allocate a new irq_host data structure
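With the global irq_map[] array gone, callers recover the hardware interrupt number through accessors instead of indexing the table. A small sketch of the replacement style; the reporting helper is hypothetical:

#include <linux/kernel.h>
#include <linux/irq.h>
#include <asm/irq.h>

static void example_report_hwirq(unsigned int virq)
{
	struct irq_data *d = irq_get_irq_data(virq);

	if (d)
		pr_debug("virq %u -> hwirq %lu\n", virq,
			 (unsigned long)irqd_to_hwirq(d));

	/* or, when only the Linux interrupt number is at hand */
	pr_debug("virq %u -> hwirq %lu\n", virq,
		 (unsigned long)virq_to_hw(virq));
}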
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f54408d995b5..8a33698c61bd 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
76extern cpumask_t cpus_in_sr; 76extern cpumask_t cpus_in_sr;
77static inline int kexec_sr_activated(int cpu) 77static inline int kexec_sr_activated(int cpu)
78{ 78{
79 return cpu_isset(cpu,cpus_in_sr); 79 return cpumask_test_cpu(cpu, &cpus_in_sr);
80} 80}
81 81
82struct kimage; 82struct kimage;
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 5b7504674397..0951b17f4eb5 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -59,6 +59,7 @@
59#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 59#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
60#define BOOK3S_INTERRUPT_EXTERNAL 0x500 60#define BOOK3S_INTERRUPT_EXTERNAL 0x500
61#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501 61#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501
62#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502
62#define BOOK3S_INTERRUPT_ALIGNMENT 0x600 63#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
63#define BOOK3S_INTERRUPT_PROGRAM 0x700 64#define BOOK3S_INTERRUPT_PROGRAM 0x700
64#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 65#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 36fdb3aff30b..d5a8a3861635 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -34,6 +34,7 @@
34 (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \ 34 (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
35 (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \ 35 (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
36 (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \ 36 (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
37 (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \
37 (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \ 38 (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
38 (\intno == BOOK3S_INTERRUPT_PROGRAM) || \ 39 (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
39 (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \ 40 (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index a077adc0b35e..e0298d26ce5d 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -210,6 +210,8 @@ struct dtl_entry {
210#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */ 210#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
211#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry)) 211#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
212 212
213extern struct kmem_cache *dtl_cache;
214
213/* 215/*
214 * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls 216 * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
215 * reading from the dispatch trace log. If other code wants to consume 217 * reading from the dispatch trace log. If other code wants to consume
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index e4f01915fbb0..47cacddb14cf 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,21 +29,6 @@ struct file;
29struct pci_controller; 29struct pci_controller;
30struct kimage; 30struct kimage;
31 31
32#ifdef CONFIG_SMP
33struct smp_ops_t {
34 void (*message_pass)(int target, int msg);
35 int (*probe)(void);
36 void (*kick_cpu)(int nr);
37 void (*setup_cpu)(int nr);
38 void (*bringup_done)(void);
39 void (*take_timebase)(void);
40 void (*give_timebase)(void);
41 int (*cpu_disable)(void);
42 void (*cpu_die)(unsigned int nr);
43 int (*cpu_bootable)(unsigned int nr);
44};
45#endif
46
47struct machdep_calls { 32struct machdep_calls {
48 char *name; 33 char *name;
49#ifdef CONFIG_PPC64 34#ifdef CONFIG_PPC64
@@ -267,6 +252,7 @@ struct machdep_calls {
267 252
268extern void e500_idle(void); 253extern void e500_idle(void);
269extern void power4_idle(void); 254extern void power4_idle(void);
255extern void power7_idle(void);
270extern void ppc6xx_idle(void); 256extern void ppc6xx_idle(void);
271extern void book3e_idle(void); 257extern void book3e_idle(void);
272 258
@@ -311,12 +297,6 @@ extern sys_ctrler_t sys_ctrler;
311 297
312#endif /* CONFIG_PPC_PMAC */ 298#endif /* CONFIG_PPC_PMAC */
313 299
314#ifdef CONFIG_SMP
315/* Poor default implementations */
316extern void __devinit smp_generic_give_timebase(void);
317extern void __devinit smp_generic_take_timebase(void);
318#endif /* CONFIG_SMP */
319
320 300
321/* Functions to produce codes on the leds. 301/* Functions to produce codes on the leds.
322 * The SRC code should be unique for the message category and should 302 * The SRC code should be unique for the message category and should
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 17194fcd4040..3ea0f9a259d8 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -43,6 +43,7 @@
43#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) 43#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
44#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) 44#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
45#define MAS0_NV(x) ((x) & 0x00000FFF) 45#define MAS0_NV(x) ((x) & 0x00000FFF)
46#define MAS0_ESEL_MASK 0x0FFF0000
46#define MAS0_HES 0x00004000 47#define MAS0_HES 0x00004000
47#define MAS0_WQ_ALLWAYS 0x00000000 48#define MAS0_WQ_ALLWAYS 0x00000000
48#define MAS0_WQ_COND 0x00001000 49#define MAS0_WQ_COND 0x00001000
@@ -137,6 +138,21 @@
137#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ 138#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
138#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ 139#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
139 140
141/* MMUCFG bits */
142#define MMUCFG_MAVN_NASK 0x00000003
143#define MMUCFG_MAVN_V1_0 0x00000000
144#define MMUCFG_MAVN_V2_0 0x00000001
145#define MMUCFG_NTLB_MASK 0x0000000c
146#define MMUCFG_NTLB_SHIFT 2
147#define MMUCFG_PIDSIZE_MASK 0x000007c0
148#define MMUCFG_PIDSIZE_SHIFT 6
149#define MMUCFG_TWC 0x00008000
150#define MMUCFG_LRAT 0x00010000
151#define MMUCFG_RASIZE_MASK 0x00fe0000
152#define MMUCFG_RASIZE_SHIFT 17
153#define MMUCFG_LPIDSIZE_MASK 0x0f000000
154#define MMUCFG_LPIDSIZE_SHIFT 24
155
140/* TLBnCFG encoding */ 156/* TLBnCFG encoding */
141#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ 157#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
142#define TLBnCFG_HES 0x00002000 /* HW select supported */ 158#define TLBnCFG_HES 0x00002000 /* HW select supported */
@@ -229,6 +245,10 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
229extern int mmu_linear_psize; 245extern int mmu_linear_psize;
230extern int mmu_vmemmap_psize; 246extern int mmu_vmemmap_psize;
231 247
248#ifdef CONFIG_PPC64
249extern unsigned long linear_map_top;
250#endif
251
232#endif /* !__ASSEMBLY__ */ 252#endif /* !__ASSEMBLY__ */
233 253
234#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ 254#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
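The MMUCFG masks above let the configuration register be decoded in C rather than with open-coded magic numbers. A hedged sketch; SPRN_MMUCFG is assumed to come from reg_booke.h, and the usual Book3E "field value plus one" encodings are assumed:

#include <linux/kernel.h>
#include <asm/reg.h>
#include <asm/mmu-book3e.h>

static void example_dump_mmucfg(void)
{
	unsigned int cfg = mfspr(SPRN_MMUCFG);
	unsigned int ntlbs = ((cfg & MMUCFG_NTLB_MASK) >> MMUCFG_NTLB_SHIFT) + 1;
	unsigned int pidbits = ((cfg & MMUCFG_PIDSIZE_MASK) >> MMUCFG_PIDSIZE_SHIFT) + 1;

	pr_info("MMUCFG: %u TLB array(s), %u PID bits, LRAT %spresent\n",
		ntlbs, pidbits, (cfg & MMUCFG_LRAT) ? "" : "not ");
}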
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index ae7b3efec8e5..d865bd909c7d 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -408,6 +408,7 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
408#endif /* CONFIG_PPC_SUBPAGE_PROT */ 408#endif /* CONFIG_PPC_SUBPAGE_PROT */
409 409
410typedef unsigned long mm_context_id_t; 410typedef unsigned long mm_context_id_t;
411struct spinlock;
411 412
412typedef struct { 413typedef struct {
413 mm_context_id_t id; 414 mm_context_id_t id;
@@ -423,6 +424,11 @@ typedef struct {
423#ifdef CONFIG_PPC_SUBPAGE_PROT 424#ifdef CONFIG_PPC_SUBPAGE_PROT
424 struct subpage_prot_table spt; 425 struct subpage_prot_table spt;
425#endif /* CONFIG_PPC_SUBPAGE_PROT */ 426#endif /* CONFIG_PPC_SUBPAGE_PROT */
427#ifdef CONFIG_PPC_ICSWX
428 struct spinlock *cop_lockp; /* guard acop and cop_pid */
429 unsigned long acop; /* mask of enabled coprocessor types */
430 unsigned int cop_pid; /* pid value used with coprocessors */
431#endif /* CONFIG_PPC_ICSWX */
426} mm_context_t; 432} mm_context_t;
427 433
428 434
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index bb40a06d3b77..4138b21ae80a 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -56,11 +56,6 @@
56 */ 56 */
57#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) 57#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
58 58
59/* This indicates that the processor uses the ISA 2.06 server tlbie
60 * mnemonics
61 */
62#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000)
63
64/* Enable use of TLB reservation. Processor should support tlbsrx. 59/* Enable use of TLB reservation. Processor should support tlbsrx.
65 * instruction and MAS0[WQ]. 60 * instruction and MAS0[WQ].
66 */ 61 */
@@ -70,6 +65,53 @@
70 */ 65 */
71#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000) 66#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
72 67
68/* MMU is SLB-based
69 */
70#define MMU_FTR_SLB ASM_CONST(0x02000000)
71
72/* Support 16M large pages
73 */
74#define MMU_FTR_16M_PAGE ASM_CONST(0x04000000)
75
76/* Supports TLBIEL variant
77 */
78#define MMU_FTR_TLBIEL ASM_CONST(0x08000000)
79
80/* Supports tlbies w/o locking
81 */
82#define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000)
83
84/* Large pages can be marked CI
85 */
86#define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000)
87
88/* 1T segments available
89 */
90#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
91
92/* Doesn't support the B bit (1T segment) in SLBIE
93 */
94#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x80000000)
95
96/* MMU feature bit sets for various CPUs */
97#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
98 MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
99#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
100#define MMU_FTRS_PPC970 MMU_FTRS_POWER4
101#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
102#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
103#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
104#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
105 MMU_FTR_CI_LARGE_PAGE
106#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
107 MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
108#define MMU_FTRS_A2 MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
109 MMU_FTR_USE_TLBIVAX_BCAST | \
110 MMU_FTR_LOCK_BCAST_INVAL | \
111 MMU_FTR_USE_TLBRSRV | \
112 MMU_FTR_USE_PAIRED_MAS | \
113 MMU_FTR_TLBIEL | \
114 MMU_FTR_16M_PAGE
73#ifndef __ASSEMBLY__ 115#ifndef __ASSEMBLY__
74#include <asm/cputable.h> 116#include <asm/cputable.h>
75 117
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 81fb41289d6c..a73668a5f30d 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -32,6 +32,10 @@ extern void __destroy_context(unsigned long context_id);
32extern void mmu_context_init(void); 32extern void mmu_context_init(void);
33#endif 33#endif
34 34
35extern void switch_cop(struct mm_struct *next);
36extern int use_cop(unsigned long acop, struct mm_struct *mm);
37extern void drop_cop(unsigned long acop, struct mm_struct *mm);
38
35/* 39/*
36 * switch_mm is the entry point called from the architecture independent 40 * switch_mm is the entry point called from the architecture independent
37 * code in kernel/sched.c 41 * code in kernel/sched.c
@@ -55,6 +59,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
55 if (prev == next) 59 if (prev == next)
56 return; 60 return;
57 61
62#ifdef CONFIG_PPC_ICSWX
63 /* Switch coprocessor context only if prev or next uses a coprocessor */
64 if (prev->context.acop || next->context.acop)
65 switch_cop(next);
66#endif /* CONFIG_PPC_ICSWX */
67
58 /* We must stop all altivec streams before changing the HW 68 /* We must stop all altivec streams before changing the HW
59 * context 69 * context
60 */ 70 */
@@ -67,7 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
67 * sub architectures. 77 * sub architectures.
68 */ 78 */
69#ifdef CONFIG_PPC_STD_MMU_64 79#ifdef CONFIG_PPC_STD_MMU_64
70 if (cpu_has_feature(CPU_FTR_SLB)) 80 if (mmu_has_feature(MMU_FTR_SLB))
71 switch_slb(tsk, next); 81 switch_slb(tsk, next);
72 else 82 else
73 switch_stab(tsk, next); 83 switch_stab(tsk, next);
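use_cop()/drop_cop() and the switch_cop() hook in switch_mm() above form the coprocessor-context API for ICSWX. A hedged sketch of a caller enabling one coprocessor type for its own mm around an icswx transaction; the type bit is invented for illustration:

#include <linux/sched.h>
#include <asm/mmu_context.h>

#define EXAMPLE_ACOP_TYPE	(1UL << 16)	/* hypothetical coprocessor type */

static int example_cop_session(void)
{
	int ret;

	ret = use_cop(EXAMPLE_ACOP_TYPE, current->mm);
	if (ret)
		return ret;

	/* ... build the CRB and issue icswx here ... */

	drop_cop(EXAMPLE_ACOP_TYPE, current->mm);
	return 0;
}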
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 49baddcdd14e..df18989e78d4 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -262,6 +262,7 @@ struct mpic
262#ifdef CONFIG_SMP 262#ifdef CONFIG_SMP
263 struct irq_chip hc_ipi; 263 struct irq_chip hc_ipi;
264#endif 264#endif
265 struct irq_chip hc_tm;
265 const char *name; 266 const char *name;
266 /* Flags */ 267 /* Flags */
267 unsigned int flags; 268 unsigned int flags;
@@ -280,7 +281,7 @@ struct mpic
280 281
281 /* vector numbers used for internal sources (ipi/timers) */ 282 /* vector numbers used for internal sources (ipi/timers) */
282 unsigned int ipi_vecs[4]; 283 unsigned int ipi_vecs[4];
283 unsigned int timer_vecs[4]; 284 unsigned int timer_vecs[8];
284 285
285 /* Spurious vector to program into unused sources */ 286 /* Spurious vector to program into unused sources */
286 unsigned int spurious_vec; 287 unsigned int spurious_vec;
@@ -368,6 +369,8 @@ struct mpic
368 * NOTE: This flag trumps MPIC_WANTS_RESET. 369 * NOTE: This flag trumps MPIC_WANTS_RESET.
369 */ 370 */
370#define MPIC_NO_RESET 0x00004000 371#define MPIC_NO_RESET 0x00004000
372/* Freescale MPIC (compatible includes "fsl,mpic") */
373#define MPIC_FSL 0x00008000
371 374
372/* MPIC HW modification ID */ 375/* MPIC HW modification ID */
373#define MPIC_REGSET_MASK 0xf0000000 376#define MPIC_REGSET_MASK 0xf0000000
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index d4b4bfa26fb3..89d2f99c1bf4 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -18,13 +18,18 @@
18extern int pSeries_reconfig_notifier_register(struct notifier_block *); 18extern int pSeries_reconfig_notifier_register(struct notifier_block *);
19extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); 19extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
20extern struct blocking_notifier_head pSeries_reconfig_chain; 20extern struct blocking_notifier_head pSeries_reconfig_chain;
21/* Not the best place to put this, will be fixed when we move some
22 * of the rtas suspend-me stuff to pseries */
23extern void pSeries_coalesce_init(void);
21#else /* !CONFIG_PPC_PSERIES */ 24#else /* !CONFIG_PPC_PSERIES */
22static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) 25static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
23{ 26{
24 return 0; 27 return 0;
25} 28}
26static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { } 29static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
30static inline void pSeries_coalesce_init(void) { }
27#endif /* CONFIG_PPC_PSERIES */ 31#endif /* CONFIG_PPC_PSERIES */
28 32
33
29#endif /* __KERNEL__ */ 34#endif /* __KERNEL__ */
30#endif /* _PPC64_PSERIES_RECONFIG_H */ 35#endif /* _PPC64_PSERIES_RECONFIG_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec57540cd7af..74126765106a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -92,9 +92,9 @@ struct paca_struct {
92 * Now, starting in cacheline 2, the exception save areas 92 * Now, starting in cacheline 2, the exception save areas
93 */ 93 */
94 /* used for most interrupts/exceptions */ 94 /* used for most interrupts/exceptions */
95 u64 exgen[10] __attribute__((aligned(0x80))); 95 u64 exgen[11] __attribute__((aligned(0x80)));
96 u64 exmc[10]; /* used for machine checks */ 96 u64 exmc[11]; /* used for machine checks */
97 u64 exslb[10]; /* used for SLB/segment table misses 97 u64 exslb[11]; /* used for SLB/segment table misses
98 * on the linear mapping */ 98 * on the linear mapping */
99 /* SLB related definitions */ 99 /* SLB related definitions */
100 u16 vmalloc_sllp; 100 u16 vmalloc_sllp;
@@ -106,7 +106,8 @@ struct paca_struct {
106 pgd_t *pgd; /* Current PGD */ 106 pgd_t *pgd; /* Current PGD */
107 pgd_t *kernel_pgd; /* Kernel PGD */ 107 pgd_t *kernel_pgd; /* Kernel PGD */
108 u64 exgen[8] __attribute__((aligned(0x80))); 108 u64 exgen[8] __attribute__((aligned(0x80)));
109 u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80))); 109 /* We can have up to 3 levels of reentrancy in the TLB miss handler */
110 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80)));
110 u64 exmc[8]; /* used for machine checks */ 111 u64 exmc[8]; /* used for machine checks */
111 u64 excrit[8]; /* used for crit interrupts */ 112 u64 excrit[8]; /* used for crit interrupts */
112 u64 exdbg[8]; /* used for debug interrupts */ 113 u64 exdbg[8]; /* used for debug interrupts */
@@ -125,7 +126,7 @@ struct paca_struct {
125 struct task_struct *__current; /* Pointer to current */ 126 struct task_struct *__current; /* Pointer to current */
126 u64 kstack; /* Saved Kernel stack addr */ 127 u64 kstack; /* Saved Kernel stack addr */
127 u64 stab_rr; /* stab/slb round-robin counter */ 128 u64 stab_rr; /* stab/slb round-robin counter */
128 u64 saved_r1; /* r1 save for RTAS calls */ 129 u64 saved_r1; /* r1 save for RTAS calls or PM */
129 u64 saved_msr; /* MSR saved here by enter_rtas */ 130 u64 saved_msr; /* MSR saved here by enter_rtas */
130 u16 trap_save; /* Used when bad stack is encountered */ 131 u16 trap_save; /* Used when bad stack is encountered */
131 u8 soft_enabled; /* irq soft-enable flag */ 132 u8 soft_enabled; /* irq soft-enable flag */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 812b2cd80aed..9356262fd3cc 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -59,24 +59,7 @@ static __inline__ void clear_page(void *addr)
59 : "ctr", "memory"); 59 : "ctr", "memory");
60} 60}
61 61
62extern void copy_4K_page(void *to, void *from); 62extern void copy_page(void *to, void *from);
63
64#ifdef CONFIG_PPC_64K_PAGES
65static inline void copy_page(void *to, void *from)
66{
67 unsigned int i;
68 for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
69 copy_4K_page(to, from);
70 to += 4096;
71 from += 4096;
72 }
73}
74#else /* CONFIG_PPC_64K_PAGES */
75static inline void copy_page(void *to, void *from)
76{
77 copy_4K_page(to, from);
78}
79#endif /* CONFIG_PPC_64K_PAGES */
80 63
81/* Log 2 of page table size */ 64/* Log 2 of page table size */
82extern u64 ppc64_pft_size; 65extern u64 ppc64_pft_size;
@@ -130,7 +113,7 @@ extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
130extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, 113extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
131 unsigned long len, unsigned int psize); 114 unsigned long len, unsigned int psize);
132 115
133#define slice_mm_new_context(mm) ((mm)->context.id == 0) 116#define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT)
134 117
135#endif /* __ASSEMBLY__ */ 118#endif /* __ASSEMBLY__ */
136#else 119#else
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 2b09cd522d33..81576ee0cfb1 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -257,21 +257,20 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
257static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 257static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
258 pte_t *ptep) 258 pte_t *ptep)
259{ 259{
260 unsigned long old;
261 260
262 if ((pte_val(*ptep) & _PAGE_RW) == 0) 261 if ((pte_val(*ptep) & _PAGE_RW) == 0)
263 return; 262 return;
264 old = pte_update(mm, addr, ptep, _PAGE_RW, 0); 263
264 pte_update(mm, addr, ptep, _PAGE_RW, 0);
265} 265}
266 266
267static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, 267static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
268 unsigned long addr, pte_t *ptep) 268 unsigned long addr, pte_t *ptep)
269{ 269{
270 unsigned long old;
271
272 if ((pte_val(*ptep) & _PAGE_RW) == 0) 270 if ((pte_val(*ptep) & _PAGE_RW) == 0)
273 return; 271 return;
274 old = pte_update(mm, addr, ptep, _PAGE_RW, 1); 272
273 pte_update(mm, addr, ptep, _PAGE_RW, 1);
275} 274}
276 275
277/* 276/*
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1255569387b6..e472659d906c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -41,6 +41,10 @@
41#define PPC_INST_RFCI 0x4c000066 41#define PPC_INST_RFCI 0x4c000066
42#define PPC_INST_RFDI 0x4c00004e 42#define PPC_INST_RFDI 0x4c00004e
43#define PPC_INST_RFMCI 0x4c00004c 43#define PPC_INST_RFMCI 0x4c00004c
44#define PPC_INST_MFSPR_DSCR 0x7c1102a6
45#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
46#define PPC_INST_MTSPR_DSCR 0x7c1103a6
47#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
44 48
45#define PPC_INST_STRING 0x7c00042a 49#define PPC_INST_STRING 0x7c00042a
46#define PPC_INST_STRING_MASK 0xfc0007fe 50#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -56,6 +60,17 @@
56#define PPC_INST_TLBSRX_DOT 0x7c0006a5 60#define PPC_INST_TLBSRX_DOT 0x7c0006a5
57#define PPC_INST_XXLOR 0xf0000510 61#define PPC_INST_XXLOR 0xf0000510
58 62
63#define PPC_INST_NAP 0x4c000364
64#define PPC_INST_SLEEP 0x4c0003a4
65
66/* A2 specific instructions */
67#define PPC_INST_ERATWE 0x7c0001a6
68#define PPC_INST_ERATRE 0x7c000166
69#define PPC_INST_ERATILX 0x7c000066
70#define PPC_INST_ERATIVAX 0x7c000666
71#define PPC_INST_ERATSX 0x7c000126
72#define PPC_INST_ERATSX_DOT 0x7c000127
73
59/* macros to insert fields into opcodes */ 74/* macros to insert fields into opcodes */
60#define __PPC_RA(a) (((a) & 0x1f) << 16) 75#define __PPC_RA(a) (((a) & 0x1f) << 16)
61#define __PPC_RB(b) (((b) & 0x1f) << 11) 76#define __PPC_RB(b) (((b) & 0x1f) << 11)
@@ -67,6 +82,8 @@
67#define __PPC_XT(s) __PPC_XS(s) 82#define __PPC_XT(s) __PPC_XS(s)
68#define __PPC_T_TLB(t) (((t) & 0x3) << 21) 83#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
69#define __PPC_WC(w) (((w) & 0x3) << 21) 84#define __PPC_WC(w) (((w) & 0x3) << 21)
85#define __PPC_WS(w) (((w) & 0x1f) << 11)
86
70/* 87/*
71 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a 88 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
72 * larx with EH set as an illegal instruction. 89 * larx with EH set as an illegal instruction.
@@ -113,6 +130,21 @@
113#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \ 130#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
114 __PPC_RA(a) | __PPC_RB(b)) 131 __PPC_RA(a) | __PPC_RB(b))
115 132
133#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \
134 __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
135#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \
136 __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
137#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \
138 __PPC_T_TLB(t) | __PPC_RA(a) | \
139 __PPC_RB(b))
140#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \
141 __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
142#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \
143 __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
144#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
145 __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
146
147
116/* 148/*
117 * Define what the VSX XX1 form instructions will look like, then add 149 * Define what the VSX XX1 form instructions will look like, then add
118 * the 128 bit load store instructions based on that. 150 * the 128 bit load store instructions based on that.
@@ -126,4 +158,7 @@
126#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ 158#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
127 VSX_XX3((t), (a), (b))) 159 VSX_XX3((t), (a), (b)))
128 160
161#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
162#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
163
129#endif /* _ASM_POWERPC_PPC_OPCODE_H */ 164#endif /* _ASM_POWERPC_PPC_OPCODE_H */
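PPC_NAP and PPC_SLEEP, like the ERAT macros, emit raw opcodes through stringify_in_c(), so code builds even with assemblers that do not know the mnemonics. A minimal sketch; the LPCR/MSR setup a real idle loop needs is omitted:

#include <asm/ppc-opcode.h>

static inline void example_enter_nap(void)
{
	asm volatile(PPC_NAP : : : "memory");
}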
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 98210067c1cc..1b422381fc16 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -170,6 +170,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
170#define HMT_MEDIUM or 2,2,2 170#define HMT_MEDIUM or 2,2,2
171#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority 171#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
172#define HMT_HIGH or 3,3,3 172#define HMT_HIGH or 3,3,3
173#define HMT_EXTRA_HIGH or 7,7,7 # power7 only
173 174
174#ifdef __KERNEL__ 175#ifdef __KERNEL__
175#ifdef CONFIG_PPC64 176#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index de1967a1ff57..d50c2b6d9bc3 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -238,6 +238,10 @@ struct thread_struct {
238#ifdef CONFIG_KVM_BOOK3S_32_HANDLER 238#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
239 void* kvm_shadow_vcpu; /* KVM internal data */ 239 void* kvm_shadow_vcpu; /* KVM internal data */
240#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ 240#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
241#ifdef CONFIG_PPC64
242 unsigned long dscr;
243 int dscr_inherit;
244#endif
241}; 245};
242 246
243#define ARCH_MIN_TASKALIGN 16 247#define ARCH_MIN_TASKALIGN 16
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7e4abebe76c0..c5cae0dd176c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -99,17 +99,23 @@
99#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ 99#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
100 100
101#if defined(CONFIG_PPC_BOOK3S_64) 101#if defined(CONFIG_PPC_BOOK3S_64)
102#define MSR_64BIT MSR_SF
103
102/* Server variant */ 104/* Server variant */
103#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV 105#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
104#define MSR_KERNEL MSR_ | MSR_SF 106#define MSR_KERNEL MSR_ | MSR_64BIT
105#define MSR_USER32 MSR_ | MSR_PR | MSR_EE 107#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
106#define MSR_USER64 MSR_USER32 | MSR_SF 108#define MSR_USER64 MSR_USER32 | MSR_64BIT
107#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) 109#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
108/* Default MSR for kernel mode. */ 110/* Default MSR for kernel mode. */
109#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) 111#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
110#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) 112#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
111#endif 113#endif
112 114
115#ifndef MSR_64BIT
116#define MSR_64BIT 0
117#endif
118
113/* Floating Point Status and Control Register (FPSCR) Fields */ 119/* Floating Point Status and Control Register (FPSCR) Fields */
114#define FPSCR_FX 0x80000000 /* FPU exception summary */ 120#define FPSCR_FX 0x80000000 /* FPU exception summary */
115#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ 121#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
@@ -182,6 +188,8 @@
182 188
183#define SPRN_CTR 0x009 /* Count Register */ 189#define SPRN_CTR 0x009 /* Count Register */
184#define SPRN_DSCR 0x11 190#define SPRN_DSCR 0x11
191#define SPRN_CFAR 0x1c /* Come From Address Register */
192#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
185#define SPRN_CTRLF 0x088 193#define SPRN_CTRLF 0x088
186#define SPRN_CTRLT 0x098 194#define SPRN_CTRLT 0x098
187#define CTRL_CT 0xc0000000 /* current thread */ 195#define CTRL_CT 0xc0000000 /* current thread */
@@ -210,8 +218,43 @@
210#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 218#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
211#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ 219#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
212#define SPRN_SPURR 0x134 /* Scaled PURR */ 220#define SPRN_SPURR 0x134 /* Scaled PURR */
221#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
222#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
223#define SPRN_HDSISR 0x132
224#define SPRN_HDAR 0x133
225#define SPRN_HDEC 0x136 /* Hypervisor Decrementer */
213#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ 226#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
227#define SPRN_RMOR 0x138 /* Real mode offset register */
228#define SPRN_HRMOR 0x139 /* Real mode offset register */
229#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
230#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
214#define SPRN_LPCR 0x13E /* LPAR Control Register */ 231#define SPRN_LPCR 0x13E /* LPAR Control Register */
232#define LPCR_VPM0 (1ul << (63-0))
233#define LPCR_VPM1 (1ul << (63-1))
234#define LPCR_ISL (1ul << (63-2))
235#define LPCR_DPFD_SH (63-11)
236#define LPCR_VRMA_L (1ul << (63-12))
237#define LPCR_VRMA_LP0 (1ul << (63-15))
238#define LPCR_VRMA_LP1 (1ul << (63-16))
239#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
240#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
241#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
242#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
243#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
244#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
245#define LPCR_MER 0x00000800 /* Mediated External Exception */
246#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
247#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
248#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
249#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
250#define SPRN_LPID 0x13F /* Logical Partition Identifier */
251#define SPRN_HMER 0x150 /* Hardware m? error recovery */
252#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
253#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
254#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
255#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
256#define SPRN_TLBRPNR 0x156 /* P7 TLB control register */
257#define SPRN_TLBLPIDR 0x157 /* P7 TLB control register */
215#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ 258#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
216#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ 259#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
217#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ 260#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
@@ -434,16 +477,23 @@
434#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ 477#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
435#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ 478#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
436#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 479#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
437#define SRR1_WAKERESET 0x00380000 /* System reset */
438#define SRR1_WAKESYSERR 0x00300000 /* System error */ 480#define SRR1_WAKESYSERR 0x00300000 /* System error */
439#define SRR1_WAKEEE 0x00200000 /* External interrupt */ 481#define SRR1_WAKEEE 0x00200000 /* External interrupt */
440#define SRR1_WAKEMT 0x00280000 /* mtctrl */ 482#define SRR1_WAKEMT 0x00280000 /* mtctrl */
483#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
441#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ 484#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
442#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ 485#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
486#define SRR1_WAKERESET 0x00100000 /* System reset */
487#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
488#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
489 * may not be recoverable */
490#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */
491#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */
443#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ 492#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
444#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ 493#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
445#define SRR1_PROGTRAP 0x00020000 /* Trap */ 494#define SRR1_PROGTRAP 0x00020000 /* Trap */
446#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ 495#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
496
447#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ 497#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
448#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ 498#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
449 499
@@ -673,12 +723,15 @@
673 * SPRG usage: 723 * SPRG usage:
674 * 724 *
675 * All 64-bit: 725 * All 64-bit:
676 * - SPRG1 stores PACA pointer 726 * - SPRG1 stores PACA pointer except 64-bit server in
727 * HV mode in which case it is HSPRG0
677 * 728 *
678 * 64-bit server: 729 * 64-bit server:
679 * - SPRG0 unused (reserved for HV on Power4) 730 * - SPRG0 unused (reserved for HV on Power4)
680 * - SPRG2 scratch for exception vectors 731 * - SPRG2 scratch for exception vectors
681 * - SPRG3 unused (user visible) 732 * - SPRG3 unused (user visible)
733 * - HSPRG0 stores PACA in HV mode
734 * - HSPRG1 scratch for "HV" exceptions
682 * 735 *
683 * 64-bit embedded 736 * 64-bit embedded
684 * - SPRG0 generic exception scratch 737 * - SPRG0 generic exception scratch
@@ -741,6 +794,41 @@
741 794
742#ifdef CONFIG_PPC_BOOK3S_64 795#ifdef CONFIG_PPC_BOOK3S_64
743#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2 796#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2
797#define SPRN_SPRG_HPACA SPRN_HSPRG0
798#define SPRN_SPRG_HSCRATCH0 SPRN_HSPRG1
799
800#define GET_PACA(rX) \
801 BEGIN_FTR_SECTION_NESTED(66); \
802 mfspr rX,SPRN_SPRG_PACA; \
803 FTR_SECTION_ELSE_NESTED(66); \
804 mfspr rX,SPRN_SPRG_HPACA; \
805 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
806
807#define SET_PACA(rX) \
808 BEGIN_FTR_SECTION_NESTED(66); \
809 mtspr SPRN_SPRG_PACA,rX; \
810 FTR_SECTION_ELSE_NESTED(66); \
811 mtspr SPRN_SPRG_HPACA,rX; \
812 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
813
814#define GET_SCRATCH0(rX) \
815 BEGIN_FTR_SECTION_NESTED(66); \
816 mfspr rX,SPRN_SPRG_SCRATCH0; \
817 FTR_SECTION_ELSE_NESTED(66); \
818 mfspr rX,SPRN_SPRG_HSCRATCH0; \
819 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
820
821#define SET_SCRATCH0(rX) \
822 BEGIN_FTR_SECTION_NESTED(66); \
823 mtspr SPRN_SPRG_SCRATCH0,rX; \
824 FTR_SECTION_ELSE_NESTED(66); \
825 mtspr SPRN_SPRG_HSCRATCH0,rX; \
826 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
827
828#else /* CONFIG_PPC_BOOK3S_64 */
829#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
830#define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX
831
744#endif 832#endif
745 833
746#ifdef CONFIG_PPC_BOOK3E_64 834#ifdef CONFIG_PPC_BOOK3E_64
@@ -750,6 +838,10 @@
750#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 838#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
751#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 839#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
752#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 840#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
841
842#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX
843#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA
844
753#endif 845#endif
754 846
755#ifdef CONFIG_PPC_BOOK3S_32 847#ifdef CONFIG_PPC_BOOK3S_32
@@ -800,6 +892,8 @@
800#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 892#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
801#endif 893#endif
802 894
895
896
803/* 897/*
804 * An mtfsf instruction with the L bit set. On CPUs that support this a 898 * An mtfsf instruction with the L bit set. On CPUs that support this a
805 * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored. 899 * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored.
@@ -894,6 +988,8 @@
894#define PV_POWER5p 0x003B 988#define PV_POWER5p 0x003B
895#define PV_POWER7 0x003F 989#define PV_POWER7 0x003F
896#define PV_970FX 0x003C 990#define PV_970FX 0x003C
991#define PV_POWER6 0x003E
992#define PV_POWER7 0x003F
897#define PV_630 0x0040 993#define PV_630 0x0040
898#define PV_630p 0x0041 994#define PV_630p 0x0041
899#define PV_970MP 0x0044 995#define PV_970MP 0x0044
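The reshuffled SRR1 wake definitions separate the wake reason (SRR1_WAKEMASK) from the power-save state information (SRR1_WAKESTATE). A hedged decode sketch with a made-up reporting helper:

#include <linux/kernel.h>
#include <asm/reg.h>

static void example_report_wakeup(unsigned long srr1)
{
	unsigned long reason = srr1 & SRR1_WAKEMASK;

	if (reason == SRR1_WAKEDEC)
		pr_debug("woken by decrementer\n");
	else if (reason == SRR1_WAKEEE)
		pr_debug("woken by external interrupt\n");

	if ((srr1 & SRR1_WAKESTATE) == SRR1_WS_DEEPEST)
		pr_debug("deepest power-save state; some state may be unrecoverable\n");
}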
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
new file mode 100644
index 000000000000..3d52a1132f3d
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -0,0 +1,165 @@
1/*
2 * Register definitions specific to the A2 core
3 *
4 * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef __ASM_POWERPC_REG_A2_H__
13#define __ASM_POWERPC_REG_A2_H__
14
15#define SPRN_TENSR 0x1b5
16#define SPRN_TENS 0x1b6 /* Thread ENable Set */
17#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
18
19#define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */
20#define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */
21#define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */
22#define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */
23#define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */
24#define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */
25#define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */
26
27#define SPRN_IAR 0x372
28
29#define SPRN_IUCR0 0x3f3
30#define IUCR0_ICBI_ACK 0x1000
31
32#define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */
33
34#define A2_IERAT_SIZE 16
35#define A2_DERAT_SIZE 32
36
37/* A2 MMUCR0 bits */
38#define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */
39#define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */
40#define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */
41#define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */
42#define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */
43#define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */
44#define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */
45#define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */
46#define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. */
47#define MMUCR0_TID_MASK 0x000000ff /* TID field */
48
49/* A2 MMUCR1 bits */
50#define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */
51#define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */
52#define MMUCR1_REE 0x20000000 /* Reference Exception Enable*/
53#define MMUCR1_CEE 0x10000000 /* Change exception enable */
54#define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */
55#define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all ex isync*/
56#define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */
57#define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */
58#define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */
59#define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */
60#define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */
61#define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */
62#define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */
63
64/* A2 MMUCR2 bits */
65#define MMUCR2_PSSEL_SHIFT 4
66
67/* A2 MMUCR3 bits */
68#define MMUCR3_THID 0x0000000f /* Thread ID */
69
70/* *** ERAT TLB bits definitions */
71#define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000)
72#define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00)
73#define TLB0_CLASS_00 ASM_CONST(0x0000000000000000)
74#define TLB0_CLASS_01 ASM_CONST(0x0000000000000400)
75#define TLB0_CLASS_10 ASM_CONST(0x0000000000000800)
76#define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00)
77#define TLB0_V ASM_CONST(0x0000000000000200)
78#define TLB0_X ASM_CONST(0x0000000000000100)
79#define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0)
80#define TLB0_SIZE_4K ASM_CONST(0x0000000000000010)
81#define TLB0_SIZE_64K ASM_CONST(0x0000000000000030)
82#define TLB0_SIZE_1M ASM_CONST(0x0000000000000050)
83#define TLB0_SIZE_16M ASM_CONST(0x0000000000000070)
84#define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0)
85#define TLB0_THDID_MASK ASM_CONST(0x000000000000000f)
86#define TLB0_THDID_0 ASM_CONST(0x0000000000000001)
87#define TLB0_THDID_1 ASM_CONST(0x0000000000000002)
88#define TLB0_THDID_2 ASM_CONST(0x0000000000000004)
89#define TLB0_THDID_3 ASM_CONST(0x0000000000000008)
90#define TLB0_THDID_ALL ASM_CONST(0x000000000000000f)
91
92#define TLB1_RESVATTR ASM_CONST(0x00f0000000000000)
93#define TLB1_U0 ASM_CONST(0x0008000000000000)
94#define TLB1_U1 ASM_CONST(0x0004000000000000)
95#define TLB1_U2 ASM_CONST(0x0002000000000000)
96#define TLB1_U3 ASM_CONST(0x0001000000000000)
97#define TLB1_R ASM_CONST(0x0000800000000000)
98#define TLB1_C ASM_CONST(0x0000400000000000)
99#define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000)
100#define TLB1_W ASM_CONST(0x0000000000000800)
101#define TLB1_I ASM_CONST(0x0000000000000400)
102#define TLB1_M ASM_CONST(0x0000000000000200)
103#define TLB1_G ASM_CONST(0x0000000000000100)
104#define TLB1_E ASM_CONST(0x0000000000000080)
105#define TLB1_VF ASM_CONST(0x0000000000000040)
106#define TLB1_UX ASM_CONST(0x0000000000000020)
107#define TLB1_SX ASM_CONST(0x0000000000000010)
108#define TLB1_UW ASM_CONST(0x0000000000000008)
109#define TLB1_SW ASM_CONST(0x0000000000000004)
110#define TLB1_UR ASM_CONST(0x0000000000000002)
111#define TLB1_SR ASM_CONST(0x0000000000000001)
112
113#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
114#define WSP_UART_PHYS 0xffc000c000
115/* This needs to be carefully chosen to hit a !0 congruence class
116 * in the TLB since we bolt it in way 3, which is already occupied
117 * by our linear mapping primary bolted entry in CC 0.
118 */
119#define WSP_UART_VIRT 0xf000000000001000
120#endif
121
122/* A2 erativax attributes definitions */
123#define ERATIVAX_RS_IS_ALL 0x000
124#define ERATIVAX_RS_IS_TID 0x040
125#define ERATIVAX_RS_IS_CLASS 0x080
126#define ERATIVAX_RS_IS_FULLMATCH 0x0c0
127#define ERATIVAX_CLASS_00 0x000
128#define ERATIVAX_CLASS_01 0x010
129#define ERATIVAX_CLASS_10 0x020
130#define ERATIVAX_CLASS_11 0x030
131#define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1)
132#define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1)
133#define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1)
134#define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1)
135#define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1)
136
137/* A2 eratilx attributes definitions */
138#define ERATILX_T_ALL 0
139#define ERATILX_T_TID 1
140#define ERATILX_T_TGS 2
141#define ERATILX_T_FULLMATCH 3
142#define ERATILX_T_CLASS0 4
143#define ERATILX_T_CLASS1 5
144#define ERATILX_T_CLASS2 6
145#define ERATILX_T_CLASS3 7
146
147/* XUCR0 bits */
148#define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */
149#define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */
150#define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */
151#define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */
152
153/* A2 CCR0 register */
154#define A2_CCR0_PME_DISABLED 0x00000000
155#define A2_CCR0_PME_SLEEP 0x40000000
156#define A2_CCR0_PME_RVW 0x80000000
157#define A2_CCR0_PME_DISABLED2 0xc0000000
158
159/* A2 CCR2 register */
160#define A2_CCR2_ERAT_ONLY_MODE 0x00000001
161#define A2_CCR2_ENABLE_ICSWX 0x00000002
162#define A2_CCR2_ENABLE_PC 0x20000000
163#define A2_CCR2_ENABLE_TRACE 0x40000000
164
165#endif /* __ASM_POWERPC_REG_A2_H__ */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b316794aa2b5..0f0ad9fa01c1 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -27,10 +27,12 @@
27#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */ 27#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */
28 28
29#if defined(CONFIG_PPC_BOOK3E_64) 29#if defined(CONFIG_PPC_BOOK3E_64)
30#define MSR_64BIT MSR_CM
31
30#define MSR_ MSR_ME | MSR_CE 32#define MSR_ MSR_ME | MSR_CE
31#define MSR_KERNEL MSR_ | MSR_CM 33#define MSR_KERNEL MSR_ | MSR_64BIT
32#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE 34#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE
33#define MSR_USER64 MSR_USER32 | MSR_CM | MSR_DE 35#define MSR_USER64 MSR_USER32 | MSR_64BIT
34#elif defined (CONFIG_40x) 36#elif defined (CONFIG_40x)
35#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) 37#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
36#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) 38#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
@@ -81,6 +83,10 @@
81#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */ 83#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */
82#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */ 84#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */
83#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */ 85#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */
86#define SPRN_IVOR38 0x1B0 /* Interrupt Vector Offset Register 38 */
87#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
88#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
89#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
84#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ 90#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
85#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ 91#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
86#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ 92#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 9a1193e30f26..58625d1e7802 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -158,7 +158,50 @@ struct rtas_error_log {
158 unsigned long target:4; /* Target of failed operation */ 158 unsigned long target:4; /* Target of failed operation */
159 unsigned long type:8; /* General event or error*/ 159 unsigned long type:8; /* General event or error*/
160 unsigned long extended_log_length:32; /* length in bytes */ 160 unsigned long extended_log_length:32; /* length in bytes */
161 unsigned char buffer[1]; 161 unsigned char buffer[1]; /* Start of extended log */
162 /* Variable length. */
163};
164
165#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14
166
167#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
168
169/* RTAS general extended event log, Version 6. The extended log starts
170 * from the "buffer" field of struct rtas_error_log defined above.
171 */
172struct rtas_ext_event_log_v6 {
173 /* Byte 0 */
174 uint32_t log_valid:1; /* 1:Log valid */
175 uint32_t unrecoverable_error:1; /* 1:Unrecoverable error */
176 uint32_t recoverable_error:1; /* 1:recoverable (correctable */
177 /* or successfully retried) */
178 uint32_t degraded_operation:1; /* 1:Unrecoverable err, bypassed*/
179 /* - degraded operation (e.g. */
180 /* CPU or mem taken off-line) */
181 uint32_t predictive_error:1;
182 uint32_t new_log:1; /* 1:"New" log (Always 1 for */
183					/* data returned from RTAS) */
184 uint32_t big_endian:1; /* 1: Big endian */
185 uint32_t :1; /* reserved */
186 /* Byte 1 */
187 uint32_t :8; /* reserved */
188 /* Byte 2 */
189 uint32_t powerpc_format:1; /* Set to 1 (indicating log is */
190					/* in PowerPC format) */
191 uint32_t :3; /* reserved */
192 uint32_t log_format:4; /* Log format indicator. Define */
193 /* format used for byte 12-2047 */
194 /* Byte 3 */
195 uint32_t :8; /* reserved */
196 /* Byte 4-11 */
197 uint8_t reserved[8]; /* reserved */
198 /* Byte 12-15 */
199 uint32_t company_id; /* Company ID of the company */
200 /* that defines the format for */
201 /* the vendor specific log type */
202 /* Byte 16-end of log */
203 uint8_t vendor_log[1]; /* Start of vendor specific log */
204 /* Variable length. */
162}; 205};
163 206
164/* 207/*
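The new struct rtas_ext_event_log_v6 describes the Version 6 extended log that begins at the buffer field of struct rtas_error_log. A hedged sketch of locating and sanity-checking such a log using only the fields shown above; the helper name is hypothetical:

#include <linux/kernel.h>
#include <asm/rtas.h>

static struct rtas_ext_event_log_v6 *
example_get_v6_log(struct rtas_error_log *elog)
{
	struct rtas_ext_event_log_v6 *ext;

	if (elog->extended_log_length == 0)
		return NULL;

	ext = (struct rtas_ext_event_log_v6 *)elog->buffer;
	if (!ext->log_valid || !ext->powerpc_format)
		return NULL;
	if (ext->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG)
		return NULL;
	if (ext->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	return ext;
}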
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
new file mode 100644
index 000000000000..0cabfd7bc2d1
--- /dev/null
+++ b/arch/powerpc/include/asm/scom.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
3 * <benh@kernel.crashing.org>
4 * and David Gibson, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
14 * the GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _ASM_POWERPC_SCOM_H
22#define _ASM_POWERPC_SCOM_H
23
24#ifdef __KERNEL__
25#ifndef __ASSEMBLY__
26#ifdef CONFIG_PPC_SCOM
27
28/*
29 * The SCOM bus is a sideband bus used for accessing various internal
30 * registers of the processor or the chipset. The implementation details
31 * differ between processors and platforms, and the access method as
32 * well.
33 *
34 * This API allows "mapping" ranges of SCOM register numbers associated
35 * with a given SCOM controller. The latter must be represented by a
36 * device node, though some implementations might support NULL if there
37 * is no possible ambiguity.
38 *
39 * Then, scom_read/scom_write can be used to access registers inside
40 * that range. The argument passed is a register number relative to
41 * the beginning of the range mapped.
42 */
43
44typedef void *scom_map_t;
45
46/* Value for an invalid SCOM map */
47#define SCOM_MAP_INVALID (NULL)
48
49/* The scom_controller data structure is what the platform passes
50 * to the core code in scom_init; it provides the actual implementation
51 * of all the SCOM functions.
52 */
53struct scom_controller {
54 scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
55 void (*unmap)(scom_map_t map);
56
57 u64 (*read)(scom_map_t map, u32 reg);
58 void (*write)(scom_map_t map, u32 reg, u64 value);
59};
60
61extern const struct scom_controller *scom_controller;
62
63/**
64 * scom_init - Initialize the SCOM backend, called by the platform
65 * @controller: The platform SCOM controller
66 */
67static inline void scom_init(const struct scom_controller *controller)
68{
69 scom_controller = controller;
70}
71
72/**
73 * scom_map_ok - Test if a SCOM mapping is successful
74 * @map: The result of scom_map to test
75 */
76static inline int scom_map_ok(scom_map_t map)
77{
78 return map != SCOM_MAP_INVALID;
79}
80
81/**
82 * scom_map - Map a block of SCOM registers
83 * @ctrl_dev: Device node of the SCOM controller
84 * some implementations allow NULL here
85 * @reg: first SCOM register to map
86 * @count: Number of SCOM registers to map
87 */
88
89static inline scom_map_t scom_map(struct device_node *ctrl_dev,
90 u64 reg, u64 count)
91{
92 return scom_controller->map(ctrl_dev, reg, count);
93}
94
95/**
96 * scom_find_parent - Find the SCOM controller for a device
97 * @dev: OF node of the device
98 *
99 * This is not meant for general usage, but in combination with
100 * scom_map() allows to map registers not represented by the
101 * device own scom-reg property. Useful for applying HW workarounds
102 * on things not properly represented in the device-tree for example.
103 */
104struct device_node *scom_find_parent(struct device_node *dev);
105
106
107/**
108 * scom_map_device - Map a device's block of SCOM registers
109 * @dev: OF node of the device
110 * @index: Register bank index (index in "scom-reg" property)
111 *
112 * This function uses the device-tree binding for SCOM, which is to
113 * follow "scom-parent" properties until it finds a node with a
114 * "scom-controller" property; that node is the controller. It then
115 * uses the "scom-reg" property, which is made of reg/count pairs,
116 * each of them having a size defined by the controller's #scom-cells
117 * property.
118 */
119extern scom_map_t scom_map_device(struct device_node *dev, int index);
120
121
122/**
123 * scom_unmap - Unmap a block of SCOM registers
124 * @map: Result of scom_map to be unmapped
125 */
126static inline void scom_unmap(scom_map_t map)
127{
128 if (scom_map_ok(map))
129 scom_controller->unmap(map);
130}
131
132/**
133 * scom_read - Read a SCOM register
134 * @map: Result of scom_map
135 * @reg: Register index within that map
136 */
137static inline u64 scom_read(scom_map_t map, u32 reg)
138{
139 return scom_controller->read(map, reg);
140}
141
142/**
143 * scom_write - Write to a SCOM register
144 * @map: Result of scom_map
145 * @reg: Register index within that map
146 * @value: Value to write
147 */
148static inline void scom_write(scom_map_t map, u32 reg, u64 value)
149{
150 scom_controller->write(map, reg, value);
151}
152
153#endif /* CONFIG_PPC_SCOM */
154#endif /* __ASSEMBLY__ */
155#endif /* __KERNEL__ */
156#endif /* _ASM_POWERPC_SCOM_H */
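
For readers of this new header, a minimal consumer could look like the sketch below. This is an illustration only: example_dump_scom() and its message are invented here; only scom_map_device(), scom_map_ok(), scom_read() and scom_unmap() come from the header above.

#include <linux/kernel.h>
#include <linux/of.h>
#include <asm/scom.h>

/* Hypothetical helper: read register 0 of a device's first scom-reg bank. */
static int example_dump_scom(struct device_node *dn)
{
	scom_map_t map;
	u64 val;

	map = scom_map_device(dn, 0);		/* index 0 of "scom-reg" */
	if (!scom_map_ok(map))
		return -ENODEV;

	val = scom_read(map, 0);		/* register 0 within that bank */
	pr_info("%s: SCOM reg 0 = 0x%llx\n",
		dn->full_name, (unsigned long long)val);

	scom_unmap(map);
	return 0;
}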
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index a902a0d3ae0d..880b8c1e6e53 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -20,6 +20,7 @@
20#include <linux/threads.h> 20#include <linux/threads.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/irqreturn.h>
23 24
24#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
25 26
@@ -29,14 +30,32 @@
29#include <asm/percpu.h> 30#include <asm/percpu.h>
30 31
31extern int boot_cpuid; 32extern int boot_cpuid;
33extern int boot_cpu_count;
32 34
33extern void cpu_die(void); 35extern void cpu_die(void);
34 36
35#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
36 38
37extern void smp_send_debugger_break(int cpu); 39struct smp_ops_t {
38extern void smp_message_recv(int); 40 void (*message_pass)(int cpu, int msg);
41#ifdef CONFIG_PPC_SMP_MUXED_IPI
42 void (*cause_ipi)(int cpu, unsigned long data);
43#endif
44 int (*probe)(void);
45 int (*kick_cpu)(int nr);
46 void (*setup_cpu)(int nr);
47 void (*bringup_done)(void);
48 void (*take_timebase)(void);
49 void (*give_timebase)(void);
50 int (*cpu_disable)(void);
51 void (*cpu_die)(unsigned int nr);
52 int (*cpu_bootable)(unsigned int nr);
53};
54
55extern void smp_send_debugger_break(void);
39extern void start_secondary_resume(void); 56extern void start_secondary_resume(void);
57extern void __devinit smp_generic_give_timebase(void);
58extern void __devinit smp_generic_take_timebase(void);
40 59
41DECLARE_PER_CPU(unsigned int, cpu_pvr); 60DECLARE_PER_CPU(unsigned int, cpu_pvr);
42 61
@@ -93,13 +112,16 @@ extern int cpu_to_core_id(int cpu);
93#define PPC_MSG_CALL_FUNC_SINGLE 2 112#define PPC_MSG_CALL_FUNC_SINGLE 2
94#define PPC_MSG_DEBUGGER_BREAK 3 113#define PPC_MSG_DEBUGGER_BREAK 3
95 114
96/* 115/* for irq controllers that have dedicated ipis per message (4) */
97 * irq controllers that have dedicated ipis per message and don't
98 * need additional code in the action handler may use this
99 */
100extern int smp_request_message_ipi(int virq, int message); 116extern int smp_request_message_ipi(int virq, int message);
101extern const char *smp_ipi_name[]; 117extern const char *smp_ipi_name[];
102 118
119/* for irq controllers with only a single ipi */
120extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
121extern void smp_muxed_ipi_message_pass(int cpu, int msg);
122extern void smp_muxed_ipi_resend(void);
123extern irqreturn_t smp_ipi_demux(void);
124
103void smp_init_iSeries(void); 125void smp_init_iSeries(void);
104void smp_init_pSeries(void); 126void smp_init_pSeries(void);
105void smp_init_cell(void); 127void smp_init_cell(void);
@@ -149,7 +171,7 @@ extern int smt_enabled_at_boot;
149 171
150extern int smp_mpic_probe(void); 172extern int smp_mpic_probe(void);
151extern void smp_mpic_setup_cpu(int cpu); 173extern void smp_mpic_setup_cpu(int cpu);
152extern void smp_generic_kick_cpu(int nr); 174extern int smp_generic_kick_cpu(int nr);
153 175
154extern void smp_generic_give_timebase(void); 176extern void smp_generic_give_timebase(void);
155extern void smp_generic_take_timebase(void); 177extern void smp_generic_take_timebase(void);
@@ -169,6 +191,8 @@ extern unsigned long __secondary_hold_spinloop;
169extern unsigned long __secondary_hold_acknowledge; 191extern unsigned long __secondary_hold_acknowledge;
170extern char __secondary_hold; 192extern char __secondary_hold;
171 193
194extern irqreturn_t debug_ipi_action(int irq, void *data);
195
172#endif /* __ASSEMBLY__ */ 196#endif /* __ASSEMBLY__ */
173 197
174#endif /* __KERNEL__ */ 198#endif /* __KERNEL__ */
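
To illustrate how the new hooks compose, a platform with a single muxed IPI could fill in smp_ops_t roughly as below. The example_* names are invented for this sketch (and .cause_ipi assumes CONFIG_PPC_SMP_MUXED_IPI); smp_muxed_ipi_message_pass(), smp_ipi_demux(), smp_mpic_probe(), smp_mpic_setup_cpu() and smp_generic_kick_cpu() are the declarations added above.

/* Sketch of a platform using the muxed-IPI helpers declared above. */
static void example_cause_ipi(int cpu, unsigned long data)
{
	/* Trigger the one hardware IPI for 'cpu'; 'data' is whatever the
	 * platform stored earlier with smp_muxed_ipi_set_data(). */
}

/* Handler the platform would pass to request_irq() for its IPI line. */
static irqreturn_t example_ipi_action(int irq, void *dev_id)
{
	/* Decode the latched message bits and run the matching handlers. */
	return smp_ipi_demux();
}

static struct smp_ops_t example_smp_ops = {
	.message_pass	= smp_muxed_ipi_message_pass,
	.cause_ipi	= example_cause_ipi,	/* CONFIG_PPC_SMP_MUXED_IPI */
	.probe		= smp_mpic_probe,
	.kick_cpu	= smp_generic_kick_cpu,
	.setup_cpu	= smp_mpic_setup_cpu,
};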
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 5e474ddd2273..2dc595dda03b 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -219,8 +219,6 @@ extern int mem_init_done; /* set on boot once kmalloc can be called */
219extern int init_bootmem_done; /* set once bootmem is available */ 219extern int init_bootmem_done; /* set once bootmem is available */
220extern phys_addr_t memory_limit; 220extern phys_addr_t memory_limit;
221extern unsigned long klimit; 221extern unsigned long klimit;
222
223extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
224extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 222extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
225 223
226extern int powersave_nap; /* set if nap mode can be used in idle loop */ 224extern int powersave_nap; /* set if nap mode can be used in idle loop */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index d50a380b2b6f..81143fcbd113 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -79,6 +79,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
79 79
80#elif defined(CONFIG_PPC_STD_MMU_64) 80#elif defined(CONFIG_PPC_STD_MMU_64)
81 81
82#define MMU_NO_CONTEXT 0
83
82/* 84/*
83 * TLB flushing for 64-bit hash-MMU CPUs 85 * TLB flushing for 64-bit hash-MMU CPUs
84 */ 86 */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 11ae699135ba..58580e94a2bb 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,6 +52,7 @@ extern void __init udbg_init_44x_as1(void);
52extern void __init udbg_init_40x_realmode(void); 52extern void __init udbg_init_40x_realmode(void);
53extern void __init udbg_init_cpm(void); 53extern void __init udbg_init_cpm(void);
54extern void __init udbg_init_usbgecko(void); 54extern void __init udbg_init_usbgecko(void);
55extern void __init udbg_init_wsp(void);
55 56
56#endif /* __KERNEL__ */ 57#endif /* __KERNEL__ */
57#endif /* _ASM_POWERPC_UDBG_H */ 58#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h
new file mode 100644
index 000000000000..c7dc83088a33
--- /dev/null
+++ b/arch/powerpc/include/asm/wsp.h
@@ -0,0 +1,14 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef __ASM_POWERPC_WSP_H
10#define __ASM_POWERPC_WSP_H
11
12extern int wsp_get_chip_id(struct device_node *dn);
13
14#endif /* __ASM_POWERPC_WSP_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
new file mode 100644
index 000000000000..b183a4062011
--- /dev/null
+++ b/arch/powerpc/include/asm/xics.h
@@ -0,0 +1,142 @@
1/*
2 * Common definitions across all variants of ICP and ICS interrupt
3 * controllers.
4 */
5
6#ifndef _XICS_H
7#define _XICS_H
8
9#include <linux/interrupt.h>
10
11#define XICS_IPI 2
12#define XICS_IRQ_SPURIOUS 0
13
14/* Want a priority other than 0. Various HW issues require this. */
15#define DEFAULT_PRIORITY 5
16
17/*
18 * Mark IPIs as higher priority so we can take them inside interrupts that
19 * aren't marked IRQF_DISABLED
20 */
21#define IPI_PRIORITY 4
22
23/* The least favored priority */
24#define LOWEST_PRIORITY 0xFF
25
26/* The number of priorities defined above */
27#define MAX_NUM_PRIORITIES 3
28
29/* Native ICP */
30extern int icp_native_init(void);
31
32/* PAPR ICP */
33extern int icp_hv_init(void);
34
35/* ICP ops */
36struct icp_ops {
37 unsigned int (*get_irq)(void);
38 void (*eoi)(struct irq_data *d);
39 void (*set_priority)(unsigned char prio);
40 void (*teardown_cpu)(void);
41 void (*flush_ipi)(void);
42#ifdef CONFIG_SMP
43 void (*cause_ipi)(int cpu, unsigned long data);
44 irq_handler_t ipi_action;
45#endif
46};
47
48extern const struct icp_ops *icp_ops;
49
50/* Native ICS */
51extern int ics_native_init(void);
52
53/* RTAS ICS */
54extern int ics_rtas_init(void);
55
56/* ICS instance, hooked up to chip_data of an irq */
57struct ics {
58 struct list_head link;
59 int (*map)(struct ics *ics, unsigned int virq);
60 void (*mask_unknown)(struct ics *ics, unsigned long vec);
61 long (*get_server)(struct ics *ics, unsigned long vec);
62 int (*host_match)(struct ics *ics, struct device_node *node);
63 char data[];
64};
65
66/* Commons */
67extern unsigned int xics_default_server;
68extern unsigned int xics_default_distrib_server;
69extern unsigned int xics_interrupt_server_size;
70extern struct irq_host *xics_host;
71
72struct xics_cppr {
73 unsigned char stack[MAX_NUM_PRIORITIES];
74 int index;
75};
76
77DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
78
79static inline void xics_push_cppr(unsigned int vec)
80{
81 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
82
83 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
84 return;
85
86 if (vec == XICS_IPI)
87 os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
88 else
89 os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
90}
91
92static inline unsigned char xics_pop_cppr(void)
93{
94 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
95
96 if (WARN_ON(os_cppr->index < 1))
97 return LOWEST_PRIORITY;
98
99 return os_cppr->stack[--os_cppr->index];
100}
101
102static inline void xics_set_base_cppr(unsigned char cppr)
103{
104 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
105
106 /* we only really want to set the priority when there's
107 * just one cppr value on the stack
108 */
109 WARN_ON(os_cppr->index != 0);
110
111 os_cppr->stack[0] = cppr;
112}
113
114static inline unsigned char xics_cppr_top(void)
115{
116 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
117
118 return os_cppr->stack[os_cppr->index];
119}
120
121DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
122
123extern void xics_init(void);
124extern void xics_setup_cpu(void);
125extern void xics_update_irq_servers(void);
126extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
127extern void xics_mask_unknown_vec(unsigned int vec);
128extern irqreturn_t xics_ipi_dispatch(int cpu);
129extern int xics_smp_probe(void);
130extern void xics_register_ics(struct ics *ics);
131extern void xics_teardown_cpu(void);
132extern void xics_kexec_teardown_cpu(int secondary);
133extern void xics_migrate_irqs_away(void);
134#ifdef CONFIG_SMP
135extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
136 unsigned int strict_check);
137#else
138#define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server)
139#endif
140
141
142#endif /* _XICS_H */
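
As a usage illustration of the CPPR stack helpers above, an ICP backend's fetch and EOI paths could look roughly like this sketch. The icp_example_* routines are placeholders invented for this note; only xics_push_cppr(), xics_pop_cppr(), XICS_IPI and XICS_IRQ_SPURIOUS come from this header.

/* Sketch: how an ICP driver might drive the per-cpu CPPR stack. */
static unsigned int icp_example_get_irq(void)
{
	/* icp_example_read_xirr() stands in for the real XIRR access */
	unsigned int vec = icp_example_read_xirr() & 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;

	/* Remember the priority we were running at so EOI can restore it */
	xics_push_cppr(vec);

	return icp_example_vec_to_virq(vec);	/* placeholder reverse map */
}

static void icp_example_eoi(struct irq_data *d)
{
	/* Drop back to the priority in effect before this interrupt and
	 * write it out together with the EOI (placeholder register write). */
	icp_example_write_xirr(xics_pop_cppr(), icp_example_hwirq(d));
}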
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3bb2a3e6a337..9aab36312572 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,11 +38,14 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
38 paca.o nvram_64.o firmware.o 38 paca.o nvram_64.o firmware.o
39obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 39obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
40obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o 40obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
41obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o
41obj64-$(CONFIG_RELOCATABLE) += reloc_64.o 42obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
42obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o 43obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
44obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
43obj-$(CONFIG_PPC64) += vdso64/ 45obj-$(CONFIG_PPC64) += vdso64/
44obj-$(CONFIG_ALTIVEC) += vecemu.o 46obj-$(CONFIG_ALTIVEC) += vecemu.o
45obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 47obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
48obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
46obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o 49obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
47obj-$(CONFIG_PPC_CLOCK) += clock.o 50obj-$(CONFIG_PPC_CLOCK) += clock.o
48procfs-y := proc_powerpc.o 51procfs-y := proc_powerpc.o
@@ -75,7 +78,6 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
75obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o 78obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
76 79
77extra-y := head_$(CONFIG_WORD_SIZE).o 80extra-y := head_$(CONFIG_WORD_SIZE).o
78extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o
79extra-$(CONFIG_40x) := head_40x.o 81extra-$(CONFIG_40x) := head_40x.o
80extra-$(CONFIG_44x) := head_44x.o 82extra-$(CONFIG_44x) := head_44x.o
81extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o 83extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
@@ -103,6 +105,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
103obj-$(CONFIG_AUDIT) += audit.o 105obj-$(CONFIG_AUDIT) += audit.o
104obj64-$(CONFIG_AUDIT) += compat_audit.o 106obj64-$(CONFIG_AUDIT) += compat_audit.o
105 107
108obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
109
106obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 110obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
107obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 111obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
108obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o 112obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 23e6a93145ab..6887661ac072 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -74,6 +74,7 @@ int main(void)
74 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); 74 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
75 DEFINE(SIGSEGV, SIGSEGV); 75 DEFINE(SIGSEGV, SIGSEGV);
76 DEFINE(NMI_MASK, NMI_MASK); 76 DEFINE(NMI_MASK, NMI_MASK);
77 DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
77#else 78#else
78 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); 79 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
79#endif /* CONFIG_PPC64 */ 80#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
new file mode 100644
index 000000000000..7f818feaa7a5
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -0,0 +1,114 @@
1/*
2 * A2 specific assembly support code
3 *
4 * Copyright 2009 Ben Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/asm-offsets.h>
13#include <asm/ppc_asm.h>
14#include <asm/ppc-opcode.h>
15#include <asm/processor.h>
16#include <asm/reg_a2.h>
17#include <asm/reg.h>
18#include <asm/thread_info.h>
19
20/*
21 * Disable thdid and class fields in ERATs to bump the PID to its full 14-bit capacity.
22 * This also prevents external LPID accesses but that isn't a problem when not a
23 * guest. Under PV, this setting will be ignored and MMUCR will return the right
24 * number of PID bits we can use.
25 */
26#define MMUCR1_EXTEND_PID \
27 (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
28 MMUCR1_DTTID | MMUCR1_DCCD)
29
30/*
31 * Use extended PIDs if enabled.
32 * Don't clear the ERATs on context sync events and enable I & D LRU.
33 * Enable ERAT back invalidate when tlbwe overwrites an entry.
34 */
35#define INITIAL_MMUCR1 \
36 (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
37 MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
38
39_GLOBAL(__setup_cpu_a2)
40 /* Some of these are actually thread-local and some are
41 * core-local, but doing it always won't hurt
42 */
43
44#ifdef CONFIG_PPC_WSP_COPRO
45 /* Make sure ACOP starts out as zero */
46 li r3,0
47 mtspr SPRN_ACOP,r3
48
49 /* Enable icswx instruction */
50 mfspr r3,SPRN_A2_CCR2
51 ori r3,r3,A2_CCR2_ENABLE_ICSWX
52 mtspr SPRN_A2_CCR2,r3
53
54 /* Unmask all CTs in HACOP */
55 li r3,-1
56 mtspr SPRN_HACOP,r3
57#endif /* CONFIG_PPC_WSP_COPRO */
58
59 /* Enable doorbell */
60 mfspr r3,SPRN_A2_CCR2
61 oris r3,r3,A2_CCR2_ENABLE_PC@h
62 mtspr SPRN_A2_CCR2,r3
63 isync
64
65 /* Set up CCR0 to disable power saving for now, as it's busted
66 * in the current implementations. Set up CCR1 to wake on
67 * interrupts normally (we write the default value but who
68 * knows what FW may have clobbered...)
69 */
70 li r3,0
71 mtspr SPRN_A2_CCR0, r3
72 LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
73 mtspr SPRN_A2_CCR1, r3
74
75 /* Initialise MMUCR1 */
76 lis r3,INITIAL_MMUCR1@h
77 ori r3,r3,INITIAL_MMUCR1@l
78 mtspr SPRN_MMUCR1,r3
79
80 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
81 LOAD_REG_IMMEDIATE(r3, 0x000a7531)
82 mtspr SPRN_MMUCR2,r3
83
84 /* Set MMUCR3 to write all thid bits to the TLB */
85 LOAD_REG_IMMEDIATE(r3, 0x0000000f)
86 mtspr SPRN_MMUCR3,r3
87
88 /* Don't do ERAT stuff if running guest mode */
89 mfmsr r3
90 andis. r0,r3,MSR_GS@h
91 bne 1f
92
93 /* Now set the I-ERAT watermark to 15 */
94 lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
95 mtspr SPRN_MMUCR0, r4
96 li r4,A2_IERAT_SIZE-1
97 PPC_ERATWE(r4,r4,3)
98
99 /* Now set the D-ERAT watermark to 31 */
100 lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
101 mtspr SPRN_MMUCR0, r4
102 li r4,A2_DERAT_SIZE-1
103 PPC_ERATWE(r4,r4,3)
104
105 /* And invalidate the beast just in case. That won't get rid of
106 * a bolted entry, though it will be in the LRU and so will go away
107 * eventually, but let's not bother for now
108 */
109 PPC_ERATILX(0,0,0)
1101:
111 blr
112
113_GLOBAL(__restore_cpu_a2)
114 b __setup_cpu_a2
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 913611105c1f..8053db02b85e 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -88,6 +88,9 @@ _GLOBAL(__setup_cpu_e5500)
88 bl __e500_dcache_setup 88 bl __e500_dcache_setup
89#ifdef CONFIG_PPC_BOOK3E_64 89#ifdef CONFIG_PPC_BOOK3E_64
90 bl .__setup_base_ivors 90 bl .__setup_base_ivors
91 bl .setup_perfmon_ivor
92 bl .setup_doorbell_ivors
93 bl .setup_ehv_ivors
91#else 94#else
92 bl __setup_e500mc_ivors 95 bl __setup_e500mc_ivors
93#endif 96#endif
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
new file mode 100644
index 000000000000..4f9a93fcfe07
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -0,0 +1,91 @@
1/*
2 * This file contains low level CPU setup functions.
3 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 */
11
12#include <asm/processor.h>
13#include <asm/page.h>
14#include <asm/cputable.h>
15#include <asm/ppc_asm.h>
16#include <asm/asm-offsets.h>
17#include <asm/cache.h>
18
19/* Entry: r3 = unused, r4 = ptr to cputable entry
20 *
21 * Note that we can be called twice for pseudo-PVRs
22 */
23_GLOBAL(__setup_cpu_power7)
24 mflr r11
25 bl __init_hvmode_206
26 mtlr r11
27 beqlr
28 li r0,0
29 mtspr SPRN_LPID,r0
30 bl __init_LPCR
31 bl __init_TLB
32 mtlr r11
33 blr
34
35_GLOBAL(__restore_cpu_power7)
36 mflr r11
37 mfmsr r3
38 rldicl. r0,r3,4,63
39 beqlr
40 li r0,0
41 mtspr SPRN_LPID,r0
42 bl __init_LPCR
43 bl __init_TLB
44 mtlr r11
45 blr
46
47__init_hvmode_206:
48 /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
49 mfmsr r3
50 rldicl. r0,r3,4,63
51 bnelr
52 ld r5,CPU_SPEC_FEATURES(r4)
53 LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
54 xor r5,r5,r6
55 std r5,CPU_SPEC_FEATURES(r4)
56 blr
57
58__init_LPCR:
59 /* Set up a sane LPCR:
60 *
61 * LPES = 0b01 (HSRR0/1 used for 0x500)
62 * PECE = 0b111
63 * DPFD = 4
64 *
65 * Other bits untouched for now
66 */
67 mfspr r3,SPRN_LPCR
68 ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
69 xori r3,r3, LPCR_LPES0
70 ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
71 li r5,7
72 sldi r5,r5,LPCR_DPFD_SH
73 andc r3,r3,r5
74 li r5,4
75 sldi r5,r5,LPCR_DPFD_SH
76 or r3,r3,r5
77 mtspr SPRN_LPCR,r3
78 isync
79 blr
80
81__init_TLB:
82 /* Clear the TLB */
83 li r6,128
84 mtctr r6
85 li r7,0xc00 /* IS field = 0b11 */
86 ptesync
872: tlbiel r7
88 addi r7,r7,0x1000
89 bdnz 2b
90 ptesync
911: blr
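
For readability, the __init_LPCR sequence above is equivalent to the following C paraphrase. It is illustrative only: the function name is made up, while the SPR accessors and LPCR_* masks are the ones the assembly already references (the trailing isync of the real code is omitted here).

/* C paraphrase of __init_LPCR: LPES = 0b01, PECE = 0b111, DPFD = 4. */
static void example_init_lpcr(void)
{
	unsigned long lpcr = mfspr(SPRN_LPCR);

	/* LPES = 0b01 (HSRR0/1 used for 0x500): set LPES1, clear LPES0 */
	lpcr = (lpcr | LPCR_LPES0 | LPCR_LPES1) ^ LPCR_LPES0;
	/* PECE = 0b111 */
	lpcr |= LPCR_PECE0 | LPCR_PECE1 | LPCR_PECE2;
	/* DPFD = 4, clearing the old 3-bit field first */
	lpcr = (lpcr & ~(7ul << LPCR_DPFD_SH)) | (4ul << LPCR_DPFD_SH);

	mtspr(SPRN_LPCR, lpcr);
}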
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b9602ee06deb..34d2722b9451 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -62,10 +62,12 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
62extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); 62extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
63extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); 63extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
64extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); 64extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
65extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec);
65extern void __restore_cpu_pa6t(void); 66extern void __restore_cpu_pa6t(void);
66extern void __restore_cpu_ppc970(void); 67extern void __restore_cpu_ppc970(void);
67extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); 68extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
68extern void __restore_cpu_power7(void); 69extern void __restore_cpu_power7(void);
70extern void __restore_cpu_a2(void);
69#endif /* CONFIG_PPC64 */ 71#endif /* CONFIG_PPC64 */
70#if defined(CONFIG_E500) 72#if defined(CONFIG_E500)
71extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); 73extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -199,7 +201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
199 .cpu_name = "POWER4 (gp)", 201 .cpu_name = "POWER4 (gp)",
200 .cpu_features = CPU_FTRS_POWER4, 202 .cpu_features = CPU_FTRS_POWER4,
201 .cpu_user_features = COMMON_USER_POWER4, 203 .cpu_user_features = COMMON_USER_POWER4,
202 .mmu_features = MMU_FTR_HPTE_TABLE, 204 .mmu_features = MMU_FTRS_POWER4,
203 .icache_bsize = 128, 205 .icache_bsize = 128,
204 .dcache_bsize = 128, 206 .dcache_bsize = 128,
205 .num_pmcs = 8, 207 .num_pmcs = 8,
@@ -214,7 +216,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
214 .cpu_name = "POWER4+ (gq)", 216 .cpu_name = "POWER4+ (gq)",
215 .cpu_features = CPU_FTRS_POWER4, 217 .cpu_features = CPU_FTRS_POWER4,
216 .cpu_user_features = COMMON_USER_POWER4, 218 .cpu_user_features = COMMON_USER_POWER4,
217 .mmu_features = MMU_FTR_HPTE_TABLE, 219 .mmu_features = MMU_FTRS_POWER4,
218 .icache_bsize = 128, 220 .icache_bsize = 128,
219 .dcache_bsize = 128, 221 .dcache_bsize = 128,
220 .num_pmcs = 8, 222 .num_pmcs = 8,
@@ -230,7 +232,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
230 .cpu_features = CPU_FTRS_PPC970, 232 .cpu_features = CPU_FTRS_PPC970,
231 .cpu_user_features = COMMON_USER_POWER4 | 233 .cpu_user_features = COMMON_USER_POWER4 |
232 PPC_FEATURE_HAS_ALTIVEC_COMP, 234 PPC_FEATURE_HAS_ALTIVEC_COMP,
233 .mmu_features = MMU_FTR_HPTE_TABLE, 235 .mmu_features = MMU_FTRS_PPC970,
234 .icache_bsize = 128, 236 .icache_bsize = 128,
235 .dcache_bsize = 128, 237 .dcache_bsize = 128,
236 .num_pmcs = 8, 238 .num_pmcs = 8,
@@ -248,7 +250,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
248 .cpu_features = CPU_FTRS_PPC970, 250 .cpu_features = CPU_FTRS_PPC970,
249 .cpu_user_features = COMMON_USER_POWER4 | 251 .cpu_user_features = COMMON_USER_POWER4 |
250 PPC_FEATURE_HAS_ALTIVEC_COMP, 252 PPC_FEATURE_HAS_ALTIVEC_COMP,
251 .mmu_features = MMU_FTR_HPTE_TABLE, 253 .mmu_features = MMU_FTRS_PPC970,
252 .icache_bsize = 128, 254 .icache_bsize = 128,
253 .dcache_bsize = 128, 255 .dcache_bsize = 128,
254 .num_pmcs = 8, 256 .num_pmcs = 8,
@@ -284,7 +286,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
284 .cpu_features = CPU_FTRS_PPC970, 286 .cpu_features = CPU_FTRS_PPC970,
285 .cpu_user_features = COMMON_USER_POWER4 | 287 .cpu_user_features = COMMON_USER_POWER4 |
286 PPC_FEATURE_HAS_ALTIVEC_COMP, 288 PPC_FEATURE_HAS_ALTIVEC_COMP,
287 .mmu_features = MMU_FTR_HPTE_TABLE, 289 .mmu_features = MMU_FTRS_PPC970,
288 .icache_bsize = 128, 290 .icache_bsize = 128,
289 .dcache_bsize = 128, 291 .dcache_bsize = 128,
290 .num_pmcs = 8, 292 .num_pmcs = 8,
@@ -302,7 +304,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
302 .cpu_features = CPU_FTRS_PPC970, 304 .cpu_features = CPU_FTRS_PPC970,
303 .cpu_user_features = COMMON_USER_POWER4 | 305 .cpu_user_features = COMMON_USER_POWER4 |
304 PPC_FEATURE_HAS_ALTIVEC_COMP, 306 PPC_FEATURE_HAS_ALTIVEC_COMP,
305 .mmu_features = MMU_FTR_HPTE_TABLE, 307 .mmu_features = MMU_FTRS_PPC970,
306 .icache_bsize = 128, 308 .icache_bsize = 128,
307 .dcache_bsize = 128, 309 .dcache_bsize = 128,
308 .num_pmcs = 8, 310 .num_pmcs = 8,
@@ -318,7 +320,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
318 .cpu_name = "POWER5 (gr)", 320 .cpu_name = "POWER5 (gr)",
319 .cpu_features = CPU_FTRS_POWER5, 321 .cpu_features = CPU_FTRS_POWER5,
320 .cpu_user_features = COMMON_USER_POWER5, 322 .cpu_user_features = COMMON_USER_POWER5,
321 .mmu_features = MMU_FTR_HPTE_TABLE, 323 .mmu_features = MMU_FTRS_POWER5,
322 .icache_bsize = 128, 324 .icache_bsize = 128,
323 .dcache_bsize = 128, 325 .dcache_bsize = 128,
324 .num_pmcs = 6, 326 .num_pmcs = 6,
@@ -338,7 +340,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
338 .cpu_name = "POWER5+ (gs)", 340 .cpu_name = "POWER5+ (gs)",
339 .cpu_features = CPU_FTRS_POWER5, 341 .cpu_features = CPU_FTRS_POWER5,
340 .cpu_user_features = COMMON_USER_POWER5_PLUS, 342 .cpu_user_features = COMMON_USER_POWER5_PLUS,
341 .mmu_features = MMU_FTR_HPTE_TABLE, 343 .mmu_features = MMU_FTRS_POWER5,
342 .icache_bsize = 128, 344 .icache_bsize = 128,
343 .dcache_bsize = 128, 345 .dcache_bsize = 128,
344 .num_pmcs = 6, 346 .num_pmcs = 6,
@@ -354,7 +356,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
354 .cpu_name = "POWER5+ (gs)", 356 .cpu_name = "POWER5+ (gs)",
355 .cpu_features = CPU_FTRS_POWER5, 357 .cpu_features = CPU_FTRS_POWER5,
356 .cpu_user_features = COMMON_USER_POWER5_PLUS, 358 .cpu_user_features = COMMON_USER_POWER5_PLUS,
357 .mmu_features = MMU_FTR_HPTE_TABLE, 359 .mmu_features = MMU_FTRS_POWER5,
358 .icache_bsize = 128, 360 .icache_bsize = 128,
359 .dcache_bsize = 128, 361 .dcache_bsize = 128,
360 .num_pmcs = 6, 362 .num_pmcs = 6,
@@ -371,7 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
371 .cpu_name = "POWER5+", 373 .cpu_name = "POWER5+",
372 .cpu_features = CPU_FTRS_POWER5, 374 .cpu_features = CPU_FTRS_POWER5,
373 .cpu_user_features = COMMON_USER_POWER5_PLUS, 375 .cpu_user_features = COMMON_USER_POWER5_PLUS,
374 .mmu_features = MMU_FTR_HPTE_TABLE, 376 .mmu_features = MMU_FTRS_POWER5,
375 .icache_bsize = 128, 377 .icache_bsize = 128,
376 .dcache_bsize = 128, 378 .dcache_bsize = 128,
377 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 379 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
@@ -385,7 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
385 .cpu_features = CPU_FTRS_POWER6, 387 .cpu_features = CPU_FTRS_POWER6,
386 .cpu_user_features = COMMON_USER_POWER6 | 388 .cpu_user_features = COMMON_USER_POWER6 |
387 PPC_FEATURE_POWER6_EXT, 389 PPC_FEATURE_POWER6_EXT,
388 .mmu_features = MMU_FTR_HPTE_TABLE, 390 .mmu_features = MMU_FTRS_POWER6,
389 .icache_bsize = 128, 391 .icache_bsize = 128,
390 .dcache_bsize = 128, 392 .dcache_bsize = 128,
391 .num_pmcs = 6, 393 .num_pmcs = 6,
@@ -404,7 +406,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
404 .cpu_name = "POWER6 (architected)", 406 .cpu_name = "POWER6 (architected)",
405 .cpu_features = CPU_FTRS_POWER6, 407 .cpu_features = CPU_FTRS_POWER6,
406 .cpu_user_features = COMMON_USER_POWER6, 408 .cpu_user_features = COMMON_USER_POWER6,
407 .mmu_features = MMU_FTR_HPTE_TABLE, 409 .mmu_features = MMU_FTRS_POWER6,
408 .icache_bsize = 128, 410 .icache_bsize = 128,
409 .dcache_bsize = 128, 411 .dcache_bsize = 128,
410 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 412 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
@@ -417,12 +419,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
417 .cpu_name = "POWER7 (architected)", 419 .cpu_name = "POWER7 (architected)",
418 .cpu_features = CPU_FTRS_POWER7, 420 .cpu_features = CPU_FTRS_POWER7,
419 .cpu_user_features = COMMON_USER_POWER7, 421 .cpu_user_features = COMMON_USER_POWER7,
420 .mmu_features = MMU_FTR_HPTE_TABLE | 422 .mmu_features = MMU_FTRS_POWER7,
421 MMU_FTR_TLBIE_206,
422 .icache_bsize = 128, 423 .icache_bsize = 128,
423 .dcache_bsize = 128, 424 .dcache_bsize = 128,
424 .oprofile_type = PPC_OPROFILE_POWER4, 425 .oprofile_type = PPC_OPROFILE_POWER4,
425 .oprofile_cpu_type = "ppc64/ibm-compat-v1", 426 .oprofile_cpu_type = "ppc64/ibm-compat-v1",
427 .cpu_setup = __setup_cpu_power7,
428 .cpu_restore = __restore_cpu_power7,
426 .platform = "power7", 429 .platform = "power7",
427 }, 430 },
428 { /* Power7 */ 431 { /* Power7 */
@@ -431,14 +434,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
431 .cpu_name = "POWER7 (raw)", 434 .cpu_name = "POWER7 (raw)",
432 .cpu_features = CPU_FTRS_POWER7, 435 .cpu_features = CPU_FTRS_POWER7,
433 .cpu_user_features = COMMON_USER_POWER7, 436 .cpu_user_features = COMMON_USER_POWER7,
434 .mmu_features = MMU_FTR_HPTE_TABLE | 437 .mmu_features = MMU_FTRS_POWER7,
435 MMU_FTR_TLBIE_206,
436 .icache_bsize = 128, 438 .icache_bsize = 128,
437 .dcache_bsize = 128, 439 .dcache_bsize = 128,
438 .num_pmcs = 6, 440 .num_pmcs = 6,
439 .pmc_type = PPC_PMC_IBM, 441 .pmc_type = PPC_PMC_IBM,
440 .oprofile_cpu_type = "ppc64/power7", 442 .oprofile_cpu_type = "ppc64/power7",
441 .oprofile_type = PPC_OPROFILE_POWER4, 443 .oprofile_type = PPC_OPROFILE_POWER4,
444 .cpu_setup = __setup_cpu_power7,
445 .cpu_restore = __restore_cpu_power7,
442 .platform = "power7", 446 .platform = "power7",
443 }, 447 },
444 { /* Power7+ */ 448 { /* Power7+ */
@@ -447,14 +451,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
447 .cpu_name = "POWER7+ (raw)", 451 .cpu_name = "POWER7+ (raw)",
448 .cpu_features = CPU_FTRS_POWER7, 452 .cpu_features = CPU_FTRS_POWER7,
449 .cpu_user_features = COMMON_USER_POWER7, 453 .cpu_user_features = COMMON_USER_POWER7,
450 .mmu_features = MMU_FTR_HPTE_TABLE | 454 .mmu_features = MMU_FTRS_POWER7,
451 MMU_FTR_TLBIE_206,
452 .icache_bsize = 128, 455 .icache_bsize = 128,
453 .dcache_bsize = 128, 456 .dcache_bsize = 128,
454 .num_pmcs = 6, 457 .num_pmcs = 6,
455 .pmc_type = PPC_PMC_IBM, 458 .pmc_type = PPC_PMC_IBM,
456 .oprofile_cpu_type = "ppc64/power7", 459 .oprofile_cpu_type = "ppc64/power7",
457 .oprofile_type = PPC_OPROFILE_POWER4, 460 .oprofile_type = PPC_OPROFILE_POWER4,
461 .cpu_setup = __setup_cpu_power7,
462 .cpu_restore = __restore_cpu_power7,
458 .platform = "power7+", 463 .platform = "power7+",
459 }, 464 },
460 { /* Cell Broadband Engine */ 465 { /* Cell Broadband Engine */
@@ -465,7 +470,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
465 .cpu_user_features = COMMON_USER_PPC64 | 470 .cpu_user_features = COMMON_USER_PPC64 |
466 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | 471 PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
467 PPC_FEATURE_SMT, 472 PPC_FEATURE_SMT,
468 .mmu_features = MMU_FTR_HPTE_TABLE, 473 .mmu_features = MMU_FTRS_CELL,
469 .icache_bsize = 128, 474 .icache_bsize = 128,
470 .dcache_bsize = 128, 475 .dcache_bsize = 128,
471 .num_pmcs = 4, 476 .num_pmcs = 4,
@@ -480,7 +485,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
480 .cpu_name = "PA6T", 485 .cpu_name = "PA6T",
481 .cpu_features = CPU_FTRS_PA6T, 486 .cpu_features = CPU_FTRS_PA6T,
482 .cpu_user_features = COMMON_USER_PA6T, 487 .cpu_user_features = COMMON_USER_PA6T,
483 .mmu_features = MMU_FTR_HPTE_TABLE, 488 .mmu_features = MMU_FTRS_PA6T,
484 .icache_bsize = 64, 489 .icache_bsize = 64,
485 .dcache_bsize = 64, 490 .dcache_bsize = 64,
486 .num_pmcs = 6, 491 .num_pmcs = 6,
@@ -497,7 +502,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
497 .cpu_name = "POWER4 (compatible)", 502 .cpu_name = "POWER4 (compatible)",
498 .cpu_features = CPU_FTRS_COMPATIBLE, 503 .cpu_features = CPU_FTRS_COMPATIBLE,
499 .cpu_user_features = COMMON_USER_PPC64, 504 .cpu_user_features = COMMON_USER_PPC64,
500 .mmu_features = MMU_FTR_HPTE_TABLE, 505 .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2,
501 .icache_bsize = 128, 506 .icache_bsize = 128,
502 .dcache_bsize = 128, 507 .dcache_bsize = 128,
503 .num_pmcs = 6, 508 .num_pmcs = 6,
@@ -2005,7 +2010,22 @@ static struct cpu_spec __initdata cpu_specs[] = {
2005#endif /* CONFIG_PPC32 */ 2010#endif /* CONFIG_PPC32 */
2006#endif /* CONFIG_E500 */ 2011#endif /* CONFIG_E500 */
2007 2012
2008#ifdef CONFIG_PPC_BOOK3E_64 2013#ifdef CONFIG_PPC_A2
2014 { /* Standard A2 (>= DD2) + FPU core */
2015 .pvr_mask = 0xffff0000,
2016 .pvr_value = 0x00480000,
2017 .cpu_name = "A2 (>= DD2)",
2018 .cpu_features = CPU_FTRS_A2,
2019 .cpu_user_features = COMMON_USER_PPC64,
2020 .mmu_features = MMU_FTRS_A2,
2021 .icache_bsize = 64,
2022 .dcache_bsize = 64,
2023 .num_pmcs = 0,
2024 .cpu_setup = __setup_cpu_a2,
2025 .cpu_restore = __restore_cpu_a2,
2026 .machine_check = machine_check_generic,
2027 .platform = "ppca2",
2028 },
2009 { /* This is a default entry to get going, to be replaced by 2029 { /* This is a default entry to get going, to be replaced by
2010 * a real one at some stage 2030 * a real one at some stage
2011 */ 2031 */
@@ -2026,7 +2046,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
2026 .machine_check = machine_check_generic, 2046 .machine_check = machine_check_generic,
2027 .platform = "power6", 2047 .platform = "power6",
2028 }, 2048 },
2029#endif 2049#endif /* CONFIG_PPC_A2 */
2030}; 2050};
2031 2051
2032static struct cpu_spec the_cpu_spec; 2052static struct cpu_spec the_cpu_spec;
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 5b5e1f002a8e..4e6ee944495a 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
64 return; 64 return;
65 65
66 hard_irq_disable(); 66 hard_irq_disable();
67 if (!cpu_isset(cpu, cpus_in_crash)) 67 if (!cpumask_test_cpu(cpu, &cpus_in_crash))
68 crash_save_cpu(regs, cpu); 68 crash_save_cpu(regs, cpu);
69 cpu_set(cpu, cpus_in_crash); 69 cpumask_set_cpu(cpu, &cpus_in_crash);
70 70
71 /* 71 /*
72 * Entered via soft-reset - could be the kdump 72 * Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
77 * Tell the kexec CPU that entered via soft-reset and ready 77 * Tell the kexec CPU that entered via soft-reset and ready
78 * to go down. 78 * to go down.
79 */ 79 */
80 if (cpu_isset(cpu, cpus_in_sr)) { 80 if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
81 cpu_clear(cpu, cpus_in_sr); 81 cpumask_clear_cpu(cpu, &cpus_in_sr);
82 atomic_inc(&enter_on_soft_reset); 82 atomic_inc(&enter_on_soft_reset);
83 } 83 }
84 84
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
87 * This barrier is needed to make sure that all CPUs are stopped. 87 * This barrier is needed to make sure that all CPUs are stopped.
88 * If not, soft-reset will be invoked to bring other CPUs. 88 * If not, soft-reset will be invoked to bring other CPUs.
89 */ 89 */
90 while (!cpu_isset(crashing_cpu, cpus_in_crash)) 90 while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
91 cpu_relax(); 91 cpu_relax();
92 92
93 if (ppc_md.kexec_cpu_down) 93 if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
109{ 109{
110 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ 110 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
111 111
112 cpu_clear(cpu, cpus_in_sr); 112 cpumask_clear_cpu(cpu, &cpus_in_sr);
113 while (atomic_read(&enter_on_soft_reset) != ncpus) 113 while (atomic_read(&enter_on_soft_reset) != ncpus)
114 cpu_relax(); 114 cpu_relax();
115} 115}
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
132 */ 132 */
133 printk(KERN_EMERG "Sending IPI to other cpus...\n"); 133 printk(KERN_EMERG "Sending IPI to other cpus...\n");
134 msecs = 10000; 134 msecs = 10000;
135 while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { 135 while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
136 cpu_relax(); 136 cpu_relax();
137 mdelay(1); 137 mdelay(1);
138 } 138 }
@@ -144,52 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu)
144 * user to do soft reset such that we get all. 144 * user to do soft reset such that we get all.
145 * Soft-reset will be used until better mechanism is implemented. 145 * Soft-reset will be used until better mechanism is implemented.
146 */ 146 */
147 if (cpus_weight(cpus_in_crash) < ncpus) { 147 if (cpumask_weight(&cpus_in_crash) < ncpus) {
148 printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", 148 printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
149 ncpus - cpus_weight(cpus_in_crash)); 149 ncpus - cpumask_weight(&cpus_in_crash));
150 printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); 150 printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
151 cpus_in_sr = CPU_MASK_NONE; 151 cpumask_clear(&cpus_in_sr);
152 atomic_set(&enter_on_soft_reset, 0); 152 atomic_set(&enter_on_soft_reset, 0);
153 while (cpus_weight(cpus_in_crash) < ncpus) 153 while (cpumask_weight(&cpus_in_crash) < ncpus)
154 cpu_relax(); 154 cpu_relax();
155 } 155 }
156 /* 156 /*
157 * Make sure all CPUs are entered via soft-reset if the kdump is 157 * Make sure all CPUs are entered via soft-reset if the kdump is
158 * invoked using soft-reset. 158 * invoked using soft-reset.
159 */ 159 */
160 if (cpu_isset(cpu, cpus_in_sr)) 160 if (cpumask_test_cpu(cpu, &cpus_in_sr))
161 crash_soft_reset_check(cpu); 161 crash_soft_reset_check(cpu);
162 /* Leave the IPI callback set */ 162 /* Leave the IPI callback set */
163} 163}
164 164
165/* wait for all the CPUs to hit real mode but timeout if they don't come in */
166#ifdef CONFIG_PPC_STD_MMU_64
167static void crash_kexec_wait_realmode(int cpu)
168{
169 unsigned int msecs;
170 int i;
171
172 msecs = 10000;
173 for (i=0; i < NR_CPUS && msecs > 0; i++) {
174 if (i == cpu)
175 continue;
176
177 while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
178 barrier();
179 if (!cpu_possible(i)) {
180 break;
181 }
182 if (!cpu_online(i)) {
183 break;
184 }
185 msecs--;
186 mdelay(1);
187 }
188 }
189 mb();
190}
191#endif /* CONFIG_PPC_STD_MMU_64 */
192
193/* 165/*
194 * This function will be called by secondary cpus or by kexec cpu 166 * This function will be called by secondary cpus or by kexec cpu
195 * if soft-reset is activated to stop some CPUs. 167 * if soft-reset is activated to stop some CPUs.
@@ -210,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
210 * exited using 'x'(exit and recover) or 182 * exited using 'x'(exit and recover) or
211 * kexec_should_crash() failed for all running tasks. 183 * kexec_should_crash() failed for all running tasks.
212 */ 184 */
213 cpu_clear(cpu, cpus_in_sr); 185 cpumask_clear_cpu(cpu, &cpus_in_sr);
214 local_irq_restore(flags); 186 local_irq_restore(flags);
215 return; 187 return;
216 } 188 }
@@ -224,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
224 * then start kexec boot. 196 * then start kexec boot.
225 */ 197 */
226 crash_soft_reset_check(cpu); 198 crash_soft_reset_check(cpu);
227 cpu_set(crashing_cpu, cpus_in_crash); 199 cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
228 if (ppc_md.kexec_cpu_down) 200 if (ppc_md.kexec_cpu_down)
229 ppc_md.kexec_cpu_down(1, 0); 201 ppc_md.kexec_cpu_down(1, 0);
230 machine_kexec(kexec_crash_image); 202 machine_kexec(kexec_crash_image);
@@ -234,7 +206,6 @@ void crash_kexec_secondary(struct pt_regs *regs)
234} 206}
235 207
236#else /* ! CONFIG_SMP */ 208#else /* ! CONFIG_SMP */
237static inline void crash_kexec_wait_realmode(int cpu) {}
238 209
239static void crash_kexec_prepare_cpus(int cpu) 210static void crash_kexec_prepare_cpus(int cpu)
240{ 211{
@@ -253,10 +224,40 @@ static void crash_kexec_prepare_cpus(int cpu)
253 224
254void crash_kexec_secondary(struct pt_regs *regs) 225void crash_kexec_secondary(struct pt_regs *regs)
255{ 226{
256 cpus_in_sr = CPU_MASK_NONE; 227 cpumask_clear(&cpus_in_sr);
257} 228}
258#endif /* CONFIG_SMP */ 229#endif /* CONFIG_SMP */
259 230
231/* wait for all the CPUs to hit real mode but timeout if they don't come in */
232#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
233static void crash_kexec_wait_realmode(int cpu)
234{
235 unsigned int msecs;
236 int i;
237
238 msecs = 10000;
239 for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
240 if (i == cpu)
241 continue;
242
243 while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
244 barrier();
245 if (!cpu_possible(i)) {
246 break;
247 }
248 if (!cpu_online(i)) {
249 break;
250 }
251 msecs--;
252 mdelay(1);
253 }
254 }
255 mb();
256}
257#else
258static inline void crash_kexec_wait_realmode(int cpu) {}
259#endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */
260
260/* 261/*
261 * Register a function to be called on shutdown. Only use this if you 262 * Register a function to be called on shutdown. Only use this if you
262 * can't reset your device in the second kernel. 263 * can't reset your device in the second kernel.
@@ -345,7 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
345 crashing_cpu = smp_processor_id(); 346 crashing_cpu = smp_processor_id();
346 crash_save_cpu(regs, crashing_cpu); 347 crash_save_cpu(regs, crashing_cpu);
347 crash_kexec_prepare_cpus(crashing_cpu); 348 crash_kexec_prepare_cpus(crashing_cpu);
348 cpu_set(crashing_cpu, cpus_in_crash); 349 cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
349 crash_kexec_wait_realmode(crashing_cpu); 350 crash_kexec_wait_realmode(crashing_cpu);
350 351
351 machine_kexec_mask_interrupts(); 352 machine_kexec_mask_interrupts();
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 3307a52d797f..2cc451aaaca7 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -13,84 +13,35 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/threads.h> 15#include <linux/threads.h>
16#include <linux/percpu.h> 16#include <linux/hardirq.h>
17 17
18#include <asm/dbell.h> 18#include <asm/dbell.h>
19#include <asm/irq_regs.h> 19#include <asm/irq_regs.h>
20 20
21#ifdef CONFIG_SMP 21#ifdef CONFIG_SMP
22struct doorbell_cpu_info {
23 unsigned long messages; /* current messages bits */
24 unsigned int tag; /* tag value */
25};
26
27static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info);
28
29void doorbell_setup_this_cpu(void) 22void doorbell_setup_this_cpu(void)
30{ 23{
31 struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); 24 unsigned long tag = mfspr(SPRN_PIR) & 0x3fff;
32 25
33 info->messages = 0; 26 smp_muxed_ipi_set_data(smp_processor_id(), tag);
34 info->tag = mfspr(SPRN_PIR) & 0x3fff;
35} 27}
36 28
37void doorbell_message_pass(int target, int msg) 29void doorbell_cause_ipi(int cpu, unsigned long data)
38{ 30{
39 struct doorbell_cpu_info *info; 31 ppc_msgsnd(PPC_DBELL, 0, data);
40 int i;
41
42 if (target < NR_CPUS) {
43 info = &per_cpu(doorbell_cpu_info, target);
44 set_bit(msg, &info->messages);
45 ppc_msgsnd(PPC_DBELL, 0, info->tag);
46 }
47 else if (target == MSG_ALL_BUT_SELF) {
48 for_each_online_cpu(i) {
49 if (i == smp_processor_id())
50 continue;
51 info = &per_cpu(doorbell_cpu_info, i);
52 set_bit(msg, &info->messages);
53 ppc_msgsnd(PPC_DBELL, 0, info->tag);
54 }
55 }
56 else { /* target == MSG_ALL */
57 for_each_online_cpu(i) {
58 info = &per_cpu(doorbell_cpu_info, i);
59 set_bit(msg, &info->messages);
60 }
61 ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0);
62 }
63} 32}
64 33
65void doorbell_exception(struct pt_regs *regs) 34void doorbell_exception(struct pt_regs *regs)
66{ 35{
67 struct pt_regs *old_regs = set_irq_regs(regs); 36 struct pt_regs *old_regs = set_irq_regs(regs);
68 struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
69 int msg;
70 37
71 /* Warning: regs can be NULL when called from irq enable */ 38 irq_enter();
72 39
73 if (!info->messages || (num_online_cpus() < 2)) 40 smp_ipi_demux();
74 goto out;
75 41
76 for (msg = 0; msg < 4; msg++) 42 irq_exit();
77 if (test_and_clear_bit(msg, &info->messages))
78 smp_message_recv(msg);
79
80out:
81 set_irq_regs(old_regs); 43 set_irq_regs(old_regs);
82} 44}
83
84void doorbell_check_self(void)
85{
86 struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
87
88 if (!info->messages)
89 return;
90
91 ppc_msgsnd(PPC_DBELL, 0, info->tag);
92}
93
94#else /* CONFIG_SMP */ 45#else /* CONFIG_SMP */
95void doorbell_exception(struct pt_regs *regs) 46void doorbell_exception(struct pt_regs *regs)
96{ 47{
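
Seen together with the smp.h changes earlier in this diff, the doorbell code is now just a cause_ipi backend for the muxed-IPI layer. A Book3E platform would wire it up roughly as in the sketch below; the ops structure and setup function names are illustrative, not from this patch.

/* Sketch: doorbells as the muxed-IPI backend. */
static void example_setup_cpu(int cpu)
{
	/* Record this CPU's PIR-derived tag as its IPI data, so that
	 * doorbell_cause_ipi(cpu, data) can target it; the receiving
	 * doorbell_exception() above then just calls smp_ipi_demux(). */
	doorbell_setup_this_cpu();
}

static struct smp_ops_t example_book3e_smp_ops = {
	.message_pass	= smp_muxed_ipi_message_pass,
	.cause_ipi	= doorbell_cause_ipi,	/* msgsnd to the target's tag */
	.probe		= smp_mpic_probe,
	.kick_cpu	= smp_generic_kick_cpu,
	.setup_cpu	= example_setup_cpu,
};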
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d82878c4daa6..d834425186ae 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -421,6 +421,12 @@ BEGIN_FTR_SECTION
421 std r24,THREAD_VRSAVE(r3) 421 std r24,THREAD_VRSAVE(r3)
422END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 422END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
423#endif /* CONFIG_ALTIVEC */ 423#endif /* CONFIG_ALTIVEC */
424#ifdef CONFIG_PPC64
425BEGIN_FTR_SECTION
426 mfspr r25,SPRN_DSCR
427 std r25,THREAD_DSCR(r3)
428END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
429#endif
424 and. r0,r0,r22 430 and. r0,r0,r22
425 beq+ 1f 431 beq+ 1f
426 andc r22,r22,r0 432 andc r22,r22,r0
@@ -462,10 +468,10 @@ BEGIN_FTR_SECTION
462 FTR_SECTION_ELSE_NESTED(95) 468 FTR_SECTION_ELSE_NESTED(95)
463 clrrdi r6,r8,40 /* get its 1T ESID */ 469 clrrdi r6,r8,40 /* get its 1T ESID */
464 clrrdi r9,r1,40 /* get current sp 1T ESID */ 470 clrrdi r9,r1,40 /* get current sp 1T ESID */
465 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) 471 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
466FTR_SECTION_ELSE 472FTR_SECTION_ELSE
467 b 2f 473 b 2f
468ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) 474ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
469 clrldi. r0,r6,2 /* is new ESID c00000000? */ 475 clrldi. r0,r6,2 /* is new ESID c00000000? */
470 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ 476 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
471 cror eq,4*cr1+eq,eq 477 cror eq,4*cr1+eq,eq
@@ -479,7 +485,7 @@ BEGIN_FTR_SECTION
479 li r9,MMU_SEGSIZE_1T /* insert B field */ 485 li r9,MMU_SEGSIZE_1T /* insert B field */
480 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h 486 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
481 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 487 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
482END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 488END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
483 489
484 /* Update the last bolted SLB. No write barriers are needed 490 /* Update the last bolted SLB. No write barriers are needed
485 * here, provided we only update the current CPU's SLB shadow 491 * here, provided we only update the current CPU's SLB shadow
@@ -491,7 +497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
491 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ 497 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
492 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ 498 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
493 499
494 /* No need to check for CPU_FTR_NO_SLBIE_B here, since when 500 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
495 * we have 1TB segments, the only CPUs known to have the errata 501 * we have 1TB segments, the only CPUs known to have the errata
496 * only support less than 1TB of system memory and we'll never 502 * only support less than 1TB of system memory and we'll never
497 * actually hit this code path. 503 * actually hit this code path.
@@ -522,6 +528,15 @@ BEGIN_FTR_SECTION
522 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ 528 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
523END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 529END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
524#endif /* CONFIG_ALTIVEC */ 530#endif /* CONFIG_ALTIVEC */
531#ifdef CONFIG_PPC64
532BEGIN_FTR_SECTION
533 ld r0,THREAD_DSCR(r4)
534 cmpd r0,r25
535 beq 1f
536 mtspr SPRN_DSCR,r0
5371:
538END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
539#endif
525 540
526 /* r3-r13 are destroyed -- Cort */ 541 /* r3-r13 are destroyed -- Cort */
527 REST_8GPRS(14, r1) 542 REST_8GPRS(14, r1)
@@ -838,7 +853,7 @@ _GLOBAL(enter_rtas)
838 853
839_STATIC(rtas_return_loc) 854_STATIC(rtas_return_loc)
840 /* relocation is off at this point */ 855 /* relocation is off at this point */
841 mfspr r4,SPRN_SPRG_PACA /* Get PACA */ 856 GET_PACA(r4)
842 clrldi r4,r4,2 /* convert to realmode address */ 857 clrldi r4,r4,2 /* convert to realmode address */
843 858
844 bcl 20,31,$+4 859 bcl 20,31,$+4
@@ -869,7 +884,7 @@ _STATIC(rtas_restore_regs)
869 REST_8GPRS(14, r1) /* Restore the non-volatiles */ 884 REST_8GPRS(14, r1) /* Restore the non-volatiles */
870 REST_10GPRS(22, r1) /* ditto */ 885 REST_10GPRS(22, r1) /* ditto */
871 886
872 mfspr r13,SPRN_SPRG_PACA 887 GET_PACA(r13)
873 888
874 ld r4,_CCR(r1) 889 ld r4,_CCR(r1)
875 mtcr r4 890 mtcr r4
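
The two hunks above add per-thread DSCR switching; in C terms the logic added to _switch() amounts to the sketch below (an illustrative paraphrase only, with 'prev'/'next' standing for the outgoing and incoming thread_structs).

/* C paraphrase of the DSCR save/restore added to _switch() above. */
static inline void example_switch_dscr(struct thread_struct *prev,
				       struct thread_struct *next)
{
	if (!cpu_has_feature(CPU_FTR_DSCR))
		return;

	prev->dscr = mfspr(SPRN_DSCR);	/* save the outgoing thread's DSCR */
	if (next->dscr != prev->dscr)	/* skip the mtspr when unchanged */
		mtspr(SPRN_DSCR, next->dscr);
}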
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 9651acc3504a..d24d4400cc79 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,6 +17,7 @@
17#include <asm/cputable.h> 17#include <asm/cputable.h>
18#include <asm/setup.h> 18#include <asm/setup.h>
19#include <asm/thread_info.h> 19#include <asm/thread_info.h>
20#include <asm/reg_a2.h>
20#include <asm/exception-64e.h> 21#include <asm/exception-64e.h>
21#include <asm/bug.h> 22#include <asm/bug.h>
22#include <asm/irqflags.h> 23#include <asm/irqflags.h>
@@ -252,9 +253,6 @@ exception_marker:
252 .balign 0x1000 253 .balign 0x1000
253 .globl interrupt_base_book3e 254 .globl interrupt_base_book3e
254interrupt_base_book3e: /* fake trap */ 255interrupt_base_book3e: /* fake trap */
255 /* Note: If real debug exceptions are supported by the HW, the vector
256 * below will have to be patched up to point to an appropriate handler
257 */
258 EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ 256 EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */
259 EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ 257 EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */
260 EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ 258 EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */
@@ -271,8 +269,13 @@ interrupt_base_book3e: /* fake trap */
271 EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ 269 EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */
272 EXCEPTION_STUB(0x1c0, data_tlb_miss) 270 EXCEPTION_STUB(0x1c0, data_tlb_miss)
273 EXCEPTION_STUB(0x1e0, instruction_tlb_miss) 271 EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
272 EXCEPTION_STUB(0x260, perfmon)
274 EXCEPTION_STUB(0x280, doorbell) 273 EXCEPTION_STUB(0x280, doorbell)
275 EXCEPTION_STUB(0x2a0, doorbell_crit) 274 EXCEPTION_STUB(0x2a0, doorbell_crit)
275 EXCEPTION_STUB(0x2c0, guest_doorbell)
276 EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
277 EXCEPTION_STUB(0x300, hypercall)
278 EXCEPTION_STUB(0x320, ehpriv)
276 279
277 .globl interrupt_end_book3e 280 .globl interrupt_end_book3e
278interrupt_end_book3e: 281interrupt_end_book3e:
@@ -454,6 +457,70 @@ interrupt_end_book3e:
454kernel_dbg_exc: 457kernel_dbg_exc:
455 b . /* NYI */ 458 b . /* NYI */
456 459
460/* Debug exception as a debug interrupt */
461 START_EXCEPTION(debug_debug);
462 DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
463
464 /*
465 * If there is a single step or branch-taken exception in an
466 * exception entry sequence, it was probably meant to apply to
467 * the code where the exception occurred (since exception entry
468 * doesn't turn off DE automatically). We simulate the effect
469 * of turning off DE on entry to an exception handler by turning
470 * off DE in the DSRR1 value and clearing the debug status.
471 */
472
473 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
474 andis. r15,r14,DBSR_IC@h
475 beq+ 1f
476
477 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
478 LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
479 cmpld cr0,r10,r14
480 cmpld cr1,r10,r15
481 blt+ cr0,1f
482 bge+ cr1,1f
483
484 /* here it looks like we got an inappropriate debug exception. */
485 lis r14,DBSR_IC@h /* clear the IC event */
486 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
487 mtspr SPRN_DBSR,r14
488 mtspr SPRN_DSRR1,r11
489 lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */
490 ld r1,PACA_EXDBG+EX_R1(r13)
491 ld r14,PACA_EXDBG+EX_R14(r13)
492 ld r15,PACA_EXDBG+EX_R15(r13)
493 mtcr r10
494 ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */
495 ld r11,PACA_EXDBG+EX_R11(r13)
496 mfspr r13,SPRN_SPRG_DBG_SCRATCH
497 rfdi
498
499 /* Normal debug exception */
500 /* XXX We only handle coming from userspace for now since we can't
501 * quite properly save an interrupted kernel state yet
502 */
5031: andi. r14,r11,MSR_PR; /* check for userspace again */
504 beq kernel_dbg_exc; /* if from kernel mode */
505
506 /* Now we mash things up to make it look like we are coming in on a
507 * normal exception
508 */
509 mfspr r15,SPRN_SPRG_DBG_SCRATCH
510 mtspr SPRN_SPRG_GEN_SCRATCH,r15
511 mfspr r14,SPRN_DBSR
512 EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL)
513 std r14,_DSISR(r1)
514 addi r3,r1,STACK_FRAME_OVERHEAD
515 mr r4,r14
516 ld r14,PACA_EXDBG+EX_R14(r13)
517 ld r15,PACA_EXDBG+EX_R15(r13)
518 bl .save_nvgprs
519 bl .DebugException
520 b .ret_from_except
521
522 MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
523
457/* Doorbell interrupt */ 524/* Doorbell interrupt */
458 MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) 525 MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)
459 526
@@ -468,6 +535,11 @@ kernel_dbg_exc:
468// b ret_from_crit_except 535// b ret_from_crit_except
469 b . 536 b .
470 537
538 MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
539 MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
540 MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
541 MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
542
471 543
472/* 544/*
473 * An interrupt came in while soft-disabled; clear EE in SRR1, 545 * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -587,7 +659,12 @@ fast_exception_return:
587BAD_STACK_TRAMPOLINE(0x000) 659BAD_STACK_TRAMPOLINE(0x000)
588BAD_STACK_TRAMPOLINE(0x100) 660BAD_STACK_TRAMPOLINE(0x100)
589BAD_STACK_TRAMPOLINE(0x200) 661BAD_STACK_TRAMPOLINE(0x200)
662BAD_STACK_TRAMPOLINE(0x260)
663BAD_STACK_TRAMPOLINE(0x2c0)
664BAD_STACK_TRAMPOLINE(0x2e0)
590BAD_STACK_TRAMPOLINE(0x300) 665BAD_STACK_TRAMPOLINE(0x300)
666BAD_STACK_TRAMPOLINE(0x310)
667BAD_STACK_TRAMPOLINE(0x320)
591BAD_STACK_TRAMPOLINE(0x400) 668BAD_STACK_TRAMPOLINE(0x400)
592BAD_STACK_TRAMPOLINE(0x500) 669BAD_STACK_TRAMPOLINE(0x500)
593BAD_STACK_TRAMPOLINE(0x600) 670BAD_STACK_TRAMPOLINE(0x600)
@@ -864,8 +941,23 @@ have_hes:
864 * that will have to be made dependent on whether we are running under 941 * that will have to be made dependent on whether we are running under
865 * a hypervisor I suppose. 942 * a hypervisor I suppose.
866 */ 943 */
867 ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS 944
868 mtspr SPRN_MAS0,r3 945 /* BEWARE, MAGIC
946 * This code is called as an ordinary function on the boot CPU. But to
947 * avoid duplication, this code is also used in SCOM bringup of
948 * secondary CPUs. We read the code between the a2_tlbinit_code_start
949 * and a2_tlbinit_code_end labels one instruction at a time and RAM it
950 * into the new core via SCOM. That doesn't process branches, so there
951 * must be none between those two labels. It also means if this code
952 * ever takes any parameters, the SCOM code must also be updated to
953 * provide them.
954 */
955 .globl a2_tlbinit_code_start
956a2_tlbinit_code_start:
957
958 ori r11,r3,MAS0_WQ_ALLWAYS
959 oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
960 mtspr SPRN_MAS0,r11
869 lis r3,(MAS1_VALID | MAS1_IPROT)@h 961 lis r3,(MAS1_VALID | MAS1_IPROT)@h
870 ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT 962 ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
871 mtspr SPRN_MAS1,r3 963 mtspr SPRN_MAS1,r3
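The BEWARE/MAGIC comment above imposes an unusual constraint: the TLB-init stub between a2_tlbinit_code_start and a2_tlbinit_code_end is pushed into a secondary core one instruction at a time over SCOM, so it may contain no branches and take no parameters. A hedged sketch of what such a word-by-word loader looks like (scom_write_insn is a hypothetical transport helper):

    #include <stddef.h>
    #include <stdint.h>

    /* Stream the stub into the new core one 32-bit instruction at a time.
     * Nothing here relocates or fixes up branch targets, which is exactly
     * why the stub itself must be branch-free. */
    static void ram_tlbinit_stub(const uint32_t *start, const uint32_t *end,
                                 void (*scom_write_insn)(size_t idx, uint32_t insn))
    {
            for (size_t i = 0; &start[i] < end; i++)
                    scom_write_insn(i, start[i]);
    }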
@@ -879,18 +971,86 @@ have_hes:
879 /* Write the TLB entry */ 971 /* Write the TLB entry */
880 tlbwe 972 tlbwe
881 973
974 .globl a2_tlbinit_after_linear_map
975a2_tlbinit_after_linear_map:
976
882 /* Now we branch the new virtual address mapped by this entry */ 977 /* Now we branch the new virtual address mapped by this entry */
883 LOAD_REG_IMMEDIATE(r3,1f) 978 LOAD_REG_IMMEDIATE(r3,1f)
884 mtctr r3 979 mtctr r3
885 bctr 980 bctr
886 981
8871: /* We are now running at PAGE_OFFSET, clean the TLB of everything 9821: /* We are now running at PAGE_OFFSET, clean the TLB of everything
888 * else (XXX we should scan for bolted crap from the firmware too) 983 * else (including IPROTed things left by firmware)
984 * r4 = TLBnCFG
985 * r3 = current address (more or less)
889 */ 986 */
987
988 li r5,0
989 mtspr SPRN_MAS6,r5
990 tlbsx 0,r3
991
992 rlwinm r9,r4,0,TLBnCFG_N_ENTRY
993 rlwinm r10,r4,8,0xff
994 addi r10,r10,-1 /* Get inner loop mask */
995
996 li r3,1
997
998 mfspr r5,SPRN_MAS1
999 rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
1000
1001 mfspr r6,SPRN_MAS2
1002 rldicr r6,r6,0,51 /* Extract EPN */
1003
1004 mfspr r7,SPRN_MAS0
1005 rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */
1006
1007 rlwinm r8,r7,16,0xfff /* Extract ESEL */
1008
10092: add r4,r3,r8
1010 and r4,r4,r10
1011
1012 rlwimi r7,r4,16,MAS0_ESEL_MASK
1013
1014 mtspr SPRN_MAS0,r7
1015 mtspr SPRN_MAS1,r5
1016 mtspr SPRN_MAS2,r6
1017 tlbwe
1018
1019 addi r3,r3,1
1020 and. r4,r3,r10
1021
1022 bne 3f
1023 addis r6,r6,(1<<30)@h
10243:
1025 cmpw r3,r9
1026 blt 2b
1027
1028 .globl a2_tlbinit_after_iprot_flush
1029a2_tlbinit_after_iprot_flush:
1030
1031#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
1032 /* Now establish early debug mappings if applicable */
1033 /* Restore the MAS0 we used for linear mapping load */
1034 mtspr SPRN_MAS0,r11
1035
1036 lis r3,(MAS1_VALID | MAS1_IPROT)@h
1037 ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
1038 mtspr SPRN_MAS1,r3
1039 LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
1040 mtspr SPRN_MAS2,r3
1041 LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
1042 mtspr SPRN_MAS7_MAS3,r3
1043 /* re-use the MAS8 value from the linear mapping */
1044 tlbwe
1045#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
1046
890 PPC_TLBILX(0,0,0) 1047 PPC_TLBILX(0,0,0)
891 sync 1048 sync
892 isync 1049 isync
893 1050
1051 .globl a2_tlbinit_code_end
1052a2_tlbinit_code_end:
1053
894 /* We translate LR and return */ 1054 /* We translate LR and return */
895 mflr r3 1055 mflr r3
896 tovirt(r3,r3) 1056 tovirt(r3,r3)
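The loop between a2_tlbinit_after_linear_map and a2_tlbinit_after_iprot_flush walks every TLB entry except the one currently mapping us and rewrites it with MAS1 VALID and IPROT cleared, advancing the EPN by 1GB whenever the way index wraps, so that the following tlbilx really does get rid of firmware-bolted entries. A rough C rendering of that structure (my reading of the assembly; the helper and field names are invented):

    #include <stdint.h>

    struct tlb_geom {
            unsigned int n_entry;    /* TLBnCFG[NENTRY] */
            unsigned int way_mask;   /* ways - 1, the inner loop mask */
            unsigned int cur_way;    /* ESEL of the entry mapping us now */
    };

    static void scrub_all_but_current(const struct tlb_geom *g, uint64_t epn,
                                      void (*write_invalid)(unsigned int way,
                                                            uint64_t epn))
    {
            for (unsigned int i = 1; i < g->n_entry; i++) {
                    write_invalid((i + g->cur_way) & g->way_mask, epn);
                    if (((i + 1) & g->way_mask) == 0)
                            epn += 1ULL << 30;   /* next 1GB EPN, next set */
            }
    }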
@@ -1040,3 +1200,33 @@ _GLOBAL(__setup_base_ivors)
1040 sync 1200 sync
1041 1201
1042 blr 1202 blr
1203
1204_GLOBAL(setup_perfmon_ivor)
1205 SET_IVOR(35, 0x260) /* Performance Monitor */
1206 blr
1207
1208_GLOBAL(setup_doorbell_ivors)
1209 SET_IVOR(36, 0x280) /* Processor Doorbell */
1210 SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
1211
1212 /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
1213 mfspr r10,SPRN_MMUCFG
1214 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
1215 beqlr
1216
1217 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1218 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1219 blr
1220
1221_GLOBAL(setup_ehv_ivors)
1222 /*
1223 * We may be running as a guest and lack E.HV even on a chip
1224 * that normally has it.
1225 */
1226 mfspr r10,SPRN_MMUCFG
1227 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
1228 beqlr
1229
1230 SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
1231 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
1232 blr
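setup_doorbell_ivors and setup_ehv_ivors both gate the guest-doorbell and hypervisor IVORs on MMUCFG[LPIDSIZE] being non-zero, which is how these cores advertise the E.HV category; the comment in setup_ehv_ivors notes a guest may lack it even on hardware that normally has it. As a sketch, with the mask value assumed from mmu-book3e.h:

    #include <stdbool.h>
    #include <stdint.h>

    #define MMUCFG_LPIDSIZE 0x00000f00u   /* assumed field mask, see mmu-book3e.h */

    /* Non-zero LPIDSIZE => the core implements embedded hypervisor (E.HV),
     * so IVOR38-41 exist and may be written. */
    static bool core_has_ehv(uint32_t mmucfg)
    {
            return (mmucfg & MMUCFG_LPIDSIZE) != 0;
    }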
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index aeb739e18769..a85f4874cba7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -37,23 +37,51 @@
37 .globl __start_interrupts 37 .globl __start_interrupts
38__start_interrupts: 38__start_interrupts:
39 39
40 STD_EXCEPTION_PSERIES(0x100, system_reset) 40 .globl system_reset_pSeries;
41system_reset_pSeries:
42 HMT_MEDIUM;
43 DO_KVM 0x100;
44 SET_SCRATCH0(r13)
45#ifdef CONFIG_PPC_P7_NAP
46BEGIN_FTR_SECTION
47 /* Running native on arch 2.06 or later, check if we are
48 * waking up from nap. We only handle no state loss and
49 * supervisor state loss. We do -not- handle hypervisor
50 * state loss at this time.
51 */
52 mfspr r13,SPRN_SRR1
53 rlwinm r13,r13,47-31,30,31
54 cmpwi cr0,r13,1
55 bne 1f
56 b .power7_wakeup_noloss
571: cmpwi cr0,r13,2
58 bne 1f
59 b .power7_wakeup_loss
60 /* Total loss of HV state is fatal; we could try to use the
61 * PIR to locate a PACA, then use an emergency stack etc...
62 * but for now, let's just stay stuck here
63 */
641: cmpwi cr0,r13,3
65 beq .
66END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
67#endif /* CONFIG_PPC_P7_NAP */
68 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
41 69
42 . = 0x200 70 . = 0x200
43_machine_check_pSeries: 71_machine_check_pSeries:
44 HMT_MEDIUM 72 HMT_MEDIUM
45 DO_KVM 0x200 73 DO_KVM 0x200
46 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ 74 SET_SCRATCH0(r13)
47 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 75 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
48 76
49 . = 0x300 77 . = 0x300
50 .globl data_access_pSeries 78 .globl data_access_pSeries
51data_access_pSeries: 79data_access_pSeries:
52 HMT_MEDIUM 80 HMT_MEDIUM
53 DO_KVM 0x300 81 DO_KVM 0x300
54 mtspr SPRN_SPRG_SCRATCH0,r13 82 SET_SCRATCH0(r13)
55BEGIN_FTR_SECTION 83BEGIN_FTR_SECTION
56 mfspr r13,SPRN_SPRG_PACA 84 GET_PACA(r13)
57 std r9,PACA_EXSLB+EX_R9(r13) 85 std r9,PACA_EXSLB+EX_R9(r13)
58 std r10,PACA_EXSLB+EX_R10(r13) 86 std r10,PACA_EXSLB+EX_R10(r13)
59 mfspr r10,SPRN_DAR 87 mfspr r10,SPRN_DAR
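The new system_reset_pSeries path above decodes the wake reason from SRR1 before choosing between power7_wakeup_noloss, power7_wakeup_loss, or spinning forever: the rlwinm with shift 16 and mask 30..31 is just an extract of SRR1 bits 46:47. In C terms (names are mine):

    #include <stdint.h>

    /* 1 = no state loss, 2 = supervisor state lost (restore from the frame
     * saved by power7_idle), 3 = hypervisor state lost (treated as fatal). */
    static unsigned int p7_nap_wake_state(uint64_t srr1)
    {
            return (unsigned int)(srr1 >> 16) & 3;
    }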
@@ -67,22 +95,22 @@ BEGIN_FTR_SECTION
67 std r11,PACA_EXGEN+EX_R11(r13) 95 std r11,PACA_EXGEN+EX_R11(r13)
68 ld r11,PACA_EXSLB+EX_R9(r13) 96 ld r11,PACA_EXSLB+EX_R9(r13)
69 std r12,PACA_EXGEN+EX_R12(r13) 97 std r12,PACA_EXGEN+EX_R12(r13)
70 mfspr r12,SPRN_SPRG_SCRATCH0 98 GET_SCRATCH0(r12)
71 std r10,PACA_EXGEN+EX_R10(r13) 99 std r10,PACA_EXGEN+EX_R10(r13)
72 std r11,PACA_EXGEN+EX_R9(r13) 100 std r11,PACA_EXGEN+EX_R9(r13)
73 std r12,PACA_EXGEN+EX_R13(r13) 101 std r12,PACA_EXGEN+EX_R13(r13)
74 EXCEPTION_PROLOG_PSERIES_1(data_access_common) 102 EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
75FTR_SECTION_ELSE 103FTR_SECTION_ELSE
76 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) 104 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
77ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) 105ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
78 106
79 . = 0x380 107 . = 0x380
80 .globl data_access_slb_pSeries 108 .globl data_access_slb_pSeries
81data_access_slb_pSeries: 109data_access_slb_pSeries:
82 HMT_MEDIUM 110 HMT_MEDIUM
83 DO_KVM 0x380 111 DO_KVM 0x380
84 mtspr SPRN_SPRG_SCRATCH0,r13 112 SET_SCRATCH0(r13)
85 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ 113 GET_PACA(r13)
86 std r3,PACA_EXSLB+EX_R3(r13) 114 std r3,PACA_EXSLB+EX_R3(r13)
87 mfspr r3,SPRN_DAR 115 mfspr r3,SPRN_DAR
88 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 116 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
@@ -95,7 +123,7 @@ data_access_slb_pSeries:
95 std r10,PACA_EXSLB+EX_R10(r13) 123 std r10,PACA_EXSLB+EX_R10(r13)
96 std r11,PACA_EXSLB+EX_R11(r13) 124 std r11,PACA_EXSLB+EX_R11(r13)
97 std r12,PACA_EXSLB+EX_R12(r13) 125 std r12,PACA_EXSLB+EX_R12(r13)
98 mfspr r10,SPRN_SPRG_SCRATCH0 126 GET_SCRATCH0(r10)
99 std r10,PACA_EXSLB+EX_R13(r13) 127 std r10,PACA_EXSLB+EX_R13(r13)
100 mfspr r12,SPRN_SRR1 /* and SRR1 */ 128 mfspr r12,SPRN_SRR1 /* and SRR1 */
101#ifndef CONFIG_RELOCATABLE 129#ifndef CONFIG_RELOCATABLE
@@ -113,15 +141,15 @@ data_access_slb_pSeries:
113 bctr 141 bctr
114#endif 142#endif
115 143
116 STD_EXCEPTION_PSERIES(0x400, instruction_access) 144 STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
117 145
118 . = 0x480 146 . = 0x480
119 .globl instruction_access_slb_pSeries 147 .globl instruction_access_slb_pSeries
120instruction_access_slb_pSeries: 148instruction_access_slb_pSeries:
121 HMT_MEDIUM 149 HMT_MEDIUM
122 DO_KVM 0x480 150 DO_KVM 0x480
123 mtspr SPRN_SPRG_SCRATCH0,r13 151 SET_SCRATCH0(r13)
124 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ 152 GET_PACA(r13)
125 std r3,PACA_EXSLB+EX_R3(r13) 153 std r3,PACA_EXSLB+EX_R3(r13)
126 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ 154 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
127 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 155 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
@@ -134,7 +162,7 @@ instruction_access_slb_pSeries:
134 std r10,PACA_EXSLB+EX_R10(r13) 162 std r10,PACA_EXSLB+EX_R10(r13)
135 std r11,PACA_EXSLB+EX_R11(r13) 163 std r11,PACA_EXSLB+EX_R11(r13)
136 std r12,PACA_EXSLB+EX_R12(r13) 164 std r12,PACA_EXSLB+EX_R12(r13)
137 mfspr r10,SPRN_SPRG_SCRATCH0 165 GET_SCRATCH0(r10)
138 std r10,PACA_EXSLB+EX_R13(r13) 166 std r10,PACA_EXSLB+EX_R13(r13)
139 mfspr r12,SPRN_SRR1 /* and SRR1 */ 167 mfspr r12,SPRN_SRR1 /* and SRR1 */
140#ifndef CONFIG_RELOCATABLE 168#ifndef CONFIG_RELOCATABLE
@@ -147,13 +175,29 @@ instruction_access_slb_pSeries:
147 bctr 175 bctr
148#endif 176#endif
149 177
150 MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) 178 /* We open code these as we can't have a ". = x" (even with
151 STD_EXCEPTION_PSERIES(0x600, alignment) 179 * x = ".") within a feature section
152 STD_EXCEPTION_PSERIES(0x700, program_check) 180 */
153 STD_EXCEPTION_PSERIES(0x800, fp_unavailable) 181 . = 0x500;
154 MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) 182 .globl hardware_interrupt_pSeries;
155 STD_EXCEPTION_PSERIES(0xa00, trap_0a) 183 .globl hardware_interrupt_hv;
156 STD_EXCEPTION_PSERIES(0xb00, trap_0b) 184hardware_interrupt_pSeries:
185hardware_interrupt_hv:
186 BEGIN_FTR_SECTION
187 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
188 FTR_SECTION_ELSE
189 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
190 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
191
192 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
193 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
194 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
195
196 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
197 MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
198
199 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
200 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
157 201
158 . = 0xc00 202 . = 0xc00
159 .globl system_call_pSeries 203 .globl system_call_pSeries
@@ -165,13 +209,13 @@ BEGIN_FTR_SECTION
165 beq- 1f 209 beq- 1f
166END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) 210END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
167 mr r9,r13 211 mr r9,r13
168 mfspr r13,SPRN_SPRG_PACA 212 GET_PACA(r13)
169 mfspr r11,SPRN_SRR0 213 mfspr r11,SPRN_SRR0
170 ld r12,PACAKBASE(r13)
171 ld r10,PACAKMSR(r13)
172 LOAD_HANDLER(r12, system_call_entry)
173 mtspr SPRN_SRR0,r12
174 mfspr r12,SPRN_SRR1 214 mfspr r12,SPRN_SRR1
215 ld r10,PACAKBASE(r13)
216 LOAD_HANDLER(r10, system_call_entry)
217 mtspr SPRN_SRR0,r10
218 ld r10,PACAKMSR(r13)
175 mtspr SPRN_SRR1,r10 219 mtspr SPRN_SRR1,r10
176 rfid 220 rfid
177 b . /* prevent speculative execution */ 221 b . /* prevent speculative execution */
@@ -183,8 +227,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
183 rfid /* return to userspace */ 227 rfid /* return to userspace */
184 b . 228 b .
185 229
186 STD_EXCEPTION_PSERIES(0xd00, single_step) 230 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
187 STD_EXCEPTION_PSERIES(0xe00, trap_0e) 231
232 /* At 0xe??? we have a bunch of hypervisor exceptions; we branch
233 * out of line to handle them
234 */
235 . = 0xe00
236 b h_data_storage_hv
237 . = 0xe20
238 b h_instr_storage_hv
239 . = 0xe40
240 b emulation_assist_hv
241 . = 0xe50
242 b hmi_exception_hv
243 . = 0xe60
244 b hmi_exception_hv
188 245
189 /* We need to deal with the Altivec unavailable exception 246 /* We need to deal with the Altivec unavailable exception
190 * here which is at 0xf20, thus in the middle of the 247 * here which is at 0xf20, thus in the middle of the
@@ -193,39 +250,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
193 */ 250 */
194performance_monitor_pSeries_1: 251performance_monitor_pSeries_1:
195 . = 0xf00 252 . = 0xf00
196 DO_KVM 0xf00
197 b performance_monitor_pSeries 253 b performance_monitor_pSeries
198 254
199altivec_unavailable_pSeries_1: 255altivec_unavailable_pSeries_1:
200 . = 0xf20 256 . = 0xf20
201 DO_KVM 0xf20
202 b altivec_unavailable_pSeries 257 b altivec_unavailable_pSeries
203 258
204vsx_unavailable_pSeries_1: 259vsx_unavailable_pSeries_1:
205 . = 0xf40 260 . = 0xf40
206 DO_KVM 0xf40
207 b vsx_unavailable_pSeries 261 b vsx_unavailable_pSeries
208 262
209#ifdef CONFIG_CBE_RAS 263#ifdef CONFIG_CBE_RAS
210 HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) 264 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
211#endif /* CONFIG_CBE_RAS */ 265#endif /* CONFIG_CBE_RAS */
212 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 266 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
213#ifdef CONFIG_CBE_RAS 267#ifdef CONFIG_CBE_RAS
214 HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) 268 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
215#endif /* CONFIG_CBE_RAS */ 269#endif /* CONFIG_CBE_RAS */
216 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 270 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
217#ifdef CONFIG_CBE_RAS 271#ifdef CONFIG_CBE_RAS
218 HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) 272 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
219#endif /* CONFIG_CBE_RAS */ 273#endif /* CONFIG_CBE_RAS */
220 274
221 . = 0x3000 275 . = 0x3000
222 276
223/*** pSeries interrupt support ***/ 277/*** Out of line interrupts support ***/
278
279 /* moved from 0xe00 */
280 STD_EXCEPTION_HV(., 0xe00, h_data_storage)
281 STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
282 STD_EXCEPTION_HV(., 0xe40, emulation_assist)
283 STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
224 284
225 /* moved from 0xf00 */ 285 /* moved from 0xf00 */
226 STD_EXCEPTION_PSERIES(., performance_monitor) 286 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
227 STD_EXCEPTION_PSERIES(., altivec_unavailable) 287 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
228 STD_EXCEPTION_PSERIES(., vsx_unavailable) 288 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
229 289
230/* 290/*
231 * An interrupt came in while soft-disabled; clear EE in SRR1, 291 * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -240,17 +300,30 @@ masked_interrupt:
240 rotldi r10,r10,16 300 rotldi r10,r10,16
241 mtspr SPRN_SRR1,r10 301 mtspr SPRN_SRR1,r10
242 ld r10,PACA_EXGEN+EX_R10(r13) 302 ld r10,PACA_EXGEN+EX_R10(r13)
243 mfspr r13,SPRN_SPRG_SCRATCH0 303 GET_SCRATCH0(r13)
244 rfid 304 rfid
245 b . 305 b .
246 306
307masked_Hinterrupt:
308 stb r10,PACAHARDIRQEN(r13)
309 mtcrf 0x80,r9
310 ld r9,PACA_EXGEN+EX_R9(r13)
311 mfspr r10,SPRN_HSRR1
312 rldicl r10,r10,48,1 /* clear MSR_EE */
313 rotldi r10,r10,16
314 mtspr SPRN_HSRR1,r10
315 ld r10,PACA_EXGEN+EX_R10(r13)
316 GET_SCRATCH0(r13)
317 hrfid
318 b .
319
247 .align 7 320 .align 7
248do_stab_bolted_pSeries: 321do_stab_bolted_pSeries:
249 std r11,PACA_EXSLB+EX_R11(r13) 322 std r11,PACA_EXSLB+EX_R11(r13)
250 std r12,PACA_EXSLB+EX_R12(r13) 323 std r12,PACA_EXSLB+EX_R12(r13)
251 mfspr r10,SPRN_SPRG_SCRATCH0 324 GET_SCRATCH0(r10)
252 std r10,PACA_EXSLB+EX_R13(r13) 325 std r10,PACA_EXSLB+EX_R13(r13)
253 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted) 326 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
254 327
255#ifdef CONFIG_PPC_PSERIES 328#ifdef CONFIG_PPC_PSERIES
256/* 329/*
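masked_Hinterrupt above mirrors masked_interrupt but works on HSRR1 and returns with hrfid. The rldicl/rotldi pair is a branch-free idiom for clearing a single MSR bit; its net effect is nothing more than masking out MSR_EE before the return, as this sketch shows:

    #include <stdint.h>

    #define MSR_EE 0x8000ull   /* external interrupt enable */

    /* rldicl rX,rX,48,1 followed by rotldi rX,rX,16 rotates EE up to the MSB,
     * clears it, and rotates back: i.e. simply HSRR1 &= ~MSR_EE. */
    static uint64_t clear_ee(uint64_t hsrr1)
    {
            return hsrr1 & ~MSR_EE;
    }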
@@ -260,15 +333,15 @@ do_stab_bolted_pSeries:
260 .align 7 333 .align 7
261system_reset_fwnmi: 334system_reset_fwnmi:
262 HMT_MEDIUM 335 HMT_MEDIUM
263 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ 336 SET_SCRATCH0(r13) /* save r13 */
264 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 337 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
265 338
266 .globl machine_check_fwnmi 339 .globl machine_check_fwnmi
267 .align 7 340 .align 7
268machine_check_fwnmi: 341machine_check_fwnmi:
269 HMT_MEDIUM 342 HMT_MEDIUM
270 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ 343 SET_SCRATCH0(r13) /* save r13 */
271 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 344 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
272 345
273#endif /* CONFIG_PPC_PSERIES */ 346#endif /* CONFIG_PPC_PSERIES */
274 347
@@ -282,7 +355,7 @@ slb_miss_user_pseries:
282 std r10,PACA_EXGEN+EX_R10(r13) 355 std r10,PACA_EXGEN+EX_R10(r13)
283 std r11,PACA_EXGEN+EX_R11(r13) 356 std r11,PACA_EXGEN+EX_R11(r13)
284 std r12,PACA_EXGEN+EX_R12(r13) 357 std r12,PACA_EXGEN+EX_R12(r13)
285 mfspr r10,SPRG_SCRATCH0 358 GET_SCRATCH0(r10)
286 ld r11,PACA_EXSLB+EX_R9(r13) 359 ld r11,PACA_EXSLB+EX_R9(r13)
287 ld r12,PACA_EXSLB+EX_R3(r13) 360 ld r12,PACA_EXSLB+EX_R3(r13)
288 std r10,PACA_EXGEN+EX_R13(r13) 361 std r10,PACA_EXGEN+EX_R13(r13)
@@ -342,6 +415,8 @@ machine_check_common:
342 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 415 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
343 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 416 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
344 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 417 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
418 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
419 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
345 STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) 420 STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
346 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 421 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
347#ifdef CONFIG_ALTIVEC 422#ifdef CONFIG_ALTIVEC
@@ -386,9 +461,24 @@ bad_stack:
386 std r12,_XER(r1) 461 std r12,_XER(r1)
387 SAVE_GPR(0,r1) 462 SAVE_GPR(0,r1)
388 SAVE_GPR(2,r1) 463 SAVE_GPR(2,r1)
389 SAVE_4GPRS(3,r1) 464 ld r10,EX_R3(r3)
390 SAVE_2GPRS(7,r1) 465 std r10,GPR3(r1)
391 SAVE_10GPRS(12,r1) 466 SAVE_GPR(4,r1)
467 SAVE_4GPRS(5,r1)
468 ld r9,EX_R9(r3)
469 ld r10,EX_R10(r3)
470 SAVE_2GPRS(9,r1)
471 ld r9,EX_R11(r3)
472 ld r10,EX_R12(r3)
473 ld r11,EX_R13(r3)
474 std r9,GPR11(r1)
475 std r10,GPR12(r1)
476 std r11,GPR13(r1)
477BEGIN_FTR_SECTION
478 ld r10,EX_CFAR(r3)
479 std r10,ORIG_GPR3(r1)
480END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
481 SAVE_8GPRS(14,r1)
392 SAVE_10GPRS(22,r1) 482 SAVE_10GPRS(22,r1)
393 lhz r12,PACA_TRAP_SAVE(r13) 483 lhz r12,PACA_TRAP_SAVE(r13)
394 std r12,_TRAP(r1) 484 std r12,_TRAP(r1)
@@ -397,6 +487,9 @@ bad_stack:
397 li r12,0 487 li r12,0
398 std r12,0(r11) 488 std r12,0(r11)
399 ld r2,PACATOC(r13) 489 ld r2,PACATOC(r13)
490 ld r11,exception_marker@toc(r2)
491 std r12,RESULT(r1)
492 std r11,STACK_FRAME_OVERHEAD-16(r1)
4001: addi r3,r1,STACK_FRAME_OVERHEAD 4931: addi r3,r1,STACK_FRAME_OVERHEAD
401 bl .kernel_bad_stack 494 bl .kernel_bad_stack
402 b 1b 495 b 1b
@@ -419,6 +512,19 @@ data_access_common:
419 li r5,0x300 512 li r5,0x300
420 b .do_hash_page /* Try to handle as hpte fault */ 513 b .do_hash_page /* Try to handle as hpte fault */
421 514
515 .align 7
516 .globl h_data_storage_common
517h_data_storage_common:
518 mfspr r10,SPRN_HDAR
519 std r10,PACA_EXGEN+EX_DAR(r13)
520 mfspr r10,SPRN_HDSISR
521 stw r10,PACA_EXGEN+EX_DSISR(r13)
522 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
523 bl .save_nvgprs
524 addi r3,r1,STACK_FRAME_OVERHEAD
525 bl .unknown_exception
526 b .ret_from_except
527
422 .align 7 528 .align 7
423 .globl instruction_access_common 529 .globl instruction_access_common
424instruction_access_common: 530instruction_access_common:
@@ -428,6 +534,8 @@ instruction_access_common:
428 li r5,0x400 534 li r5,0x400
429 b .do_hash_page /* Try to handle as hpte fault */ 535 b .do_hash_page /* Try to handle as hpte fault */
430 536
537 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
538
431/* 539/*
432 * Here is the common SLB miss user that is used when going to virtual 540 * Here is the common SLB miss user that is used when going to virtual
433 * mode for SLB misses, that is currently not used 541 * mode for SLB misses, that is currently not used
@@ -750,7 +858,7 @@ _STATIC(do_hash_page)
750BEGIN_FTR_SECTION 858BEGIN_FTR_SECTION
751 andis. r0,r4,0x0020 /* Is it a segment table fault? */ 859 andis. r0,r4,0x0020 /* Is it a segment table fault? */
752 bne- do_ste_alloc /* If so handle it */ 860 bne- do_ste_alloc /* If so handle it */
753END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 861END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
754 862
755 clrrdi r11,r1,THREAD_SHIFT 863 clrrdi r11,r1,THREAD_SHIFT
756 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 864 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index c5c24beb8387..ba250d505e07 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -805,19 +805,6 @@ _ENTRY(copy_and_flush)
805 blr 805 blr
806 806
807#ifdef CONFIG_SMP 807#ifdef CONFIG_SMP
808#ifdef CONFIG_GEMINI
809 .globl __secondary_start_gemini
810__secondary_start_gemini:
811 mfspr r4,SPRN_HID0
812 ori r4,r4,HID0_ICFI
813 li r3,0
814 ori r3,r3,HID0_ICE
815 andc r4,r4,r3
816 mtspr SPRN_HID0,r4
817 sync
818 b __secondary_start
819#endif /* CONFIG_GEMINI */
820
821 .globl __secondary_start_mpc86xx 808 .globl __secondary_start_mpc86xx
822__secondary_start_mpc86xx: 809__secondary_start_mpc86xx:
823 mfspr r3, SPRN_PIR 810 mfspr r3, SPRN_PIR
@@ -890,15 +877,6 @@ __secondary_start:
890 mtspr SPRN_SRR1,r4 877 mtspr SPRN_SRR1,r4
891 SYNC 878 SYNC
892 RFI 879 RFI
893
894_GLOBAL(start_secondary_resume)
895 /* Reset stack */
896 rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
897 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
898 li r3,0
899 std r3,0(r1) /* Zero the stack frame pointer */
900 bl start_secondary
901 b .
902#endif /* CONFIG_SMP */ 880#endif /* CONFIG_SMP */
903 881
904#ifdef CONFIG_KVM_BOOK3S_HANDLER 882#ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3a319f9c9d3e..ba504099844a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -147,6 +147,8 @@ __secondary_hold:
147 mtctr r4 147 mtctr r4
148 mr r3,r24 148 mr r3,r24
149 li r4,0 149 li r4,0
150 /* Make sure that patched code is visible */
151 isync
150 bctr 152 bctr
151#else 153#else
152 BUG_OPCODE 154 BUG_OPCODE
@@ -216,19 +218,25 @@ generic_secondary_common_init:
216 */ 218 */
217 LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ 219 LOAD_REG_ADDR(r13, paca) /* Load paca pointer */
218 ld r13,0(r13) /* Get base vaddr of paca array */ 220 ld r13,0(r13) /* Get base vaddr of paca array */
221#ifndef CONFIG_SMP
222 addi r13,r13,PACA_SIZE /* know r13 if used accidentally */
223 b .kexec_wait /* wait for next kernel if !SMP */
224#else
225 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
226 lwz r7,0(r7) /* also the max paca allocated */
219 li r5,0 /* logical cpu id */ 227 li r5,0 /* logical cpu id */
2201: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ 2281: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
221 cmpw r6,r24 /* Compare to our id */ 229 cmpw r6,r24 /* Compare to our id */
222 beq 2f 230 beq 2f
223 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ 231 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
224 addi r5,r5,1 232 addi r5,r5,1
225 cmpwi r5,NR_CPUS 233 cmpw r5,r7 /* Check if more pacas exist */
226 blt 1b 234 blt 1b
227 235
228 mr r3,r24 /* not found, copy phys to r3 */ 236 mr r3,r24 /* not found, copy phys to r3 */
229 b .kexec_wait /* next kernel might do better */ 237 b .kexec_wait /* next kernel might do better */
230 238
2312: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */ 2392: SET_PACA(r13)
232#ifdef CONFIG_PPC_BOOK3E 240#ifdef CONFIG_PPC_BOOK3E
233 addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ 241 addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
234 mtspr SPRN_SPRG_TLB_EXFRAME,r12 242 mtspr SPRN_SPRG_TLB_EXFRAME,r12
@@ -236,34 +244,39 @@ generic_secondary_common_init:
236 244
237 /* From now on, r24 is expected to be logical cpuid */ 245 /* From now on, r24 is expected to be logical cpuid */
238 mr r24,r5 246 mr r24,r5
2393: HMT_LOW
240 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
241 /* start. */
242
243#ifndef CONFIG_SMP
244 b 3b /* Never go on non-SMP */
245#else
246 cmpwi 0,r23,0
247 beq 3b /* Loop until told to go */
248
249 sync /* order paca.run and cur_cpu_spec */
250 247
251 /* See if we need to call a cpu state restore handler */ 248 /* See if we need to call a cpu state restore handler */
252 LOAD_REG_ADDR(r23, cur_cpu_spec) 249 LOAD_REG_ADDR(r23, cur_cpu_spec)
253 ld r23,0(r23) 250 ld r23,0(r23)
254 ld r23,CPU_SPEC_RESTORE(r23) 251 ld r23,CPU_SPEC_RESTORE(r23)
255 cmpdi 0,r23,0 252 cmpdi 0,r23,0
256 beq 4f 253 beq 3f
257 ld r23,0(r23) 254 ld r23,0(r23)
258 mtctr r23 255 mtctr r23
259 bctrl 256 bctrl
260 257
2614: /* Create a temp kernel stack for use before relocation is on. */ 2583: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */
259 lwarx r4,0,r3
260 subi r4,r4,1
261 stwcx. r4,0,r3
262 bne 3b
263 isync
264
2654: HMT_LOW
266 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
267 /* start. */
268 cmpwi 0,r23,0
269 beq 4b /* Loop until told to go */
270
271 sync /* order paca.run and cur_cpu_spec */
272 isync /* In case code patching happened */
273
274 /* Create a temp kernel stack for use before relocation is on. */
262 ld r1,PACAEMERGSP(r13) 275 ld r1,PACAEMERGSP(r13)
263 subi r1,r1,STACK_FRAME_OVERHEAD 276 subi r1,r1,STACK_FRAME_OVERHEAD
264 277
265 b __secondary_start 278 b __secondary_start
266#endif 279#endif /* SMP */
267 280
268/* 281/*
269 * Turn the MMU off. 282 * Turn the MMU off.
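The new lwarx/stwcx. loop in generic_secondary_common_init decrements boot_cpu_count exactly once per secondary that reaches the hold loop, so the boot CPU can tell how many secondaries have come up. It is the classic load-reserve/store-conditional retry pattern; in C11 terms it is a single atomic subtraction:

    #include <stdatomic.h>

    /* Equivalent of the lwarx/subi/stwcx./bne sequence above. */
    static void note_secondary_arrived(atomic_int *boot_cpu_count)
    {
            atomic_fetch_sub(boot_cpu_count, 1);
    }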
@@ -534,7 +547,7 @@ _GLOBAL(pmac_secondary_start)
534 ld r4,0(r4) /* Get base vaddr of paca array */ 547 ld r4,0(r4) /* Get base vaddr of paca array */
535 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 548 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
536 add r13,r13,r4 /* for this processor. */ 549 add r13,r13,r4 /* for this processor. */
537 mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ 550 SET_PACA(r13) /* Save vaddr of paca in an SPRG*/
538 551
539 /* Mark interrupts soft and hard disabled (they might be enabled 552 /* Mark interrupts soft and hard disabled (they might be enabled
540 * in the PACA when doing hotplug) 553 * in the PACA when doing hotplug)
@@ -645,7 +658,7 @@ _GLOBAL(enable_64b_mode)
645 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ 658 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
646 mtmsr r11 659 mtmsr r11
647#else /* CONFIG_PPC_BOOK3E */ 660#else /* CONFIG_PPC_BOOK3E */
648 li r12,(MSR_SF | MSR_ISF)@highest 661 li r12,(MSR_64BIT | MSR_ISF)@highest
649 sldi r12,r12,48 662 sldi r12,r12,48
650 or r11,r11,r12 663 or r11,r11,r12
651 mtmsrd r11 664 mtmsrd r11
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
new file mode 100644
index 000000000000..f8f0bc7f1d4f
--- /dev/null
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -0,0 +1,97 @@
1/*
2 * This file contains the power_save function for POWER7 CPUs.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/threads.h>
11#include <asm/processor.h>
12#include <asm/page.h>
13#include <asm/cputable.h>
14#include <asm/thread_info.h>
15#include <asm/ppc_asm.h>
16#include <asm/asm-offsets.h>
17#include <asm/ppc-opcode.h>
18
19#undef DEBUG
20
21 .text
22
23_GLOBAL(power7_idle)
24 /* Now check if user or arch enabled NAP mode */
25 LOAD_REG_ADDRBASE(r3,powersave_nap)
26 lwz r4,ADDROFF(powersave_nap)(r3)
27 cmpwi 0,r4,0
28 beqlr
29
30 /* NAP is a state loss, so we create a regs frame on the
31 * stack, fill it up with the state we care about and
32 * stick a pointer to it in PACAR1. We really only
33 * need to save PC, some CR bits and the NV GPRs,
34 * but for now an interrupt frame will do.
35 */
36 mflr r0
37 std r0,16(r1)
38 stdu r1,-INT_FRAME_SIZE(r1)
39 std r0,_LINK(r1)
40 std r0,_NIP(r1)
41
42#ifndef CONFIG_SMP
43 /* Make sure FPU, VSX etc... are flushed as we may lose
44 * state when going to nap mode
45 */
46 bl .discard_lazy_cpu_state
47#endif /* CONFIG_SMP */
48
49 /* Hard disable interrupts */
50 mfmsr r9
51 rldicl r9,r9,48,1
52 rotldi r9,r9,16
53 mtmsrd r9,1 /* hard-disable interrupts */
54 li r0,0
55 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
56 stb r0,PACAHARDIRQEN(r13)
57
58 /* Continue saving state */
59 SAVE_GPR(2, r1)
60 SAVE_NVGPRS(r1)
61 mfcr r3
62 std r3,_CCR(r1)
63 std r9,_MSR(r1)
64 std r1,PACAR1(r13)
65
66 /* Magic NAP mode enter sequence */
67 std r0,0(r1)
68 ptesync
69 ld r0,0(r1)
701: cmp cr0,r0,r0
71 bne 1b
72 PPC_NAP
73 b .
74
75_GLOBAL(power7_wakeup_loss)
76 GET_PACA(r13)
77 ld r1,PACAR1(r13)
78 REST_NVGPRS(r1)
79 REST_GPR(2, r1)
80 ld r3,_CCR(r1)
81 ld r4,_MSR(r1)
82 ld r5,_NIP(r1)
83 addi r1,r1,INT_FRAME_SIZE
84 mtcr r3
85 mtspr SPRN_SRR1,r4
86 mtspr SPRN_SRR0,r5
87 rfid
88
89_GLOBAL(power7_wakeup_noloss)
90 GET_PACA(r13)
91 ld r1,PACAR1(r13)
92 ld r4,_MSR(r1)
93 ld r5,_NIP(r1)
94 addi r1,r1,INT_FRAME_SIZE
95 mtspr SPRN_SRR1,r4
96 mtspr SPRN_SRR0,r5
97 rfid
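idle_power7.S only provides the nap entry and wakeup stubs; platform setup code still has to select power7_idle as the idle routine. A plausible wiring, not part of this diff, gated on the same CPU_FTR_HVMODE_206 feature the wakeup path checks (treat the exact condition and location as assumptions):

    #include <linux/init.h>
    #include <asm/machdep.h>
    #include <asm/cputable.h>

    extern void power7_idle(void);

    static void __init wire_up_power7_idle(void)
    {
            if (cpu_has_feature(CPU_FTR_HVMODE_206))
                    ppc_md.power_save = power7_idle;
    }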
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 5c1118e31940..ffafaea3d261 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -17,8 +17,7 @@
17#include <asm/machdep.h> 17#include <asm/machdep.h>
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/ppc-pci.h> 19#include <asm/ppc-pci.h>
20 20#include <asm/io-workarounds.h>
21#include "io-workarounds.h"
22 21
23#define IOWA_MAX_BUS 8 22#define IOWA_MAX_BUS 8
24 23
@@ -145,7 +144,19 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
145 return res; 144 return res;
146} 145}
147 146
148/* Regist new bus to support workaround */ 147/* Enable IO workaround */
148static void __devinit io_workaround_init(void)
149{
150 static int io_workaround_inited;
151
152 if (io_workaround_inited)
153 return;
154 ppc_pci_io = iowa_pci_io;
155 ppc_md.ioremap = iowa_ioremap;
156 io_workaround_inited = 1;
157}
158
159/* Register new bus to support workaround */
149void __devinit iowa_register_bus(struct pci_controller *phb, 160void __devinit iowa_register_bus(struct pci_controller *phb,
150 struct ppc_pci_io *ops, 161 struct ppc_pci_io *ops,
151 int (*initfunc)(struct iowa_bus *, void *), void *data) 162 int (*initfunc)(struct iowa_bus *, void *), void *data)
@@ -153,6 +164,8 @@ void __devinit iowa_register_bus(struct pci_controller *phb,
153 struct iowa_bus *bus; 164 struct iowa_bus *bus;
154 struct device_node *np = phb->dn; 165 struct device_node *np = phb->dn;
155 166
167 io_workaround_init();
168
156 if (iowa_bus_count >= IOWA_MAX_BUS) { 169 if (iowa_bus_count >= IOWA_MAX_BUS) {
157 pr_err("IOWA:Too many pci bridges, " 170 pr_err("IOWA:Too many pci bridges, "
158 "workarounds disabled for %s\n", np->full_name); 171 "workarounds disabled for %s\n", np->full_name);
@@ -162,6 +175,7 @@ void __devinit iowa_register_bus(struct pci_controller *phb,
162 bus = &iowa_busses[iowa_bus_count]; 175 bus = &iowa_busses[iowa_bus_count];
163 bus->phb = phb; 176 bus->phb = phb;
164 bus->ops = ops; 177 bus->ops = ops;
178 bus->private = data;
165 179
166 if (initfunc) 180 if (initfunc)
167 if ((*initfunc)(bus, data)) 181 if ((*initfunc)(bus, data))
@@ -172,14 +186,3 @@ void __devinit iowa_register_bus(struct pci_controller *phb,
172 pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); 186 pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name);
173} 187}
174 188
175/* enable IO workaround */
176void __devinit io_workaround_init(void)
177{
178 static int io_workaround_inited;
179
180 if (io_workaround_inited)
181 return;
182 ppc_pci_io = iowa_pci_io;
183 ppc_md.ioremap = iowa_ioremap;
184 io_workaround_inited = 1;
185}
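With io_workaround_init() now static and called from iowa_register_bus(), a platform only has to register its bus; the first registration flips ppc_pci_io and ppc_md.ioremap over to the workaround variants. A sketch of a caller (my_bus_ops and my_bus_init are hypothetical):

    #include <asm/pci-bridge.h>
    #include <asm/io-workarounds.h>

    extern struct ppc_pci_io my_bus_ops;
    extern int my_bus_init(struct iowa_bus *bus, void *data);

    static void my_platform_setup_phb(struct pci_controller *phb)
    {
            /* No explicit io_workaround_init() call is needed any more. */
            iowa_register_bus(phb, &my_bus_ops, my_bus_init, NULL);
    }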
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f621b7d2d869..a24d37d4cf51 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,7 +66,6 @@
66#include <asm/ptrace.h> 66#include <asm/ptrace.h>
67#include <asm/machdep.h> 67#include <asm/machdep.h>
68#include <asm/udbg.h> 68#include <asm/udbg.h>
69#include <asm/dbell.h>
70#include <asm/smp.h> 69#include <asm/smp.h>
71 70
72#ifdef CONFIG_PPC64 71#ifdef CONFIG_PPC64
@@ -160,7 +159,8 @@ notrace void arch_local_irq_restore(unsigned long en)
160 159
161#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) 160#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
162 /* Check for pending doorbell interrupts and resend to ourself */ 161 /* Check for pending doorbell interrupts and resend to ourself */
163 doorbell_check_self(); 162 if (cpu_has_feature(CPU_FTR_DBELL))
163 smp_muxed_ipi_resend();
164#endif 164#endif
165 165
166 /* 166 /*
@@ -397,24 +397,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
397void exc_lvl_ctx_init(void) 397void exc_lvl_ctx_init(void)
398{ 398{
399 struct thread_info *tp; 399 struct thread_info *tp;
400 int i, hw_cpu; 400 int i, cpu_nr;
401 401
402 for_each_possible_cpu(i) { 402 for_each_possible_cpu(i) {
403 hw_cpu = get_hard_smp_processor_id(i); 403#ifdef CONFIG_PPC64
404 memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE); 404 cpu_nr = i;
405 tp = critirq_ctx[hw_cpu]; 405#else
406 tp->cpu = i; 406 cpu_nr = get_hard_smp_processor_id(i);
407#endif
408 memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
409 tp = critirq_ctx[cpu_nr];
410 tp->cpu = cpu_nr;
407 tp->preempt_count = 0; 411 tp->preempt_count = 0;
408 412
409#ifdef CONFIG_BOOKE 413#ifdef CONFIG_BOOKE
410 memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE); 414 memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
411 tp = dbgirq_ctx[hw_cpu]; 415 tp = dbgirq_ctx[cpu_nr];
412 tp->cpu = i; 416 tp->cpu = cpu_nr;
413 tp->preempt_count = 0; 417 tp->preempt_count = 0;
414 418
415 memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE); 419 memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
416 tp = mcheckirq_ctx[hw_cpu]; 420 tp = mcheckirq_ctx[cpu_nr];
417 tp->cpu = i; 421 tp->cpu = cpu_nr;
418 tp->preempt_count = HARDIRQ_OFFSET; 422 tp->preempt_count = HARDIRQ_OFFSET;
419#endif 423#endif
420 } 424 }
@@ -477,20 +481,41 @@ void do_softirq(void)
477 * IRQ controller and virtual interrupts 481 * IRQ controller and virtual interrupts
478 */ 482 */
479 483
484/* The main irq map itself is an array of NR_IRQS entries containing the
485 * associated host and irq number. An entry with a host of NULL is free.
486 * An entry can be allocated if it is free; the allocator first sets
487 * hwirq to the host's invalid irq number and then fills in ops.
488 */
489struct irq_map_entry {
490 irq_hw_number_t hwirq;
491 struct irq_host *host;
492};
493
480static LIST_HEAD(irq_hosts); 494static LIST_HEAD(irq_hosts);
481static DEFINE_RAW_SPINLOCK(irq_big_lock); 495static DEFINE_RAW_SPINLOCK(irq_big_lock);
482static unsigned int revmap_trees_allocated;
483static DEFINE_MUTEX(revmap_trees_mutex); 496static DEFINE_MUTEX(revmap_trees_mutex);
484struct irq_map_entry irq_map[NR_IRQS]; 497static struct irq_map_entry irq_map[NR_IRQS];
485static unsigned int irq_virq_count = NR_IRQS; 498static unsigned int irq_virq_count = NR_IRQS;
486static struct irq_host *irq_default_host; 499static struct irq_host *irq_default_host;
487 500
501irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
502{
503 return irq_map[d->irq].hwirq;
504}
505EXPORT_SYMBOL_GPL(irqd_to_hwirq);
506
488irq_hw_number_t virq_to_hw(unsigned int virq) 507irq_hw_number_t virq_to_hw(unsigned int virq)
489{ 508{
490 return irq_map[virq].hwirq; 509 return irq_map[virq].hwirq;
491} 510}
492EXPORT_SYMBOL_GPL(virq_to_hw); 511EXPORT_SYMBOL_GPL(virq_to_hw);
493 512
513bool virq_is_host(unsigned int virq, struct irq_host *host)
514{
515 return irq_map[virq].host == host;
516}
517EXPORT_SYMBOL_GPL(virq_is_host);
518
494static int default_irq_host_match(struct irq_host *h, struct device_node *np) 519static int default_irq_host_match(struct irq_host *h, struct device_node *np)
495{ 520{
496 return h->of_node != NULL && h->of_node == np; 521 return h->of_node != NULL && h->of_node == np;
@@ -511,7 +536,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
511 /* Allocate structure and revmap table if using linear mapping */ 536 /* Allocate structure and revmap table if using linear mapping */
512 if (revmap_type == IRQ_HOST_MAP_LINEAR) 537 if (revmap_type == IRQ_HOST_MAP_LINEAR)
513 size += revmap_arg * sizeof(unsigned int); 538 size += revmap_arg * sizeof(unsigned int);
514 host = zalloc_maybe_bootmem(size, GFP_KERNEL); 539 host = kzalloc(size, GFP_KERNEL);
515 if (host == NULL) 540 if (host == NULL)
516 return NULL; 541 return NULL;
517 542
@@ -561,14 +586,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
561 irq_map[i].host = host; 586 irq_map[i].host = host;
562 smp_wmb(); 587 smp_wmb();
563 588
564 /* Clear norequest flags */
565 irq_clear_status_flags(i, IRQ_NOREQUEST);
566
567 /* Legacy flags are left to default at this point, 589 /* Legacy flags are left to default at this point,
568 * one can then use irq_create_mapping() to 590 * one can then use irq_create_mapping() to
569 * explicitly change them 591 * explicitly change them
570 */ 592 */
571 ops->map(host, i, i); 593 ops->map(host, i, i);
594
595 /* Clear norequest flags */
596 irq_clear_status_flags(i, IRQ_NOREQUEST);
572 } 597 }
573 break; 598 break;
574 case IRQ_HOST_MAP_LINEAR: 599 case IRQ_HOST_MAP_LINEAR:
@@ -579,6 +604,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
579 smp_wmb(); 604 smp_wmb();
580 host->revmap_data.linear.revmap = rmap; 605 host->revmap_data.linear.revmap = rmap;
581 break; 606 break;
607 case IRQ_HOST_MAP_TREE:
608 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
609 break;
582 default: 610 default:
583 break; 611 break;
584 } 612 }
@@ -636,8 +664,6 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
636 goto error; 664 goto error;
637 } 665 }
638 666
639 irq_clear_status_flags(virq, IRQ_NOREQUEST);
640
641 /* map it */ 667 /* map it */
642 smp_wmb(); 668 smp_wmb();
643 irq_map[virq].hwirq = hwirq; 669 irq_map[virq].hwirq = hwirq;
@@ -648,6 +674,8 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
648 goto errdesc; 674 goto errdesc;
649 } 675 }
650 676
677 irq_clear_status_flags(virq, IRQ_NOREQUEST);
678
651 return 0; 679 return 0;
652 680
653errdesc: 681errdesc:
@@ -704,8 +732,6 @@ unsigned int irq_create_mapping(struct irq_host *host,
704 */ 732 */
705 virq = irq_find_mapping(host, hwirq); 733 virq = irq_find_mapping(host, hwirq);
706 if (virq != NO_IRQ) { 734 if (virq != NO_IRQ) {
707 if (host->ops->remap)
708 host->ops->remap(host, virq, hwirq);
709 pr_debug("irq: -> existing mapping on virq %d\n", virq); 735 pr_debug("irq: -> existing mapping on virq %d\n", virq);
710 return virq; 736 return virq;
711 } 737 }
@@ -786,14 +812,15 @@ void irq_dispose_mapping(unsigned int virq)
786 return; 812 return;
787 813
788 host = irq_map[virq].host; 814 host = irq_map[virq].host;
789 WARN_ON (host == NULL); 815 if (WARN_ON(host == NULL))
790 if (host == NULL)
791 return; 816 return;
792 817
793 /* Never unmap legacy interrupts */ 818 /* Never unmap legacy interrupts */
794 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 819 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
795 return; 820 return;
796 821
822 irq_set_status_flags(virq, IRQ_NOREQUEST);
823
797 /* remove chip and handler */ 824 /* remove chip and handler */
798 irq_set_chip_and_handler(virq, NULL, NULL); 825 irq_set_chip_and_handler(virq, NULL, NULL);
799 826
@@ -813,13 +840,6 @@ void irq_dispose_mapping(unsigned int virq)
813 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 840 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
814 break; 841 break;
815 case IRQ_HOST_MAP_TREE: 842 case IRQ_HOST_MAP_TREE:
816 /*
817 * Check if radix tree allocated yet, if not then nothing to
818 * remove.
819 */
820 smp_rmb();
821 if (revmap_trees_allocated < 1)
822 break;
823 mutex_lock(&revmap_trees_mutex); 843 mutex_lock(&revmap_trees_mutex);
824 radix_tree_delete(&host->revmap_data.tree, hwirq); 844 radix_tree_delete(&host->revmap_data.tree, hwirq);
825 mutex_unlock(&revmap_trees_mutex); 845 mutex_unlock(&revmap_trees_mutex);
@@ -830,8 +850,6 @@ void irq_dispose_mapping(unsigned int virq)
830 smp_mb(); 850 smp_mb();
831 irq_map[virq].hwirq = host->inval_irq; 851 irq_map[virq].hwirq = host->inval_irq;
832 852
833 irq_set_status_flags(virq, IRQ_NOREQUEST);
834
835 irq_free_descs(virq, 1); 853 irq_free_descs(virq, 1);
836 /* Free it */ 854 /* Free it */
837 irq_free_virt(virq, 1); 855 irq_free_virt(virq, 1);
@@ -877,16 +895,9 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
877 struct irq_map_entry *ptr; 895 struct irq_map_entry *ptr;
878 unsigned int virq; 896 unsigned int virq;
879 897
880 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 898 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
881
882 /*
883 * Check if the radix tree exists and has bee initialized.
884 * If not, we fallback to slow mode
885 */
886 if (revmap_trees_allocated < 2)
887 return irq_find_mapping(host, hwirq); 899 return irq_find_mapping(host, hwirq);
888 900
889 /* Now try to resolve */
890 /* 901 /*
891 * No rcu_read_lock(ing) needed, the ptr returned can't go under us 902 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
892 * as it's referencing an entry in the static irq_map table. 903 * as it's referencing an entry in the static irq_map table.
@@ -909,16 +920,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
909void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 920void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
910 irq_hw_number_t hwirq) 921 irq_hw_number_t hwirq)
911{ 922{
912 923 if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
913 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
914
915 /*
916 * Check if the radix tree exists yet.
917 * If not, then the irq will be inserted into the tree when it gets
918 * initialized.
919 */
920 smp_rmb();
921 if (revmap_trees_allocated < 1)
922 return; 924 return;
923 925
924 if (virq != NO_IRQ) { 926 if (virq != NO_IRQ) {
@@ -934,7 +936,8 @@ unsigned int irq_linear_revmap(struct irq_host *host,
934{ 936{
935 unsigned int *revmap; 937 unsigned int *revmap;
936 938
937 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); 939 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
940 return irq_find_mapping(host, hwirq);
938 941
939 /* Check revmap bounds */ 942 /* Check revmap bounds */
940 if (unlikely(hwirq >= host->revmap_data.linear.size)) 943 if (unlikely(hwirq >= host->revmap_data.linear.size))
@@ -1028,53 +1031,6 @@ int arch_early_irq_init(void)
1028 return 0; 1031 return 0;
1029} 1032}
1030 1033
1031/* We need to create the radix trees late */
1032static int irq_late_init(void)
1033{
1034 struct irq_host *h;
1035 unsigned int i;
1036
1037 /*
1038 * No mutual exclusion with respect to accessors of the tree is needed
1039 * here as the synchronization is done via the state variable
1040 * revmap_trees_allocated.
1041 */
1042 list_for_each_entry(h, &irq_hosts, link) {
1043 if (h->revmap_type == IRQ_HOST_MAP_TREE)
1044 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
1045 }
1046
1047 /*
1048 * Make sure the radix trees inits are visible before setting
1049 * the flag
1050 */
1051 smp_wmb();
1052 revmap_trees_allocated = 1;
1053
1054 /*
1055 * Insert the reverse mapping for those interrupts already present
1056 * in irq_map[].
1057 */
1058 mutex_lock(&revmap_trees_mutex);
1059 for (i = 0; i < irq_virq_count; i++) {
1060 if (irq_map[i].host &&
1061 (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
1062 radix_tree_insert(&irq_map[i].host->revmap_data.tree,
1063 irq_map[i].hwirq, &irq_map[i]);
1064 }
1065 mutex_unlock(&revmap_trees_mutex);
1066
1067 /*
1068 * Make sure the radix trees insertions are visible before setting
1069 * the flag
1070 */
1071 smp_wmb();
1072 revmap_trees_allocated = 2;
1073
1074 return 0;
1075}
1076arch_initcall(irq_late_init);
1077
1078#ifdef CONFIG_VIRQ_DEBUG 1034#ifdef CONFIG_VIRQ_DEBUG
1079static int virq_debug_show(struct seq_file *m, void *private) 1035static int virq_debug_show(struct seq_file *m, void *private)
1080{ 1036{
@@ -1082,10 +1038,11 @@ static int virq_debug_show(struct seq_file *m, void *private)
1082 struct irq_desc *desc; 1038 struct irq_desc *desc;
1083 const char *p; 1039 const char *p;
1084 static const char none[] = "none"; 1040 static const char none[] = "none";
1041 void *data;
1085 int i; 1042 int i;
1086 1043
1087 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", 1044 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
1088 "chip name", "host name"); 1045 "chip name", "chip data", "host name");
1089 1046
1090 for (i = 1; i < nr_irqs; i++) { 1047 for (i = 1; i < nr_irqs; i++) {
1091 desc = irq_to_desc(i); 1048 desc = irq_to_desc(i);
@@ -1098,7 +1055,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
1098 struct irq_chip *chip; 1055 struct irq_chip *chip;
1099 1056
1100 seq_printf(m, "%5d ", i); 1057 seq_printf(m, "%5d ", i);
1101 seq_printf(m, "0x%05lx ", virq_to_hw(i)); 1058 seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
1102 1059
1103 chip = irq_desc_get_chip(desc); 1060 chip = irq_desc_get_chip(desc);
1104 if (chip && chip->name) 1061 if (chip && chip->name)
@@ -1107,6 +1064,9 @@ static int virq_debug_show(struct seq_file *m, void *private)
1107 p = none; 1064 p = none;
1108 seq_printf(m, "%-15s ", p); 1065 seq_printf(m, "%-15s ", p);
1109 1066
1067 data = irq_desc_get_chip_data(desc);
1068 seq_printf(m, "0x%16p ", data);
1069
1110 if (irq_map[i].host && irq_map[i].host->of_node) 1070 if (irq_map[i].host && irq_map[i].host->of_node)
1111 p = irq_map[i].host->of_node->full_name; 1071 p = irq_map[i].host->of_node->full_name;
1112 else 1072 else
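Two of the additions here, irqd_to_hwirq() and virq_is_host(), let interrupt controller drivers stop reaching into irq_map[], which this patch makes static. Typical use from an irq_chip callback, with the hardware access stubbed out as a hypothetical helper:

    #include <linux/irq.h>

    extern void my_ack_hw(irq_hw_number_t hw);   /* hypothetical controller access */

    static void my_chip_ack(struct irq_data *d)
    {
            /* Resolve the hardware interrupt number straight from irq_data
             * instead of open-coding irq_map[d->irq].hwirq. */
            my_ack_hw(irqd_to_hwirq(d));
    }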
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 42850ee00ada..bd9d35f59cf4 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs)
109#ifdef CONFIG_SMP 109#ifdef CONFIG_SMP
110void kgdb_roundup_cpus(unsigned long flags) 110void kgdb_roundup_cpus(unsigned long flags)
111{ 111{
112 smp_send_debugger_break(MSG_ALL_BUT_SELF); 112 smp_send_debugger_break();
113} 113}
114#endif 114#endif
115 115
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 301db65f05a1..84daabe2fcba 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -132,34 +132,6 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
132/* 132/*
133 * Methods used to fetch LPAR data when running on a pSeries platform. 133 * Methods used to fetch LPAR data when running on a pSeries platform.
134 */ 134 */
135/**
136 * h_get_mpp
137 * H_GET_MPP hcall returns info in 7 parms
138 */
139int h_get_mpp(struct hvcall_mpp_data *mpp_data)
140{
141 int rc;
142 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
143
144 rc = plpar_hcall9(H_GET_MPP, retbuf);
145
146 mpp_data->entitled_mem = retbuf[0];
147 mpp_data->mapped_mem = retbuf[1];
148
149 mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
150 mpp_data->pool_num = retbuf[2] & 0xffff;
151
152 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
153 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
154 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
155
156 mpp_data->pool_size = retbuf[4];
157 mpp_data->loan_request = retbuf[5];
158 mpp_data->backing_mem = retbuf[6];
159
160 return rc;
161}
162EXPORT_SYMBOL(h_get_mpp);
163 135
164struct hvcall_ppp_data { 136struct hvcall_ppp_data {
165 u64 entitlement; 137 u64 entitlement;
@@ -345,6 +317,30 @@ static void parse_mpp_data(struct seq_file *m)
345 seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem); 317 seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
346} 318}
347 319
320/**
321 * parse_mpp_x_data
322 * Parse out data returned from h_get_mpp_x
323 */
324static void parse_mpp_x_data(struct seq_file *m)
325{
326 struct hvcall_mpp_x_data mpp_x_data;
327
328 if (!firmware_has_feature(FW_FEATURE_XCMO))
329 return;
330 if (h_get_mpp_x(&mpp_x_data))
331 return;
332
333 seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
334
335 if (mpp_x_data.pool_coalesced_bytes)
336 seq_printf(m, "pool_coalesced_bytes=%ld\n",
337 mpp_x_data.pool_coalesced_bytes);
338 if (mpp_x_data.pool_purr_cycles)
339 seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
340 if (mpp_x_data.pool_spurr_cycles)
341 seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
342}
343
348#define SPLPAR_CHARACTERISTICS_TOKEN 20 344#define SPLPAR_CHARACTERISTICS_TOKEN 20
349#define SPLPAR_MAXLENGTH 1026*(sizeof(char)) 345#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
350 346
@@ -520,6 +516,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
520 parse_system_parameter_string(m); 516 parse_system_parameter_string(m);
521 parse_ppp_data(m); 517 parse_ppp_data(m);
522 parse_mpp_data(m); 518 parse_mpp_data(m);
519 parse_mpp_x_data(m);
523 pseries_cmo_data(m); 520 pseries_cmo_data(m);
524 splpar_dispatch_data(m); 521 splpar_dispatch_data(m);
525 522
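Not shown in this hunk: parse_mpp_x_data() relies on an h_get_mpp_x() wrapper around the H_GET_MPP_X hcall (declared alongside the hvcall.h changes in this series). A minimal sketch, assuming the coalescing counters come back in the first four return words; check hvcall.h and PAPR for the authoritative layout:

    #include <asm/hvcall.h>

    static int h_get_mpp_x_sketch(struct hvcall_mpp_x_data *mpp_x_data)
    {
            unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
            int rc = plpar_hcall9(H_GET_MPP_X, retbuf);

            mpp_x_data->coalesced_bytes      = retbuf[0];
            mpp_x_data->pool_coalesced_bytes = retbuf[1];
            mpp_x_data->pool_purr_cycles     = retbuf[2];
            mpp_x_data->pool_spurr_cycles    = retbuf[3];
            return rc;
    }

parse_mpp_x_data() above only prints the pool fields when they are non-zero, so firmware that does not report pool statistics simply yields the coalesced_bytes line.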
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 094bd9821ad4..402560e957bd 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -694,6 +694,17 @@ _GLOBAL(kernel_thread)
694 addi r1,r1,16 694 addi r1,r1,16
695 blr 695 blr
696 696
697#ifdef CONFIG_SMP
698_GLOBAL(start_secondary_resume)
699 /* Reset stack */
700 rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
701 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
702 li r3,0
703 std r3,0(r1) /* Zero the stack frame pointer */
704 bl start_secondary
705 b .
706#endif /* CONFIG_SMP */
707
697/* 708/*
698 * This routine is just here to keep GCC happy - sigh... 709 * This routine is just here to keep GCC happy - sigh...
699 */ 710 */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 206a321a71d3..e89df59cdc5a 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp)
462 * wait for the flag to change, indicating this kernel is going away but 462 * wait for the flag to change, indicating this kernel is going away but
463 * the slave code for the next one is at addresses 0 to 100. 463 * the slave code for the next one is at addresses 0 to 100.
464 * 464 *
465 * This is used by all slaves. 465 * This is used by all slaves, even those that did not find a matching
466 * paca in the secondary startup code.
466 * 467 *
467 * Physical (hardware) cpu id should be in r3. 468 * Physical (hardware) cpu id should be in r3.
468 */ 469 */
@@ -471,10 +472,6 @@ _GLOBAL(kexec_wait)
4711: mflr r5 4721: mflr r5
472 addi r5,r5,kexec_flag-1b 473 addi r5,r5,kexec_flag-1b
473 474
474 li r4,KEXEC_STATE_REAL_MODE
475 stb r4,PACAKEXECSTATE(r13)
476 SYNC
477
47899: HMT_LOW 47599: HMT_LOW
479#ifdef CONFIG_KEXEC /* use no memory without kexec */ 476#ifdef CONFIG_KEXEC /* use no memory without kexec */
480 lwz r4,0(r5) 477 lwz r4,0(r5)
@@ -499,11 +496,17 @@ kexec_flag:
499 * 496 *
500 * get phys id from paca 497 * get phys id from paca
501 * switch to real mode 498 * switch to real mode
499 * mark the paca as no longer used
502 * join other cpus in kexec_wait(phys_id) 500 * join other cpus in kexec_wait(phys_id)
503 */ 501 */
504_GLOBAL(kexec_smp_wait) 502_GLOBAL(kexec_smp_wait)
505 lhz r3,PACAHWCPUID(r13) 503 lhz r3,PACAHWCPUID(r13)
506 bl real_mode 504 bl real_mode
505
506 li r4,KEXEC_STATE_REAL_MODE
507 stb r4,PACAKEXECSTATE(r13)
508 SYNC
509
507 b .kexec_wait 510 b .kexec_wait
508 511
509/* 512/*
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 10f0aadee95b..efeb88184182 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -7,7 +7,7 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/threads.h> 10#include <linux/smp.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/memblock.h> 12#include <linux/memblock.h>
13 13
@@ -156,18 +156,29 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
156/* Put the paca pointer into r13 and SPRG_PACA */ 156/* Put the paca pointer into r13 and SPRG_PACA */
157void setup_paca(struct paca_struct *new_paca) 157void setup_paca(struct paca_struct *new_paca)
158{ 158{
159 /* Setup r13 */
159 local_paca = new_paca; 160 local_paca = new_paca;
160 mtspr(SPRN_SPRG_PACA, local_paca); 161
161#ifdef CONFIG_PPC_BOOK3E 162#ifdef CONFIG_PPC_BOOK3E
163 /* On Book3E, initialize the TLB miss exception frames */
162 mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); 164 mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
165#else
166 /* In HV mode, we set up both HPACA and PACA to avoid problems
167 * if we do a GET_PACA() before the feature fixups have been
168 * applied
169 */
170 if (cpu_has_feature(CPU_FTR_HVMODE_206))
171 mtspr(SPRN_SPRG_HPACA, local_paca);
163#endif 172#endif
173 mtspr(SPRN_SPRG_PACA, local_paca);
174
164} 175}
165 176
166static int __initdata paca_size; 177static int __initdata paca_size;
167 178
168void __init allocate_pacas(void) 179void __init allocate_pacas(void)
169{ 180{
170 int nr_cpus, cpu, limit; 181 int cpu, limit;
171 182
172 /* 183 /*
173 * We can't take SLB misses on the paca, and we want to access them 184 * We can't take SLB misses on the paca, and we want to access them
@@ -179,23 +190,18 @@ void __init allocate_pacas(void)
179 if (firmware_has_feature(FW_FEATURE_ISERIES)) 190 if (firmware_has_feature(FW_FEATURE_ISERIES))
180 limit = min(limit, HvPagesToMap * HVPAGESIZE); 191 limit = min(limit, HvPagesToMap * HVPAGESIZE);
181 192
182 nr_cpus = NR_CPUS; 193 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
183 /* On iSeries we know we can never have more than 64 cpus */
184 if (firmware_has_feature(FW_FEATURE_ISERIES))
185 nr_cpus = min(64, nr_cpus);
186
187 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
188 194
189 paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); 195 paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
190 memset(paca, 0, paca_size); 196 memset(paca, 0, paca_size);
191 197
192 printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", 198 printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
193 paca_size, nr_cpus, paca); 199 paca_size, nr_cpu_ids, paca);
194 200
195 allocate_lppacas(nr_cpus, limit); 201 allocate_lppacas(nr_cpu_ids, limit);
196 202
197 /* Can't use for_each_*_cpu, as they aren't functional yet */ 203 /* Can't use for_each_*_cpu, as they aren't functional yet */
198 for (cpu = 0; cpu < nr_cpus; cpu++) 204 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
199 initialise_paca(&paca[cpu], cpu); 205 initialise_paca(&paca[cpu], cpu);
200} 206}
201 207
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d225d99fe39d..6baabc13306a 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
43 const u32 *regs; 43 const u32 *regs;
44 struct pci_dn *pdn; 44 struct pci_dn *pdn;
45 45
46 pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); 46 pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
47 if (pdn == NULL) 47 if (pdn == NULL)
48 return NULL; 48 return NULL;
49 memset(pdn, 0, sizeof(*pdn));
50 dn->data = pdn; 49 dn->data = pdn;
51 pdn->node = dn; 50 pdn->node = dn;
52 pdn->phb = phb; 51 pdn->phb = phb;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ef3ef566235e..7d28f540200c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs);
54extern int sys_sigreturn(struct pt_regs *regs); 54extern int sys_sigreturn(struct pt_regs *regs);
55 55
56EXPORT_SYMBOL(clear_pages); 56EXPORT_SYMBOL(clear_pages);
57EXPORT_SYMBOL(copy_page);
58EXPORT_SYMBOL(ISA_DMA_THRESHOLD); 57EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
59EXPORT_SYMBOL(DMA_MODE_READ); 58EXPORT_SYMBOL(DMA_MODE_READ);
60EXPORT_SYMBOL(DMA_MODE_WRITE); 59EXPORT_SYMBOL(DMA_MODE_WRITE);
@@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user);
88EXPORT_SYMBOL(__clear_user); 87EXPORT_SYMBOL(__clear_user);
89EXPORT_SYMBOL(__strncpy_from_user); 88EXPORT_SYMBOL(__strncpy_from_user);
90EXPORT_SYMBOL(__strnlen_user); 89EXPORT_SYMBOL(__strnlen_user);
91#ifdef CONFIG_PPC64 90EXPORT_SYMBOL(copy_page);
92EXPORT_SYMBOL(copy_4K_page);
93#endif
94 91
95#if defined(CONFIG_PCI) && defined(CONFIG_PPC32) 92#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
96EXPORT_SYMBOL(isa_io_base); 93EXPORT_SYMBOL(isa_io_base);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f74f355a9617..095043d79946 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -702,6 +702,8 @@ void prepare_to_copy(struct task_struct *tsk)
702/* 702/*
703 * Copy a thread.. 703 * Copy a thread..
704 */ 704 */
705extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
706
705int copy_thread(unsigned long clone_flags, unsigned long usp, 707int copy_thread(unsigned long clone_flags, unsigned long usp,
706 unsigned long unused, struct task_struct *p, 708 unsigned long unused, struct task_struct *p,
707 struct pt_regs *regs) 709 struct pt_regs *regs)
@@ -755,11 +757,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
755 _ALIGN_UP(sizeof(struct thread_info), 16); 757 _ALIGN_UP(sizeof(struct thread_info), 16);
756 758
757#ifdef CONFIG_PPC_STD_MMU_64 759#ifdef CONFIG_PPC_STD_MMU_64
758 if (cpu_has_feature(CPU_FTR_SLB)) { 760 if (mmu_has_feature(MMU_FTR_SLB)) {
759 unsigned long sp_vsid; 761 unsigned long sp_vsid;
760 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 762 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
761 763
762 if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) 764 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
763 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) 765 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
764 << SLB_VSID_SHIFT_1T; 766 << SLB_VSID_SHIFT_1T;
765 else 767 else
@@ -769,6 +771,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
769 p->thread.ksp_vsid = sp_vsid; 771 p->thread.ksp_vsid = sp_vsid;
770 } 772 }
771#endif /* CONFIG_PPC_STD_MMU_64 */ 773#endif /* CONFIG_PPC_STD_MMU_64 */
774#ifdef CONFIG_PPC64
775 if (cpu_has_feature(CPU_FTR_DSCR)) {
776 if (current->thread.dscr_inherit) {
777 p->thread.dscr_inherit = 1;
778 p->thread.dscr = current->thread.dscr;
779 } else if (0 != dscr_default) {
780 p->thread.dscr_inherit = 1;
781 p->thread.dscr = dscr_default;
782 } else {
783 p->thread.dscr_inherit = 0;
784 p->thread.dscr = 0;
785 }
786 }
787#endif
772 788
773 /* 789 /*
774 * The PPC64 ABI makes use of a TOC to contain function 790 * The PPC64 ABI makes use of a TOC to contain function
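The copy_thread() addition picks the child's DSCR with a three-way precedence: the parent's explicitly set value, then the system-wide default, then zero. A self-contained sketch of just that decision; thread_state and the dscr_default value are illustrative stand-ins for the kernel's thread_struct fields.

#include <stdio.h>

/* Illustrative thread state; the kernel keeps these in thread_struct. */
struct thread_state { int dscr_inherit; unsigned long dscr; };

static unsigned long dscr_default = 0x10;   /* pretend system-wide default */

/* Child inherits the parent's explicit DSCR, else the system default,
 * else starts from zero, matching the precedence in the hunk above. */
static void inherit_dscr(struct thread_state *child,
                         const struct thread_state *parent)
{
    if (parent->dscr_inherit) {
        child->dscr_inherit = 1;
        child->dscr = parent->dscr;
    } else if (dscr_default != 0) {
        child->dscr_inherit = 1;
        child->dscr = dscr_default;
    } else {
        child->dscr_inherit = 0;
        child->dscr = 0;
    }
}

int main(void)
{
    struct thread_state parent = { .dscr_inherit = 1, .dscr = 0x3 }, child;

    inherit_dscr(&child, &parent);
    printf("child dscr=%#lx inherit=%d\n", child.dscr, child.dscr_inherit);
    return 0;
}
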
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index e74fa12afc82..48aeb55faae9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -68,6 +68,7 @@ int __initdata iommu_force_on;
68unsigned long tce_alloc_start, tce_alloc_end; 68unsigned long tce_alloc_start, tce_alloc_end;
69u64 ppc64_rma_size; 69u64 ppc64_rma_size;
70#endif 70#endif
71static phys_addr_t first_memblock_size;
71 72
72static int __init early_parse_mem(char *p) 73static int __init early_parse_mem(char *p)
73{ 74{
@@ -123,18 +124,19 @@ static void __init move_device_tree(void)
123 */ 124 */
124static struct ibm_pa_feature { 125static struct ibm_pa_feature {
125 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 126 unsigned long cpu_features; /* CPU_FTR_xxx bit */
127 unsigned long mmu_features; /* MMU_FTR_xxx bit */
126 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 128 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
127 unsigned char pabyte; /* byte number in ibm,pa-features */ 129 unsigned char pabyte; /* byte number in ibm,pa-features */
128 unsigned char pabit; /* bit number (big-endian) */ 130 unsigned char pabit; /* bit number (big-endian) */
129 unsigned char invert; /* if 1, pa bit set => clear feature */ 131 unsigned char invert; /* if 1, pa bit set => clear feature */
130} ibm_pa_features[] __initdata = { 132} ibm_pa_features[] __initdata = {
131 {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 133 {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
132 {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 134 {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
133 {CPU_FTR_SLB, 0, 0, 2, 0}, 135 {0, MMU_FTR_SLB, 0, 0, 2, 0},
134 {CPU_FTR_CTRL, 0, 0, 3, 0}, 136 {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
135 {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, 137 {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
136 {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, 138 {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
137 {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 139 {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
138 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 140 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
139}; 141};
140 142
@@ -166,9 +168,11 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs,
166 if (bit ^ fp->invert) { 168 if (bit ^ fp->invert) {
167 cur_cpu_spec->cpu_features |= fp->cpu_features; 169 cur_cpu_spec->cpu_features |= fp->cpu_features;
168 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 170 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
171 cur_cpu_spec->mmu_features |= fp->mmu_features;
169 } else { 172 } else {
170 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 173 cur_cpu_spec->cpu_features &= ~fp->cpu_features;
171 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 174 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
175 cur_cpu_spec->mmu_features &= ~fp->mmu_features;
172 } 176 }
173 } 177 }
174} 178}
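The widened ibm_pa_features[] table is walked by scan_features(), which tests one big-endian bit per entry and sets or clears the matching feature mask, optionally inverted. A standalone sketch of that walk over a fake property blob; the FTR_* values, table contents and byte offsets are invented for illustration and do not match the kernel's CPU_FTR_/MMU_FTR_ bits.

#include <stdio.h>

#define FTR_SLB        0x01u
#define FTR_NOEXEC     0x02u
#define FTR_DSISRALIGN 0x04u

struct pa_feature {
    unsigned int ftr;        /* feature mask to set or clear */
    unsigned char pabyte;    /* byte index in the property */
    unsigned char pabit;     /* bit number, big-endian within the byte */
    unsigned char invert;    /* if set, a 1 bit clears the feature */
};

static const struct pa_feature table[] = {
    { FTR_SLB,        0, 2, 0 },
    { FTR_NOEXEC,     0, 6, 0 },
    { FTR_DSISRALIGN, 1, 1, 1 },
};

static unsigned int scan_features(const unsigned char *prop, unsigned long len)
{
    unsigned int features = 0;
    unsigned long i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        const struct pa_feature *fp = &table[i];
        unsigned int bit;

        if (fp->pabyte >= len)
            continue;                       /* property too short */
        bit = (prop[fp->pabyte] >> (7 - fp->pabit)) & 1;  /* big-endian bit */
        if (bit ^ fp->invert)
            features |= fp->ftr;
        else
            features &= ~fp->ftr;
    }
    return features;
}

int main(void)
{
    unsigned char prop[] = { 0x22, 0x00 };  /* bits 2 and 6 of byte 0 set */

    printf("features = %#x\n", scan_features(prop, sizeof(prop)));
    return 0;
}
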
@@ -268,13 +272,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
268 const char *uname, int depth, 272 const char *uname, int depth,
269 void *data) 273 void *data)
270{ 274{
271 static int logical_cpuid = 0;
272 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 275 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
273 const u32 *prop; 276 const u32 *prop;
274 const u32 *intserv; 277 const u32 *intserv;
275 int i, nthreads; 278 int i, nthreads;
276 unsigned long len; 279 unsigned long len;
277 int found = 0; 280 int found = -1;
281 int found_thread = 0;
278 282
279 /* We are scanning "cpu" nodes only */ 283 /* We are scanning "cpu" nodes only */
280 if (type == NULL || strcmp(type, "cpu") != 0) 284 if (type == NULL || strcmp(type, "cpu") != 0)
@@ -298,11 +302,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
298 * version 2 of the kexec param format adds the phys cpuid of 302 * version 2 of the kexec param format adds the phys cpuid of
299 * booted proc. 303 * booted proc.
300 */ 304 */
301 if (initial_boot_params && initial_boot_params->version >= 2) { 305 if (initial_boot_params->version >= 2) {
302 if (intserv[i] == 306 if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
303 initial_boot_params->boot_cpuid_phys) { 307 found = boot_cpu_count;
304 found = 1; 308 found_thread = i;
305 break;
306 } 309 }
307 } else { 310 } else {
308 /* 311 /*
@@ -311,23 +314,20 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
311 * off secondary threads. 314 * off secondary threads.
312 */ 315 */
313 if (of_get_flat_dt_prop(node, 316 if (of_get_flat_dt_prop(node,
314 "linux,boot-cpu", NULL) != NULL) { 317 "linux,boot-cpu", NULL) != NULL)
315 found = 1; 318 found = boot_cpu_count;
316 break;
317 }
318 } 319 }
319
320#ifdef CONFIG_SMP 320#ifdef CONFIG_SMP
321 /* logical cpu id is always 0 on UP kernels */ 321 /* logical cpu id is always 0 on UP kernels */
322 logical_cpuid++; 322 boot_cpu_count++;
323#endif 323#endif
324 } 324 }
325 325
326 if (found) { 326 if (found >= 0) {
327 DBG("boot cpu: logical %d physical %d\n", logical_cpuid, 327 DBG("boot cpu: logical %d physical %d\n", found,
328 intserv[i]); 328 intserv[found_thread]);
329 boot_cpuid = logical_cpuid; 329 boot_cpuid = found;
330 set_hard_smp_processor_id(boot_cpuid, intserv[i]); 330 set_hard_smp_processor_id(found, intserv[found_thread]);
331 331
332 /* 332 /*
333 * PAPR defines "logical" PVR values for cpus that 333 * PAPR defines "logical" PVR values for cpus that
@@ -509,11 +509,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
509 size = 0x80000000ul - base; 509 size = 0x80000000ul - base;
510 } 510 }
511#endif 511#endif
512 512 /* Keep track of the beginning of memory -and- the size of
513 /* First MEMBLOCK added, do some special initializations */ 513 * the very first block in the device-tree as it represents
514 if (memstart_addr == ~(phys_addr_t)0) 514 * the RMA on ppc64 server
515 setup_initial_memory_limit(base, size); 515 */
516 memstart_addr = min((u64)memstart_addr, base); 516 if (base < memstart_addr) {
517 memstart_addr = base;
518 first_memblock_size = size;
519 }
517 520
518 /* Add the chunk to the MEMBLOCK list */ 521 /* Add the chunk to the MEMBLOCK list */
519 memblock_add(base, size); 522 memblock_add(base, size);
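early_init_dt_add_memory_arch() now just records the lowest base seen and the size of that first block (the RMA candidate on ppc64 server), leaving setup_initial_memory_limit() to run once the scan is complete. A tiny sketch of that min-tracking over invented (base, size) pairs.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Invented memory blocks as (base, size); the kernel gets one
     * early_init_dt_add_memory_arch() call per device-tree block. */
    const uint64_t blocks[][2] = {
        { 0x20000000, 0x10000000 },
        { 0x00000000, 0x08000000 },   /* lowest base -> becomes memstart */
        { 0x40000000, 0x10000000 },
    };
    uint64_t memstart = UINT64_MAX, first_size = 0;

    for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
        uint64_t base = blocks[i][0], size = blocks[i][1];

        /* Mirrors the memstart_addr/first_memblock_size bookkeeping. */
        if (base < memstart) {
            memstart = base;
            first_size = size;
        }
    }
    /* Only after the whole scan is the limit applied, as in the
     * setup_initial_memory_limit() call moved into early_init_devtree(). */
    printf("memstart=%#llx first_block=%#llx\n",
           (unsigned long long)memstart, (unsigned long long)first_size);
    return 0;
}
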
@@ -698,6 +701,7 @@ void __init early_init_devtree(void *params)
698 701
699 of_scan_flat_dt(early_init_dt_scan_root, NULL); 702 of_scan_flat_dt(early_init_dt_scan_root, NULL);
700 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); 703 of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
704 setup_initial_memory_limit(memstart_addr, first_memblock_size);
701 705
702 /* Save command line for /proc/cmdline and then parse parameters */ 706 /* Save command line for /proc/cmdline and then parse parameters */
703 strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); 707 strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 941ff4dbc567..c016033ba78d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -335,6 +335,7 @@ static void __init prom_printf(const char *format, ...)
335 const char *p, *q, *s; 335 const char *p, *q, *s;
336 va_list args; 336 va_list args;
337 unsigned long v; 337 unsigned long v;
338 long vs;
338 struct prom_t *_prom = &RELOC(prom); 339 struct prom_t *_prom = &RELOC(prom);
339 340
340 va_start(args, format); 341 va_start(args, format);
@@ -368,12 +369,35 @@ static void __init prom_printf(const char *format, ...)
368 v = va_arg(args, unsigned long); 369 v = va_arg(args, unsigned long);
369 prom_print_hex(v); 370 prom_print_hex(v);
370 break; 371 break;
372 case 'd':
373 ++q;
374 vs = va_arg(args, int);
375 if (vs < 0) {
376 prom_print(RELOC("-"));
377 vs = -vs;
378 }
379 prom_print_dec(vs);
380 break;
371 case 'l': 381 case 'l':
372 ++q; 382 ++q;
373 if (*q == 'u') { /* '%lu' */ 383 if (*q == 0)
384 break;
385 else if (*q == 'x') {
386 ++q;
387 v = va_arg(args, unsigned long);
388 prom_print_hex(v);
389 } else if (*q == 'u') { /* '%lu' */
374 ++q; 390 ++q;
375 v = va_arg(args, unsigned long); 391 v = va_arg(args, unsigned long);
376 prom_print_dec(v); 392 prom_print_dec(v);
393 } else if (*q == 'd') { /* %ld */
394 ++q;
395 vs = va_arg(args, long);
396 if (vs < 0) {
397 prom_print(RELOC("-"));
398 vs = -vs;
399 }
400 prom_print_dec(vs);
377 } 401 }
378 break; 402 break;
379 } 403 }
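The new '%d' and '%ld' cases in prom_printf() handle the sign by printing '-' and negating, then reuse the unsigned decimal printer. A standalone sketch of that pattern; print_dec() is a stand-in for prom_print_dec(), and like the original it ignores the LONG_MIN corner case.

#include <stdio.h>

/* Stand-in for prom_print_dec(): prints an unsigned value in decimal. */
static void print_dec(unsigned long v)
{
    char buf[21];
    int i = sizeof(buf);

    buf[--i] = '\0';
    do {
        buf[--i] = '0' + (v % 10);
        v /= 10;
    } while (v);
    fputs(&buf[i], stdout);
}

/* Same trick as the %d/%ld cases: emit the sign, then print the magnitude. */
static void print_sdec(long vs)
{
    if (vs < 0) {
        fputs("-", stdout);
        vs = -vs;
    }
    print_dec((unsigned long)vs);
}

int main(void)
{
    print_sdec(-42);
    fputs("\n", stdout);
    print_sdec(2011);
    fputs("\n", stdout);
    return 0;
}
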
@@ -676,8 +700,10 @@ static void __init early_cmdline_parse(void)
676#endif /* CONFIG_PCI_MSI */ 700#endif /* CONFIG_PCI_MSI */
677#ifdef CONFIG_PPC_SMLPAR 701#ifdef CONFIG_PPC_SMLPAR
678#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */ 702#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
703#define OV5_XCMO 0x40 /* Page Coalescing */
679#else 704#else
680#define OV5_CMO 0x00 705#define OV5_CMO 0x00
706#define OV5_XCMO 0x00
681#endif 707#endif
682#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ 708#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
683 709
@@ -732,7 +758,7 @@ static unsigned char ibm_architecture_vec[] = {
732 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | 758 OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
733 OV5_DONATE_DEDICATE_CPU | OV5_MSI, 759 OV5_DONATE_DEDICATE_CPU | OV5_MSI,
734 0, 760 0,
735 OV5_CMO, 761 OV5_CMO | OV5_XCMO,
736 OV5_TYPE1_AFFINITY, 762 OV5_TYPE1_AFFINITY,
737 0, 763 0,
738 0, 764 0,
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 2097f2b3cba8..271ff6318eda 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,6 +42,7 @@
42#include <asm/time.h> 42#include <asm/time.h>
43#include <asm/mmu.h> 43#include <asm/mmu.h>
44#include <asm/topology.h> 44#include <asm/topology.h>
45#include <asm/pSeries_reconfig.h>
45 46
46struct rtas_t rtas = { 47struct rtas_t rtas = {
47 .lock = __ARCH_SPIN_LOCK_UNLOCKED 48 .lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -494,7 +495,7 @@ unsigned int rtas_busy_delay(int status)
494 495
495 might_sleep(); 496 might_sleep();
496 ms = rtas_busy_delay_time(status); 497 ms = rtas_busy_delay_time(status);
497 if (ms) 498 if (ms && need_resched())
498 msleep(ms); 499 msleep(ms);
499 500
500 return ms; 501 return ms;
@@ -731,6 +732,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
731 732
732 atomic_set(&data->error, rc); 733 atomic_set(&data->error, rc);
733 start_topology_update(); 734 start_topology_update();
735 pSeries_coalesce_init();
734 736
735 if (wake_when_done) { 737 if (wake_when_done) {
736 atomic_set(&data->done, 1); 738 atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 21f30cb68077..79fca2651b65 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
381 int i; 381 int i;
382 382
383 threads_per_core = tpc; 383 threads_per_core = tpc;
384 threads_core_mask = CPU_MASK_NONE; 384 cpumask_clear(&threads_core_mask);
385 385
386 /* This implementation only supports power of 2 number of threads 386 /* This implementation only supports power of 2 number of threads
387 * for simplicity and performance 387 * for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
390 BUG_ON(tpc != (1 << threads_shift)); 390 BUG_ON(tpc != (1 << threads_shift));
391 391
392 for (i = 0; i < tpc; i++) 392 for (i = 0; i < tpc; i++)
393 cpu_set(i, threads_core_mask); 393 cpumask_set_cpu(i, &threads_core_mask);
394 394
395 printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", 395 printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
396 tpc, tpc > 1 ? "s" : ""); 396 tpc, tpc > 1 ? "s" : "");
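cpu_init_thread_core_maps() builds a mask of the first tpc thread bits and insists tpc is a power of two. A sketch with a plain unsigned long standing in for struct cpumask; the shift computation is open-coded here rather than using the kernel's helpers.

#include <assert.h>
#include <stdio.h>

int main(void)
{
    int tpc = 4;                            /* threads per core, must be 2^n */
    int shift = 0;
    unsigned long threads_core_mask = 0;    /* stand-in for a cpumask */

    while ((1 << shift) < tpc)
        shift++;
    assert(tpc == (1 << shift));            /* same constraint as the BUG_ON() */

    for (int i = 0; i < tpc; i++)
        threads_core_mask |= 1UL << i;      /* cpumask_set_cpu(i, ...) analogue */

    printf("threads_shift=%d mask=%#lx\n", shift, threads_core_mask);
    return 0;
}
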
@@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
404 * cpu_present_mask 404 * cpu_present_mask
405 * 405 *
406 * Having the possible map set up early allows us to restrict allocations 406 * Having the possible map set up early allows us to restrict allocations
407 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. 407 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
408 * 408 *
409 * We do not initialize the online map here; cpus set their own bits in 409 * We do not initialize the online map here; cpus set their own bits in
410 * cpu_online_mask as they come up. 410 * cpu_online_mask as they come up.
@@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void)
424 424
425 DBG("smp_setup_cpu_maps()\n"); 425 DBG("smp_setup_cpu_maps()\n");
426 426
427 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { 427 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
428 const int *intserv; 428 const int *intserv;
429 int j, len; 429 int j, len;
430 430
@@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void)
443 intserv = &cpu; /* assume logical == phys */ 443 intserv = &cpu; /* assume logical == phys */
444 } 444 }
445 445
446 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { 446 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
447 DBG(" thread %d -> cpu %d (hard id %d)\n", 447 DBG(" thread %d -> cpu %d (hard id %d)\n",
448 j, cpu, intserv[j]); 448 j, cpu, intserv[j]);
449 set_cpu_present(cpu, true); 449 set_cpu_present(cpu, true);
@@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void)
483 if (cpu_has_feature(CPU_FTR_SMT)) 483 if (cpu_has_feature(CPU_FTR_SMT))
484 maxcpus *= nthreads; 484 maxcpus *= nthreads;
485 485
486 if (maxcpus > NR_CPUS) { 486 if (maxcpus > nr_cpu_ids) {
487 printk(KERN_WARNING 487 printk(KERN_WARNING
488 "Partition configured for %d cpus, " 488 "Partition configured for %d cpus, "
489 "operating system maximum is %d.\n", 489 "operating system maximum is %d.\n",
490 maxcpus, NR_CPUS); 490 maxcpus, nr_cpu_ids);
491 maxcpus = NR_CPUS; 491 maxcpus = nr_cpu_ids;
492 } else 492 } else
493 printk(KERN_INFO "Partition configured for %d cpus.\n", 493 printk(KERN_INFO "Partition configured for %d cpus.\n",
494 maxcpus); 494 maxcpus);
@@ -510,7 +510,7 @@ void __init smp_setup_cpu_maps(void)
510 cpu_init_thread_core_maps(nthreads); 510 cpu_init_thread_core_maps(nthreads);
511 511
512 /* Now that possible cpus are set, set nr_cpu_ids for later use */ 512 /* Now that possible cpus are set, set nr_cpu_ids for later use */
513 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; 513 setup_nr_cpu_ids();
514 514
515 free_unused_pacas(); 515 free_unused_pacas();
516} 516}
@@ -602,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port)
602 * name instead */ 602 * name instead */
603 if (!np) 603 if (!np)
604 np = of_find_node_by_name(NULL, "8042"); 604 np = of_find_node_by_name(NULL, "8042");
605 if (np) {
606 of_i8042_kbd_irq = 1;
607 of_i8042_aux_irq = 12;
608 }
605 break; 609 break;
606 case FDC_BASE: /* FDC1 */ 610 case FDC_BASE: /* FDC1 */
607 np = of_find_node_by_type(NULL, "fdc"); 611 np = of_find_node_by_type(NULL, "fdc");
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d2fbc905303..620d792b52e4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -48,6 +48,7 @@ extern void bootx_init(unsigned long r4, unsigned long phys);
48 48
49int boot_cpuid = -1; 49int boot_cpuid = -1;
50EXPORT_SYMBOL_GPL(boot_cpuid); 50EXPORT_SYMBOL_GPL(boot_cpuid);
51int __initdata boot_cpu_count;
51int boot_cpuid_phys; 52int boot_cpuid_phys;
52 53
53int smp_hw_index[NR_CPUS]; 54int smp_hw_index[NR_CPUS];
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5a0401fcaebd..a88bf2713d41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -62,6 +62,7 @@
62#include <asm/udbg.h> 62#include <asm/udbg.h>
63#include <asm/kexec.h> 63#include <asm/kexec.h>
64#include <asm/mmu_context.h> 64#include <asm/mmu_context.h>
65#include <asm/code-patching.h>
65 66
66#include "setup.h" 67#include "setup.h"
67 68
@@ -72,6 +73,7 @@
72#endif 73#endif
73 74
74int boot_cpuid = 0; 75int boot_cpuid = 0;
76int __initdata boot_cpu_count;
75u64 ppc64_pft_size; 77u64 ppc64_pft_size;
76 78
77/* Pick defaults since we might want to patch instructions 79/* Pick defaults since we might want to patch instructions
@@ -233,6 +235,7 @@ void early_setup_secondary(void)
233void smp_release_cpus(void) 235void smp_release_cpus(void)
234{ 236{
235 unsigned long *ptr; 237 unsigned long *ptr;
238 int i;
236 239
237 DBG(" -> smp_release_cpus()\n"); 240 DBG(" -> smp_release_cpus()\n");
238 241
@@ -245,7 +248,16 @@ void smp_release_cpus(void)
245 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop 248 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
246 - PHYSICAL_START); 249 - PHYSICAL_START);
247 *ptr = __pa(generic_secondary_smp_init); 250 *ptr = __pa(generic_secondary_smp_init);
248 mb(); 251
252 /* And wait a bit for them to catch up */
253 for (i = 0; i < 100000; i++) {
254 mb();
255 HMT_low();
256 if (boot_cpu_count == 0)
257 break;
258 udelay(1);
259 }
260 DBG("boot_cpu_count = %d\n", boot_cpu_count);
249 261
250 DBG(" <- smp_release_cpus()\n"); 262 DBG(" <- smp_release_cpus()\n");
251} 263}
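smp_release_cpus() now waits, with barriers and a bounded iteration count, for the secondaries to check in instead of returning right after the store. A userspace analogue using C11 atomics and pthreads, with usleep() in place of udelay(); the counter and the number of "secondaries" are invented for the demo.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Invented stand-in for the count of secondaries still being released. */
static atomic_int pending_cpus = 3;

static void *secondary(void *arg)
{
    (void)arg;
    usleep(1000);                        /* pretend the CPU takes a moment */
    atomic_fetch_sub(&pending_cpus, 1);  /* "I have reached the spin loop" */
    return NULL;
}

int main(void)
{
    pthread_t t[3];
    int i;

    for (i = 0; i < 3; i++)
        pthread_create(&t[i], NULL, secondary, NULL);

    /* Bounded wait, like the 100000-iteration loop in smp_release_cpus():
     * give the others a chance to check in, but never hang the boot. */
    for (i = 0; i < 100000; i++) {
        if (atomic_load(&pending_cpus) == 0)
            break;
        usleep(1);                       /* udelay(1) analogue */
    }
    printf("pending after wait: %d\n", atomic_load(&pending_cpus));

    for (i = 0; i < 3; i++)
        pthread_join(t[i], NULL);
    return 0;
}
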
@@ -423,17 +435,30 @@ void __init setup_system(void)
423 DBG(" <- setup_system()\n"); 435 DBG(" <- setup_system()\n");
424} 436}
425 437
426static u64 slb0_limit(void) 438/* This returns the limit below which memory accesses to the linear
439 * mapping are guaranteed not to cause a TLB or SLB miss. This is
440 * used to allocate interrupt or emergency stacks for which our
441 * exception entry path doesn't deal with being interrupted.
442 */
443static u64 safe_stack_limit(void)
427{ 444{
428 if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { 445#ifdef CONFIG_PPC_BOOK3E
446 /* Freescale BookE bolts the entire linear mapping */
447 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
448 return linear_map_top;
449 /* Other BookE, we assume the first GB is bolted */
450 return 1ul << 30;
451#else
452 /* BookS, the first segment is bolted */
453 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
429 return 1UL << SID_SHIFT_1T; 454 return 1UL << SID_SHIFT_1T;
430 }
431 return 1UL << SID_SHIFT; 455 return 1UL << SID_SHIFT;
456#endif
432} 457}
433 458
434static void __init irqstack_early_init(void) 459static void __init irqstack_early_init(void)
435{ 460{
436 u64 limit = slb0_limit(); 461 u64 limit = safe_stack_limit();
437 unsigned int i; 462 unsigned int i;
438 463
439 /* 464 /*
@@ -453,6 +478,9 @@ static void __init irqstack_early_init(void)
453#ifdef CONFIG_PPC_BOOK3E 478#ifdef CONFIG_PPC_BOOK3E
454static void __init exc_lvl_early_init(void) 479static void __init exc_lvl_early_init(void)
455{ 480{
481 extern unsigned int interrupt_base_book3e;
482 extern unsigned int exc_debug_debug_book3e;
483
456 unsigned int i; 484 unsigned int i;
457 485
458 for_each_possible_cpu(i) { 486 for_each_possible_cpu(i) {
@@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void)
463 mcheckirq_ctx[i] = (struct thread_info *) 491 mcheckirq_ctx[i] = (struct thread_info *)
464 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); 492 __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
465 } 493 }
494
495 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
496 patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
497 (unsigned long)&exc_debug_debug_book3e, 0);
466} 498}
467#else 499#else
468#define exc_lvl_early_init() 500#define exc_lvl_early_init()
@@ -486,7 +518,7 @@ static void __init emergency_stack_init(void)
486 * bringup, we need to get at them in real mode. This means they 518 * bringup, we need to get at them in real mode. This means they
487 * must also be within the RMO region. 519 * must also be within the RMO region.
488 */ 520 */
489 limit = min(slb0_limit(), ppc64_rma_size); 521 limit = min(safe_stack_limit(), ppc64_rma_size);
490 522
491 for_each_possible_cpu(i) { 523 for_each_possible_cpu(i) {
492 unsigned long sp; 524 unsigned long sp;
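safe_stack_limit() picks how much of the linear mapping is guaranteed bolted (the whole mapping on FSL BookE, the first 1T or 256M segment on BookS), and emergency_stack_init() additionally clamps to the RMA. A sketch that folds the #ifdef'ed cases into runtime flags; the FTR_* names and the flag-based selection are illustrative, only the 2^40 and 2^28 segment sizes match the SID_SHIFT constants.

#include <stdio.h>
#include <stdint.h>

/* Invented flags; the kernel tests MMU_FTR_TYPE_FSL_E and MMU_FTR_1T_SEGMENT. */
#define FTR_BOOKE_FSL   0x1u
#define FTR_1T_SEGMENT  0x2u

static uint64_t safe_stack_limit(unsigned int features, uint64_t linear_map_top)
{
    if (features & FTR_BOOKE_FSL)
        return linear_map_top;          /* whole linear mapping is bolted */
    if (features & FTR_1T_SEGMENT)
        return UINT64_C(1) << 40;       /* first 1T segment is bolted */
    return UINT64_C(1) << 28;           /* first 256M segment is bolted */
}

int main(void)
{
    uint64_t rma = UINT64_C(1) << 30;   /* pretend a 1GB RMA */
    uint64_t limit = safe_stack_limit(FTR_1T_SEGMENT, 0);

    /* Emergency stacks additionally stay below the RMA, hence the min(). */
    if (limit > rma)
        limit = rma;
    printf("emergency stacks allocated below %#llx\n",
           (unsigned long long)limit);
    return 0;
}
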
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27c4a4584f80..da989fff19cc 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -381,7 +381,7 @@ badframe:
381 regs, uc, &uc->uc_mcontext); 381 regs, uc, &uc->uc_mcontext);
382#endif 382#endif
383 if (show_unhandled_signals && printk_ratelimit()) 383 if (show_unhandled_signals && printk_ratelimit())
384 printk(regs->msr & MSR_SF ? fmt64 : fmt32, 384 printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
385 current->comm, current->pid, "rt_sigreturn", 385 current->comm, current->pid, "rt_sigreturn",
386 (long)uc, regs->nip, regs->link); 386 (long)uc, regs->nip, regs->link);
387 387
@@ -469,7 +469,7 @@ badframe:
469 regs, frame, newsp); 469 regs, frame, newsp);
470#endif 470#endif
471 if (show_unhandled_signals && printk_ratelimit()) 471 if (show_unhandled_signals && printk_ratelimit())
472 printk(regs->msr & MSR_SF ? fmt64 : fmt32, 472 printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
473 current->comm, current->pid, "setup_rt_frame", 473 current->comm, current->pid, "setup_rt_frame",
474 (long)frame, regs->nip, regs->link); 474 (long)frame, regs->nip, regs->link);
475 475
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9f9c204bef69..4a6f2ec7e761 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -95,7 +95,7 @@ int smt_enabled_at_boot = 1;
95static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; 95static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
96 96
97#ifdef CONFIG_PPC64 97#ifdef CONFIG_PPC64
98void __devinit smp_generic_kick_cpu(int nr) 98int __devinit smp_generic_kick_cpu(int nr)
99{ 99{
100 BUG_ON(nr < 0 || nr >= NR_CPUS); 100 BUG_ON(nr < 0 || nr >= NR_CPUS);
101 101
@@ -106,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr)
106 */ 106 */
107 paca[nr].cpu_start = 1; 107 paca[nr].cpu_start = 1;
108 smp_mb(); 108 smp_mb();
109}
110#endif
111 109
112void smp_message_recv(int msg) 110 return 0;
113{
114 switch(msg) {
115 case PPC_MSG_CALL_FUNCTION:
116 generic_smp_call_function_interrupt();
117 break;
118 case PPC_MSG_RESCHEDULE:
119 scheduler_ipi();
120 break;
121 case PPC_MSG_CALL_FUNC_SINGLE:
122 generic_smp_call_function_single_interrupt();
123 break;
124 case PPC_MSG_DEBUGGER_BREAK:
125 if (crash_ipi_function_ptr) {
126 crash_ipi_function_ptr(get_irq_regs());
127 break;
128 }
129#ifdef CONFIG_DEBUGGER
130 debugger_ipi(get_irq_regs());
131 break;
132#endif /* CONFIG_DEBUGGER */
133 /* FALLTHROUGH */
134 default:
135 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
136 smp_processor_id(), msg);
137 break;
138 }
139} 111}
112#endif
140 113
141static irqreturn_t call_function_action(int irq, void *data) 114static irqreturn_t call_function_action(int irq, void *data)
142{ 115{
@@ -156,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data)
156 return IRQ_HANDLED; 129 return IRQ_HANDLED;
157} 130}
158 131
159static irqreturn_t debug_ipi_action(int irq, void *data) 132irqreturn_t debug_ipi_action(int irq, void *data)
160{ 133{
161 smp_message_recv(PPC_MSG_DEBUGGER_BREAK); 134 if (crash_ipi_function_ptr) {
135 crash_ipi_function_ptr(get_irq_regs());
136 return IRQ_HANDLED;
137 }
138
139#ifdef CONFIG_DEBUGGER
140 debugger_ipi(get_irq_regs());
141#endif /* CONFIG_DEBUGGER */
142
162 return IRQ_HANDLED; 143 return IRQ_HANDLED;
163} 144}
164 145
@@ -197,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg)
197 return err; 178 return err;
198} 179}
199 180
181#ifdef CONFIG_PPC_SMP_MUXED_IPI
182struct cpu_messages {
183 int messages; /* current messages */
184 unsigned long data; /* data for cause ipi */
185};
186static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
187
188void smp_muxed_ipi_set_data(int cpu, unsigned long data)
189{
190 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
191
192 info->data = data;
193}
194
195void smp_muxed_ipi_message_pass(int cpu, int msg)
196{
197 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
198 char *message = (char *)&info->messages;
199
200 message[msg] = 1;
201 mb();
202 smp_ops->cause_ipi(cpu, info->data);
203}
204
205void smp_muxed_ipi_resend(void)
206{
207 struct cpu_messages *info = &__get_cpu_var(ipi_message);
208
209 if (info->messages)
210 smp_ops->cause_ipi(smp_processor_id(), info->data);
211}
212
213irqreturn_t smp_ipi_demux(void)
214{
215 struct cpu_messages *info = &__get_cpu_var(ipi_message);
216 unsigned int all;
217
218 mb(); /* order any irq clear */
219
220 do {
221 all = xchg_local(&info->messages, 0);
222
223#ifdef __BIG_ENDIAN
224 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
225 generic_smp_call_function_interrupt();
226 if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
227 scheduler_ipi();
228 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
229 generic_smp_call_function_single_interrupt();
230 if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
231 debug_ipi_action(0, NULL);
232#else
233#error Unsupported ENDIAN
234#endif
235 } while (info->messages);
236
237 return IRQ_HANDLED;
238}
239#endif /* CONFIG_PPC_SMP_MUXED_IPI */
240
200void smp_send_reschedule(int cpu) 241void smp_send_reschedule(int cpu)
201{ 242{
202 if (likely(smp_ops)) 243 if (likely(smp_ops))
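The new muxed-IPI code stores four message types as single bytes of one word: a sender sets its byte and fires one hardware IPI, and the receiver atomically swaps the word to zero and services every byte that was set, looping if more arrived meanwhile. A userspace sketch of that pack/xchg/demux cycle using GCC/Clang __atomic builtins; the kernel indexes the bytes with big-endian shift arithmetic, while this sketch indexes the byte array directly so it is endian-neutral, and the cause_ipi() hardware hook is only noted in a comment.

#include <stdio.h>

enum { MSG_CALL_FUNC, MSG_RESCHEDULE, MSG_CALL_FUNC_SINGLE, MSG_DEBUG, NR_MSG };

/* One word holds NR_MSG one-byte flags, like struct cpu_messages::messages. */
static unsigned int messages;

static void post_message(int msg)
{
    char *bytes = (char *)&messages;

    bytes[msg] = 1;                 /* message[msg] = 1 in the kernel code */
    __atomic_thread_fence(__ATOMIC_SEQ_CST);   /* the mb() before cause_ipi */
    /* here the kernel would call smp_ops->cause_ipi(cpu, info->data) */
}

static void demux(void)
{
    unsigned int all;

    do {
        /* xchg_local(&info->messages, 0): grab and clear in one step so a
         * message posted after the swap is seen on the next loop pass. */
        all = __atomic_exchange_n(&messages, 0, __ATOMIC_SEQ_CST);
        for (int msg = 0; msg < NR_MSG; msg++)
            if (((char *)&all)[msg])
                printf("handling message %d\n", msg);
    } while (messages);
}

int main(void)
{
    post_message(MSG_RESCHEDULE);
    post_message(MSG_DEBUG);
    demux();
    return 0;
}
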
@@ -216,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
216 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); 257 smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
217} 258}
218 259
219#ifdef CONFIG_DEBUGGER 260#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
220void smp_send_debugger_break(int cpu) 261void smp_send_debugger_break(void)
221{ 262{
222 if (likely(smp_ops)) 263 int cpu;
223 smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); 264 int me = raw_smp_processor_id();
265
266 if (unlikely(!smp_ops))
267 return;
268
269 for_each_online_cpu(cpu)
270 if (cpu != me)
271 smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
224} 272}
225#endif 273#endif
226 274
@@ -228,9 +276,9 @@ void smp_send_debugger_break(int cpu)
228void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) 276void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
229{ 277{
230 crash_ipi_function_ptr = crash_ipi_callback; 278 crash_ipi_function_ptr = crash_ipi_callback;
231 if (crash_ipi_callback && smp_ops) { 279 if (crash_ipi_callback) {
232 mb(); 280 mb();
233 smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK); 281 smp_send_debugger_break();
234 } 282 }
235} 283}
236#endif 284#endif
@@ -410,8 +458,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
410{ 458{
411 int rc, c; 459 int rc, c;
412 460
413 secondary_ti = current_set[cpu];
414
415 if (smp_ops == NULL || 461 if (smp_ops == NULL ||
416 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) 462 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
417 return -EINVAL; 463 return -EINVAL;
@@ -421,6 +467,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
421 if (rc) 467 if (rc)
422 return rc; 468 return rc;
423 469
470 secondary_ti = current_set[cpu];
471
424 /* Make sure callin-map entry is 0 (can be leftover a CPU 472 /* Make sure callin-map entry is 0 (can be leftover a CPU
425 * hotplug 473 * hotplug
426 */ 474 */
@@ -434,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
434 482
435 /* wake up cpus */ 483 /* wake up cpus */
436 DBG("smp: kicking cpu %d\n", cpu); 484 DBG("smp: kicking cpu %d\n", cpu);
437 smp_ops->kick_cpu(cpu); 485 rc = smp_ops->kick_cpu(cpu);
486 if (rc) {
487 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
488 return rc;
489 }
438 490
439 /* 491 /*
440 * wait to see if the cpu made a callin (is actually up). 492 * wait to see if the cpu made a callin (is actually up).
@@ -507,7 +559,7 @@ int cpu_first_thread_of_core(int core)
507} 559}
508EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); 560EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
509 561
510/* Must be called when no change can occur to cpu_present_map, 562/* Must be called when no change can occur to cpu_present_mask,
511 * i.e. during cpu online or offline. 563 * i.e. during cpu online or offline.
512 */ 564 */
513static struct device_node *cpu_to_l2cache(int cpu) 565static struct device_node *cpu_to_l2cache(int cpu)
@@ -608,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
608 * so we pin us down to CPU 0 for a short while 660 * so we pin us down to CPU 0 for a short while
609 */ 661 */
610 alloc_cpumask_var(&old_mask, GFP_NOWAIT); 662 alloc_cpumask_var(&old_mask, GFP_NOWAIT);
611 cpumask_copy(old_mask, &current->cpus_allowed); 663 cpumask_copy(old_mask, tsk_cpus_allowed(current));
612 set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); 664 set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
613 665
614 if (smp_ops && smp_ops->setup_cpu) 666 if (smp_ops && smp_ops->setup_cpu)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c0d8c2006bf4..f0f2199e64e1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -182,6 +182,41 @@ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
182static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); 182static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
183static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); 183static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
184static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); 184static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
185
186unsigned long dscr_default = 0;
187EXPORT_SYMBOL(dscr_default);
188
189static ssize_t show_dscr_default(struct sysdev_class *class,
190 struct sysdev_class_attribute *attr, char *buf)
191{
192 return sprintf(buf, "%lx\n", dscr_default);
193}
194
195static ssize_t __used store_dscr_default(struct sysdev_class *class,
196 struct sysdev_class_attribute *attr, const char *buf,
197 size_t count)
198{
199 unsigned long val;
200 int ret = 0;
201
202 ret = sscanf(buf, "%lx", &val);
203 if (ret != 1)
204 return -EINVAL;
205 dscr_default = val;
206
207 return count;
208}
209
210static SYSDEV_CLASS_ATTR(dscr_default, 0600,
211 show_dscr_default, store_dscr_default);
212
213static void sysfs_create_dscr_default(void)
214{
215 int err = 0;
216 if (cpu_has_feature(CPU_FTR_DSCR))
217 err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
218 &attr_dscr_default.attr);
219}
185#endif /* CONFIG_PPC64 */ 220#endif /* CONFIG_PPC64 */
186 221
187#ifdef HAS_PPC_PMC_PA6T 222#ifdef HAS_PPC_PMC_PA6T
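store_dscr_default() accepts a hex string via sscanf("%lx") and rejects anything that does not scan as exactly one value. A tiny sketch of that parse-and-validate step with the sysfs/sysdev plumbing left out; -1 stands in for -EINVAL.

#include <stdio.h>

static unsigned long dscr_default;

/* Mirrors store_dscr_default(): accept a hex string, reject anything that
 * does not scan as one "%lx" value, and report how much input was consumed. */
static long store_default(const char *buf, unsigned long count)
{
    unsigned long val;

    if (sscanf(buf, "%lx", &val) != 1)
        return -1;                       /* -EINVAL in the kernel */
    dscr_default = val;
    return (long)count;
}

int main(void)
{
    printf("ret=%ld dscr_default=%#lx\n", store_default("1f\n", 3), dscr_default);
    printf("ret=%ld dscr_default=%#lx\n", store_default("oops", 4), dscr_default);
    return 0;
}
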
@@ -617,6 +652,9 @@ static int __init topology_init(void)
617 if (cpu_online(cpu)) 652 if (cpu_online(cpu))
618 register_cpu_online(cpu); 653 register_cpu_online(cpu);
619 } 654 }
655#ifdef CONFIG_PPC64
656 sysfs_create_dscr_default();
657#endif /* CONFIG_PPC64 */
620 658
621 return 0; 659 return 0;
622} 660}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d782cd71c07c..b13306b0d925 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -198,7 +198,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
198 } else if (show_unhandled_signals && 198 } else if (show_unhandled_signals &&
199 unhandled_signal(current, signr) && 199 unhandled_signal(current, signr) &&
200 printk_ratelimit()) { 200 printk_ratelimit()) {
201 printk(regs->msr & MSR_SF ? fmt64 : fmt32, 201 printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
202 current->comm, current->pid, signr, 202 current->comm, current->pid, signr,
203 addr, regs->nip, regs->link, code); 203 addr, regs->nip, regs->link, code);
204 } 204 }
@@ -220,7 +220,7 @@ void system_reset_exception(struct pt_regs *regs)
220 } 220 }
221 221
222#ifdef CONFIG_KEXEC 222#ifdef CONFIG_KEXEC
223 cpu_set(smp_processor_id(), cpus_in_sr); 223 cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
224#endif 224#endif
225 225
226 die("System Reset", regs, SIGABRT); 226 die("System Reset", regs, SIGABRT);
@@ -908,6 +908,26 @@ static int emulate_instruction(struct pt_regs *regs)
908 return emulate_isel(regs, instword); 908 return emulate_isel(regs, instword);
909 } 909 }
910 910
911#ifdef CONFIG_PPC64
912 /* Emulate the mfspr rD, DSCR. */
913 if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
914 cpu_has_feature(CPU_FTR_DSCR)) {
915 PPC_WARN_EMULATED(mfdscr, regs);
916 rd = (instword >> 21) & 0x1f;
917 regs->gpr[rd] = mfspr(SPRN_DSCR);
918 return 0;
919 }
920 /* Emulate the mtspr DSCR, rD. */
921 if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
922 cpu_has_feature(CPU_FTR_DSCR)) {
923 PPC_WARN_EMULATED(mtdscr, regs);
924 rd = (instword >> 21) & 0x1f;
925 mtspr(SPRN_DSCR, regs->gpr[rd]);
926 current->thread.dscr_inherit = 1;
927 return 0;
928 }
929#endif
930
911 return -EINVAL; 931 return -EINVAL;
912} 932}
913 933
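The DSCR emulation first matches the instruction word against the PPC_INST_*_DSCR masks, then pulls the GPR number out of the rD/rS field before reading or writing the SPR. A standalone sketch of that field extraction; the instruction word below is invented rather than a real mfspr encoding.

#include <stdio.h>

int main(void)
{
    /* Invented 32-bit instruction word with the rD field (bits 21-25 from the
     * LSB, i.e. bits 6-10 in PowerPC's big-endian numbering) set to 9. */
    unsigned int instword = 9u << 21;
    unsigned int rd = (instword >> 21) & 0x1f;   /* same shift/mask as the hunk */

    printf("rD = r%u\n", rd);
    /* For mfspr the emulation then does regs->gpr[rd] = mfspr(SPRN_DSCR);
     * for mtspr, mtspr(SPRN_DSCR, regs->gpr[rd]) and it sets dscr_inherit. */
    return 0;
}
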
@@ -1505,6 +1525,10 @@ struct ppc_emulated ppc_emulated = {
1505#ifdef CONFIG_VSX 1525#ifdef CONFIG_VSX
1506 WARN_EMULATED_SETUP(vsx), 1526 WARN_EMULATED_SETUP(vsx),
1507#endif 1527#endif
1528#ifdef CONFIG_PPC64
1529 WARN_EMULATED_SETUP(mfdscr),
1530 WARN_EMULATED_SETUP(mtdscr),
1531#endif
1508}; 1532};
1509 1533
1510u32 ppc_warn_emulated; 1534u32 ppc_warn_emulated;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index e39cad83c884..23d65abbedce 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -62,6 +62,8 @@ void __init udbg_early_init(void)
62 udbg_init_cpm(); 62 udbg_init_cpm();
63#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) 63#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
64 udbg_init_usbgecko(); 64 udbg_init_usbgecko();
65#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
66 udbg_init_wsp();
65#endif 67#endif
66 68
67#ifdef CONFIG_PPC_EARLY_DEBUG 69#ifdef CONFIG_PPC_EARLY_DEBUG
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index baa33a7517bc..6837f839ab78 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/udbg.h> 12#include <asm/udbg.h>
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/reg_a2.h>
14 15
15extern u8 real_readb(volatile u8 __iomem *addr); 16extern u8 real_readb(volatile u8 __iomem *addr);
16extern void real_writeb(u8 data, volatile u8 __iomem *addr); 17extern void real_writeb(u8 data, volatile u8 __iomem *addr);
@@ -298,3 +299,53 @@ void __init udbg_init_40x_realmode(void)
298 udbg_getc_poll = NULL; 299 udbg_getc_poll = NULL;
299} 300}
300#endif /* CONFIG_PPC_EARLY_DEBUG_40x */ 301#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
302
303#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
304static void udbg_wsp_flush(void)
305{
306 if (udbg_comport) {
307 while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0)
308 /* wait for idle */;
309 }
310}
311
312static void udbg_wsp_putc(char c)
313{
314 if (udbg_comport) {
315 if (c == '\n')
316 udbg_wsp_putc('\r');
317 udbg_wsp_flush();
318 writeb(c, &udbg_comport->thr); eieio();
319 }
320}
321
322static int udbg_wsp_getc(void)
323{
324 if (udbg_comport) {
325 while ((readb(&udbg_comport->lsr) & LSR_DR) == 0)
326 ; /* wait for char */
327 return readb(&udbg_comport->rbr);
328 }
329 return -1;
330}
331
332static int udbg_wsp_getc_poll(void)
333{
334 if (udbg_comport)
335 if (readb(&udbg_comport->lsr) & LSR_DR)
336 return readb(&udbg_comport->rbr);
337 return -1;
338}
339
340void __init udbg_init_wsp(void)
341{
342 udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT;
343
344 udbg_init_uart(udbg_comport, 57600, 50000000);
345
346 udbg_putc = udbg_wsp_putc;
347 udbg_flush = udbg_wsp_flush;
348 udbg_getc = udbg_wsp_getc;
349 udbg_getc_poll = udbg_wsp_getc_poll;
350}
351#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9de6f396cf85..4d5a3edff49e 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -102,7 +102,7 @@ _GLOBAL(giveup_altivec)
102 MTMSRD(r5) /* enable use of VMX now */ 102 MTMSRD(r5) /* enable use of VMX now */
103 isync 103 isync
104 PPC_LCMPI 0,r3,0 104 PPC_LCMPI 0,r3,0
105 beqlr- /* if no previous owner, done */ 105 beqlr /* if no previous owner, done */
106 addi r3,r3,THREAD /* want THREAD of task */ 106 addi r3,r3,THREAD /* want THREAD of task */
107 PPC_LL r5,PT_REGS(r3) 107 PPC_LL r5,PT_REGS(r3)
108 PPC_LCMPI 0,r5,0 108 PPC_LCMPI 0,r5,0
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c961de40c676..0f95b5cce033 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -236,7 +236,7 @@ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
236 236
237int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) 237int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
238{ 238{
239 return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions); 239 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
240} 240}
241 241
242void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) 242void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 2b9c9088d00e..1a1b34487e71 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -35,9 +35,7 @@
35 35
36#if defined(CONFIG_PPC_BOOK3S_64) 36#if defined(CONFIG_PPC_BOOK3S_64)
37 37
38#define LOAD_SHADOW_VCPU(reg) \ 38#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
39 mfspr reg, SPRN_SPRG_PACA
40
41#define SHADOW_VCPU_OFF PACA_KVM_SVCPU 39#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
42#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) 40#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
43#define FUNC(name) GLUE(.,name) 41#define FUNC(name) GLUE(.,name)
@@ -72,7 +70,7 @@
72.global kvmppc_trampoline_\intno 70.global kvmppc_trampoline_\intno
73kvmppc_trampoline_\intno: 71kvmppc_trampoline_\intno:
74 72
75 mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */ 73 SET_SCRATCH0(r13) /* Save r13 */
76 74
77 /* 75 /*
78 * First thing to do is to find out if we're coming 76 * First thing to do is to find out if we're coming
@@ -91,7 +89,7 @@ kvmppc_trampoline_\intno:
91 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 89 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
92 mtcr r12 90 mtcr r12
93 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) 91 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
94 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ 92 GET_SCRATCH0(r13) /* r13 = original r13 */
95 b kvmppc_resume_\intno /* Get back original handler */ 93 b kvmppc_resume_\intno /* Get back original handler */
96 94
97 /* Now we know we're handling a KVM guest */ 95 /* Now we know we're handling a KVM guest */
@@ -114,6 +112,9 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
114INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE 112INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
115INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE 113INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
116INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL 114INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
115#ifdef CONFIG_PPC_BOOK3S_64
116INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
117#endif
117INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT 118INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
118INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM 119INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
119INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL 120INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -158,7 +159,7 @@ kvmppc_handler_skip_ins:
158 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 159 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
159 mtcr r12 160 mtcr r12
160 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) 161 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
161 mfspr r13, SPRN_SPRG_SCRATCH0 162 GET_SCRATCH0(r13)
162 163
163 /* And get back into the code */ 164 /* And get back into the code */
164 RFI 165 RFI
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 7c52ed0b7051..451264274b8c 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -155,14 +155,20 @@ kvmppc_handler_trampoline_exit:
155 PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13) 155 PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
156 156
157 /* Save guest PC and MSR */ 157 /* Save guest PC and MSR */
158 mfsrr0 r3 158 andi. r0,r12,0x2
159 beq 1f
160 mfspr r3,SPRN_HSRR0
161 mfspr r4,SPRN_HSRR1
162 andi. r12,r12,0x3ffd
163 b 2f
1641: mfsrr0 r3
159 mfsrr1 r4 165 mfsrr1 r4
160 1662:
161 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13) 167 PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
162 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13) 168 PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
163 169
164 /* Get scratch'ed off registers */ 170 /* Get scratch'ed off registers */
165 mfspr r9, SPRN_SPRG_SCRATCH0 171 GET_SCRATCH0(r9)
166 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) 172 PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
167 lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) 173 lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
168 174
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index f53e09c7dac7..13b676c20d12 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -6,14 +6,6 @@
6 6
7#include <asm/system.h> 7#include <asm/system.h>
8 8
9void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
10{
11 if (mem_init_done)
12 return kmalloc(size, mask);
13 else
14 return alloc_bootmem(size);
15}
16
17void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) 9void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
18{ 10{
19 void *p; 11 void *p;
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 4d4eeb900486..53dcb6b1b708 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -6,6 +6,7 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#include <asm/page.h>
9#include <asm/processor.h> 10#include <asm/processor.h>
10#include <asm/ppc_asm.h> 11#include <asm/ppc_asm.h>
11#include <asm/asm-offsets.h> 12#include <asm/asm-offsets.h>
@@ -15,9 +16,9 @@ PPC64_CACHES:
15 .tc ppc64_caches[TC],ppc64_caches 16 .tc ppc64_caches[TC],ppc64_caches
16 .section ".text" 17 .section ".text"
17 18
18 19_GLOBAL(copy_page)
19_GLOBAL(copy_4K_page) 20 lis r5,PAGE_SIZE@h
20 li r5,4096 /* 4K page size */ 21 ori r5,r5,PAGE_SIZE@l
21BEGIN_FTR_SECTION 22BEGIN_FTR_SECTION
22 ld r10,PPC64_CACHES@toc(r2) 23 ld r10,PPC64_CACHES@toc(r2)
23 lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */ 24 lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */
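copy_page now loads PAGE_SIZE with a lis/ori pair, i.e. the constant split into its upper and lower 16-bit halves (PAGE_SIZE@h and PAGE_SIZE@l). A quick C sketch of that split and reassembly using an example 64K value.

#include <stdio.h>

int main(void)
{
    unsigned int page_size = 1u << 16;               /* e.g. a 64K PAGE_SIZE */
    unsigned int hi = (page_size >> 16) & 0xffff;    /* what @h resolves to */
    unsigned int lo = page_size & 0xffff;            /* what @l resolves to */
    unsigned int reassembled = (hi << 16) | lo;      /* lis rX,hi ; ori rX,rX,lo */

    printf("hi=%#x lo=%#x reassembled=%#x\n", hi, lo, reassembled);
    return 0;
}

Plain @h (rather than @ha) suffices here because ori ORs an unsigned immediate, so no sign adjustment of the high half is needed.
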
diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c
index deac4d30daf4..e91615abae66 100644
--- a/arch/powerpc/lib/devres.c
+++ b/arch/powerpc/lib/devres.c
@@ -9,11 +9,11 @@
9 9
10#include <linux/device.h> /* devres_*(), devm_ioremap_release() */ 10#include <linux/device.h> /* devres_*(), devm_ioremap_release() */
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12#include <linux/io.h> /* ioremap_flags() */ 12#include <linux/io.h> /* ioremap_prot() */
13#include <linux/module.h> /* EXPORT_SYMBOL() */ 13#include <linux/module.h> /* EXPORT_SYMBOL() */
14 14
15/** 15/**
16 * devm_ioremap_prot - Managed ioremap_flags() 16 * devm_ioremap_prot - Managed ioremap_prot()
17 * @dev: Generic device to remap IO address for 17 * @dev: Generic device to remap IO address for
18 * @offset: BUS offset to map 18 * @offset: BUS offset to map
19 * @size: Size of map 19 * @size: Size of map
@@ -31,7 +31,7 @@ void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
31 if (!ptr) 31 if (!ptr)
32 return NULL; 32 return NULL;
33 33
34 addr = ioremap_flags(offset, size, flags); 34 addr = ioremap_prot(offset, size, flags);
35 if (addr) { 35 if (addr) {
36 *ptr = addr; 36 *ptr = addr;
37 devres_add(dev, ptr); 37 devres_add(dev, ptr);
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index f73daa6f3970..9a52349874ee 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -46,6 +46,18 @@ extern int do_stxvd2x(int rn, unsigned long ea);
46#endif 46#endif
47 47
48/* 48/*
49 * Emulate the truncation of 64 bit values in 32-bit mode.
50 */
51static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
52{
53#ifdef __powerpc64__
54 if ((msr & MSR_64BIT) == 0)
55 val &= 0xffffffffUL;
56#endif
57 return val;
58}
59
60/*
49 * Determine whether a conditional branch instruction would branch. 61 * Determine whether a conditional branch instruction would branch.
50 */ 62 */
51static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs) 63static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
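truncate_if_32bit() centralizes the rule that effective addresses and the next NIP wrap at 4GB when the MSR says 32-bit mode, which the later hunks use to replace the open-coded MSR_SF masking. A userspace sketch using uint64_t; the bit used for MSR_64BIT is MSR[SF], the most significant bit of the 64-bit MSR.

#include <stdio.h>
#include <stdint.h>

#define MSR_64BIT (UINT64_C(1) << 63)   /* MSR[SF]; the kernel names it MSR_64BIT */

/* Same shape as the sstep.c helper: in 32-bit mode, addresses wrap at 4GB. */
static uint64_t truncate_if_32bit(uint64_t msr, uint64_t val)
{
    if ((msr & MSR_64BIT) == 0)
        val &= UINT64_C(0xffffffff);
    return val;
}

int main(void)
{
    uint64_t nip = UINT64_C(0x00000001fffffffc);

    printf("64-bit mode: next nip %#llx\n",
           (unsigned long long)truncate_if_32bit(MSR_64BIT, nip + 4));
    printf("32-bit mode: next nip %#llx\n",
           (unsigned long long)truncate_if_32bit(0, nip + 4));
    return 0;
}
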
@@ -91,11 +103,8 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs
91 if (instr & 0x04000000) /* update forms */ 103 if (instr & 0x04000000) /* update forms */
92 regs->gpr[ra] = ea; 104 regs->gpr[ra] = ea;
93 } 105 }
94#ifdef __powerpc64__ 106
95 if (!(regs->msr & MSR_SF)) 107 return truncate_if_32bit(regs->msr, ea);
96 ea &= 0xffffffffUL;
97#endif
98 return ea;
99} 108}
100 109
101#ifdef __powerpc64__ 110#ifdef __powerpc64__
@@ -114,9 +123,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg
114 if ((instr & 3) == 1) /* update forms */ 123 if ((instr & 3) == 1) /* update forms */
115 regs->gpr[ra] = ea; 124 regs->gpr[ra] = ea;
116 } 125 }
117 if (!(regs->msr & MSR_SF)) 126
118 ea &= 0xffffffffUL; 127 return truncate_if_32bit(regs->msr, ea);
119 return ea;
120} 128}
121#endif /* __powerpc64 */ 129#endif /* __powerpc64 */
122 130
@@ -137,11 +145,8 @@ static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs
137 if (do_update) /* update forms */ 145 if (do_update) /* update forms */
138 regs->gpr[ra] = ea; 146 regs->gpr[ra] = ea;
139 } 147 }
140#ifdef __powerpc64__ 148
141 if (!(regs->msr & MSR_SF)) 149 return truncate_if_32bit(regs->msr, ea);
142 ea &= 0xffffffffUL;
143#endif
144 return ea;
145} 150}
146 151
147/* 152/*
@@ -467,7 +472,7 @@ static void __kprobes set_cr0(struct pt_regs *regs, int rd)
467 472
468 regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); 473 regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
469#ifdef __powerpc64__ 474#ifdef __powerpc64__
470 if (!(regs->msr & MSR_SF)) 475 if (!(regs->msr & MSR_64BIT))
471 val = (int) val; 476 val = (int) val;
472#endif 477#endif
473 if (val < 0) 478 if (val < 0)
@@ -488,7 +493,7 @@ static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
488 ++val; 493 ++val;
489 regs->gpr[rd] = val; 494 regs->gpr[rd] = val;
490#ifdef __powerpc64__ 495#ifdef __powerpc64__
491 if (!(regs->msr & MSR_SF)) { 496 if (!(regs->msr & MSR_64BIT)) {
492 val = (unsigned int) val; 497 val = (unsigned int) val;
493 val1 = (unsigned int) val1; 498 val1 = (unsigned int) val1;
494 } 499 }
@@ -571,8 +576,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
571 if ((instr & 2) == 0) 576 if ((instr & 2) == 0)
572 imm += regs->nip; 577 imm += regs->nip;
573 regs->nip += 4; 578 regs->nip += 4;
574 if ((regs->msr & MSR_SF) == 0) 579 regs->nip = truncate_if_32bit(regs->msr, regs->nip);
575 regs->nip &= 0xffffffffUL;
576 if (instr & 1) 580 if (instr & 1)
577 regs->link = regs->nip; 581 regs->link = regs->nip;
578 if (branch_taken(instr, regs)) 582 if (branch_taken(instr, regs))
@@ -605,13 +609,9 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
605 imm -= 0x04000000; 609 imm -= 0x04000000;
606 if ((instr & 2) == 0) 610 if ((instr & 2) == 0)
607 imm += regs->nip; 611 imm += regs->nip;
608 if (instr & 1) { 612 if (instr & 1)
609 regs->link = regs->nip + 4; 613 regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
610 if ((regs->msr & MSR_SF) == 0) 614 imm = truncate_if_32bit(regs->msr, imm);
611 regs->link &= 0xffffffffUL;
612 }
613 if ((regs->msr & MSR_SF) == 0)
614 imm &= 0xffffffffUL;
615 regs->nip = imm; 615 regs->nip = imm;
616 return 1; 616 return 1;
617 case 19: 617 case 19:
@@ -619,11 +619,8 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
619 case 16: /* bclr */ 619 case 16: /* bclr */
620 case 528: /* bcctr */ 620 case 528: /* bcctr */
621 imm = (instr & 0x400)? regs->ctr: regs->link; 621 imm = (instr & 0x400)? regs->ctr: regs->link;
622 regs->nip += 4; 622 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
623 if ((regs->msr & MSR_SF) == 0) { 623 imm = truncate_if_32bit(regs->msr, imm);
624 regs->nip &= 0xffffffffUL;
625 imm &= 0xffffffffUL;
626 }
627 if (instr & 1) 624 if (instr & 1)
628 regs->link = regs->nip; 625 regs->link = regs->nip;
629 if (branch_taken(instr, regs)) 626 if (branch_taken(instr, regs))
@@ -1617,11 +1614,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1617 return 0; /* invoke DSI if -EFAULT? */ 1614 return 0; /* invoke DSI if -EFAULT? */
1618 } 1615 }
1619 instr_done: 1616 instr_done:
1620 regs->nip += 4; 1617 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
1621#ifdef __powerpc64__
1622 if ((regs->msr & MSR_SF) == 0)
1623 regs->nip &= 0xffffffffUL;
1624#endif
1625 return 1; 1618 return 1;
1626 1619
1627 logical_done: 1620 logical_done:
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 5b7dd4ea02b5..a242b5d7cbe4 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -118,7 +118,7 @@ _GLOBAL(__hash_page_4K)
118BEGIN_FTR_SECTION 118BEGIN_FTR_SECTION
119 cmpdi r9,0 /* check segment size */ 119 cmpdi r9,0 /* check segment size */
120 bne 3f 120 bne 3f
121END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 121END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
122 /* Calc va and put it in r29 */ 122 /* Calc va and put it in r29 */
123 rldicr r29,r5,28,63-28 123 rldicr r29,r5,28,63-28
124 rldicl r3,r3,0,36 124 rldicl r3,r3,0,36
@@ -401,7 +401,7 @@ _GLOBAL(__hash_page_4K)
401BEGIN_FTR_SECTION 401BEGIN_FTR_SECTION
402 cmpdi r9,0 /* check segment size */ 402 cmpdi r9,0 /* check segment size */
403 bne 3f 403 bne 3f
404END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 404END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
405 /* Calc va and put it in r29 */ 405 /* Calc va and put it in r29 */
406 rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ 406 rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */
407 rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ 407 rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */
@@ -715,7 +715,7 @@ BEGIN_FTR_SECTION
715 andi. r0,r31,_PAGE_NO_CACHE 715 andi. r0,r31,_PAGE_NO_CACHE
716 /* If so, bail out and refault as a 4k page */ 716 /* If so, bail out and refault as a 4k page */
717 bne- ht64_bail_ok 717 bne- ht64_bail_ok
718END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) 718END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
719 /* Prepare new PTE value (turn access RW into DIRTY, then 719 /* Prepare new PTE value (turn access RW into DIRTY, then
720 * add BUSY and ACCESSED) 720 * add BUSY and ACCESSED)
721 */ 721 */
@@ -736,7 +736,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
736BEGIN_FTR_SECTION 736BEGIN_FTR_SECTION
737 cmpdi r9,0 /* check segment size */ 737 cmpdi r9,0 /* check segment size */
738 bne 3f 738 bne 3f
739END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 739END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
740 /* Calc va and put it in r29 */ 740 /* Calc va and put it in r29 */
741 rldicr r29,r5,28,63-28 741 rldicr r29,r5,28,63-28
742 rldicl r3,r3,0,36 742 rldicl r3,r3,0,36
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 784a400e0781..dfd764896db0 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -50,9 +50,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
50 case MMU_PAGE_4K: 50 case MMU_PAGE_4K:
51 va &= ~0xffful; 51 va &= ~0xffful;
52 va |= ssize << 8; 52 va |= ssize << 8;
53 asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), 53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
54 %2) 54 : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
55 : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
56 : "memory"); 55 : "memory");
57 break; 56 break;
58 default: 57 default:
@@ -61,9 +60,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
61 va |= penc << 12; 60 va |= penc << 12;
62 va |= ssize << 8; 61 va |= ssize << 8;
63 va |= 1; /* L */ 62 va |= 1; /* L */
64 asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), 63 asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
65 %2) 64 : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
66 : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
67 : "memory"); 65 : "memory");
68 break; 66 break;
69 } 67 }
@@ -98,8 +96,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
98 96
99static inline void tlbie(unsigned long va, int psize, int ssize, int local) 97static inline void tlbie(unsigned long va, int psize, int ssize, int local)
100{ 98{
101 unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL); 99 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
102 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); 100 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
103 101
104 if (use_local) 102 if (use_local)
105 use_local = mmu_psize_defs[psize].tlbiel; 103 use_local = mmu_psize_defs[psize].tlbiel;
@@ -503,7 +501,7 @@ static void native_flush_hash_range(unsigned long number, int local)
503 } pte_iterate_hashed_end(); 501 } pte_iterate_hashed_end();
504 } 502 }
505 503
506 if (cpu_has_feature(CPU_FTR_TLBIEL) && 504 if (mmu_has_feature(MMU_FTR_TLBIEL) &&
507 mmu_psize_defs[psize].tlbiel && local) { 505 mmu_psize_defs[psize].tlbiel && local) {
508 asm volatile("ptesync":::"memory"); 506 asm volatile("ptesync":::"memory");
509 for (i = 0; i < number; i++) { 507 for (i = 0; i < number; i++) {
@@ -517,7 +515,7 @@ static void native_flush_hash_range(unsigned long number, int local)
517 } 515 }
518 asm volatile("ptesync":::"memory"); 516 asm volatile("ptesync":::"memory");
519 } else { 517 } else {
520 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); 518 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
521 519
522 if (lock_tlbie) 520 if (lock_tlbie)
523 raw_spin_lock(&native_tlbie_lock); 521 raw_spin_lock(&native_tlbie_lock);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 58a022d0f463..26b2872b3d00 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -53,6 +53,7 @@
53#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/spu.h> 54#include <asm/spu.h>
55#include <asm/udbg.h> 55#include <asm/udbg.h>
56#include <asm/code-patching.h>
56 57
57#ifdef DEBUG 58#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt) 59#define DBG(fmt...) udbg_printf(fmt)
@@ -258,11 +259,11 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
258 for (; size >= 4; size -= 4, ++prop) { 259 for (; size >= 4; size -= 4, ++prop) {
259 if (prop[0] == 40) { 260 if (prop[0] == 40) {
260 DBG("1T segment support detected\n"); 261 DBG("1T segment support detected\n");
261 cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT; 262 cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
262 return 1; 263 return 1;
263 } 264 }
264 } 265 }
265 cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B; 266 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
266 return 0; 267 return 0;
267} 268}
268 269
@@ -288,7 +289,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
288 if (prop != NULL) { 289 if (prop != NULL) {
289 DBG("Page sizes from device-tree:\n"); 290 DBG("Page sizes from device-tree:\n");
290 size /= 4; 291 size /= 4;
291 cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE); 292 cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
292 while(size > 0) { 293 while(size > 0) {
293 unsigned int shift = prop[0]; 294 unsigned int shift = prop[0];
294 unsigned int slbenc = prop[1]; 295 unsigned int slbenc = prop[1];
@@ -316,7 +317,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
316 break; 317 break;
317 case 0x18: 318 case 0x18:
318 idx = MMU_PAGE_16M; 319 idx = MMU_PAGE_16M;
319 cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE; 320 cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
320 break; 321 break;
321 case 0x22: 322 case 0x22:
322 idx = MMU_PAGE_16G; 323 idx = MMU_PAGE_16G;
@@ -411,7 +412,7 @@ static void __init htab_init_page_sizes(void)
411 * Not in the device-tree, let's fallback on known size 412 * Not in the device-tree, let's fallback on known size
412 * list for 16M capable GP & GR 413 * list for 16M capable GP & GR
413 */ 414 */
414 if (cpu_has_feature(CPU_FTR_16M_PAGE)) 415 if (mmu_has_feature(MMU_FTR_16M_PAGE))
415 memcpy(mmu_psize_defs, mmu_psize_defaults_gp, 416 memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
416 sizeof(mmu_psize_defaults_gp)); 417 sizeof(mmu_psize_defaults_gp));
417 found: 418 found:
@@ -441,7 +442,7 @@ static void __init htab_init_page_sizes(void)
441 mmu_vmalloc_psize = MMU_PAGE_64K; 442 mmu_vmalloc_psize = MMU_PAGE_64K;
442 if (mmu_linear_psize == MMU_PAGE_4K) 443 if (mmu_linear_psize == MMU_PAGE_4K)
443 mmu_linear_psize = MMU_PAGE_64K; 444 mmu_linear_psize = MMU_PAGE_64K;
444 if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { 445 if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
445 /* 446 /*
446 * Don't use 64k pages for ioremap on pSeries, since 447 * Don't use 64k pages for ioremap on pSeries, since
447 * that would stop us accessing the HEA ethernet. 448 * that would stop us accessing the HEA ethernet.
@@ -547,15 +548,7 @@ int remove_section_mapping(unsigned long start, unsigned long end)
547} 548}
548#endif /* CONFIG_MEMORY_HOTPLUG */ 549#endif /* CONFIG_MEMORY_HOTPLUG */
549 550
550static inline void make_bl(unsigned int *insn_addr, void *func) 551#define FUNCTION_TEXT(A) ((*(unsigned long *)(A)))
551{
552 unsigned long funcp = *((unsigned long *)func);
553 int offset = funcp - (unsigned long)insn_addr;
554
555 *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
556 flush_icache_range((unsigned long)insn_addr, 4+
557 (unsigned long)insn_addr);
558}
559 552
560static void __init htab_finish_init(void) 553static void __init htab_finish_init(void)
561{ 554{
@@ -570,16 +563,33 @@ static void __init htab_finish_init(void)
570 extern unsigned int *ht64_call_hpte_remove; 563 extern unsigned int *ht64_call_hpte_remove;
571 extern unsigned int *ht64_call_hpte_updatepp; 564 extern unsigned int *ht64_call_hpte_updatepp;
572 565
573 make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert); 566 patch_branch(ht64_call_hpte_insert1,
574 make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert); 567 FUNCTION_TEXT(ppc_md.hpte_insert),
575 make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove); 568 BRANCH_SET_LINK);
576 make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp); 569 patch_branch(ht64_call_hpte_insert2,
570 FUNCTION_TEXT(ppc_md.hpte_insert),
571 BRANCH_SET_LINK);
572 patch_branch(ht64_call_hpte_remove,
573 FUNCTION_TEXT(ppc_md.hpte_remove),
574 BRANCH_SET_LINK);
575 patch_branch(ht64_call_hpte_updatepp,
576 FUNCTION_TEXT(ppc_md.hpte_updatepp),
577 BRANCH_SET_LINK);
578
577#endif /* CONFIG_PPC_HAS_HASH_64K */ 579#endif /* CONFIG_PPC_HAS_HASH_64K */
578 580
579 make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert); 581 patch_branch(htab_call_hpte_insert1,
580 make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert); 582 FUNCTION_TEXT(ppc_md.hpte_insert),
581 make_bl(htab_call_hpte_remove, ppc_md.hpte_remove); 583 BRANCH_SET_LINK);
582 make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp); 584 patch_branch(htab_call_hpte_insert2,
585 FUNCTION_TEXT(ppc_md.hpte_insert),
586 BRANCH_SET_LINK);
587 patch_branch(htab_call_hpte_remove,
588 FUNCTION_TEXT(ppc_md.hpte_remove),
589 BRANCH_SET_LINK);
590 patch_branch(htab_call_hpte_updatepp,
591 FUNCTION_TEXT(ppc_md.hpte_updatepp),
592 BRANCH_SET_LINK);
583} 593}
584 594
585static void __init htab_initialize(void) 595static void __init htab_initialize(void)
@@ -598,7 +608,7 @@ static void __init htab_initialize(void)
598 /* Initialize page sizes */ 608 /* Initialize page sizes */
599 htab_init_page_sizes(); 609 htab_init_page_sizes();
600 610
601 if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { 611 if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
602 mmu_kernel_ssize = MMU_SEGSIZE_1T; 612 mmu_kernel_ssize = MMU_SEGSIZE_1T;
603 mmu_highuser_ssize = MMU_SEGSIZE_1T; 613 mmu_highuser_ssize = MMU_SEGSIZE_1T;
604 printk(KERN_INFO "Using 1TB segments\n"); 614 printk(KERN_INFO "Using 1TB segments\n");
@@ -739,7 +749,7 @@ void __init early_init_mmu(void)
739 749
740 /* Initialize stab / SLB management except on iSeries 750 /* Initialize stab / SLB management except on iSeries
741 */ 751 */
742 if (cpu_has_feature(CPU_FTR_SLB)) 752 if (mmu_has_feature(MMU_FTR_SLB))
743 slb_initialize(); 753 slb_initialize();
744 else if (!firmware_has_feature(FW_FEATURE_ISERIES)) 754 else if (!firmware_has_feature(FW_FEATURE_ISERIES))
745 stab_initialize(get_paca()->stab_real); 755 stab_initialize(get_paca()->stab_real);
@@ -756,7 +766,7 @@ void __cpuinit early_init_mmu_secondary(void)
756 * in real mode on pSeries and we want a virtual address on 766 * in real mode on pSeries and we want a virtual address on
757 * iSeries anyway 767 * iSeries anyway
758 */ 768 */
759 if (cpu_has_feature(CPU_FTR_SLB)) 769 if (mmu_has_feature(MMU_FTR_SLB))
760 slb_initialize(); 770 slb_initialize();
761 else 771 else
762 stab_initialize(get_paca()->stab_addr); 772 stab_initialize(get_paca()->stab_addr);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9bb249c3046e..0b9a5c1901b9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -529,7 +529,7 @@ static int __init hugetlbpage_init(void)
529{ 529{
530 int psize; 530 int psize;
531 531
532 if (!cpu_has_feature(CPU_FTR_16M_PAGE)) 532 if (!mmu_has_feature(MMU_FTR_16M_PAGE))
533 return -ENODEV; 533 return -ENODEV;
534 534
535 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 535 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 2535828aa84b..3bafc3deca6d 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -20,9 +20,205 @@
20#include <linux/idr.h> 20#include <linux/idr.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/gfp.h> 22#include <linux/gfp.h>
23#include <linux/slab.h>
23 24
24#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
25 26
27#ifdef CONFIG_PPC_ICSWX
28/*
29 * The processor and its L2 cache cause the icswx instruction to
30 * generate a COP_REQ transaction on PowerBus. The transaction has
31 * no address, and the processor does not perform an MMU access
32 * to authenticate the transaction. The command portion of the
33 * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and
34 * the coprocessor Process ID (PID), which the coprocessor compares
35 * to the authorized LPID and PID held in the coprocessor, to determine
36 * if the process is authorized to generate the transaction.
 37 * The data of the COP_REQ transaction is 128 bytes or less and is
38 * placed in cacheable memory on a 128-byte cache line boundary.
39 *
 40 * A task that wants to use a coprocessor should call use_cop() to
 41 * allocate a coprocessor PID before executing the icswx instruction.
 42 * use_cop() also enables coprocessor context switching. drop_cop()
 43 * is used to free the coprocessor PID.
44 *
45 * Example:
46 * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
 47 * Each HFI has multiple windows. Each HFI window serves as a
 48 * network device sending to and receiving from the HFI network.
 49 * The HFI immediate send function uses the icswx instruction. The
 50 * immediate send function allows small (single cache-line) packets to
 51 * be sent without using the regular HFI send FIFO and doorbell, which
 52 * are much slower than immediate send.
53 *
54 * For each task intending to use HFI immediate send, the HFI driver
55 * calls use_cop() to obtain a coprocessor PID for the task.
 56 * The HFI driver then allocates a free HFI window and saves the
57 * coprocessor PID to the HFI window to allow the task to use the
58 * HFI window.
59 *
60 * The HFI driver repeatedly creates immediate send packets and
 61 * issues the icswx instruction to send data through the HFI window.
62 * The HFI compares the coprocessor PID in the CPU PID register
63 * to the PID held in the HFI window to determine if the transaction
64 * is allowed.
65 *
 66 * When the task wants to release the HFI window, the HFI driver calls
67 * drop_cop() to release the coprocessor PID.
68 */
69
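
[editor's note] To make the flow described in the comment above concrete, here is a minimal sketch of how a driver might use the use_cop()/drop_cop() API added in this hunk. MY_COP_TYPE, my_cop_open() and my_cop_release() are hypothetical names, and the assumption that the prototypes are visible via <asm/mmu_context.h> follows the include used by this file; none of this is part of the patch itself.

	/* Illustrative only: MY_COP_TYPE and the two helpers are hypothetical. */
	#include <asm/mmu_context.h>

	#define MY_COP_TYPE	0x1	/* assumed ACOP bit for this coprocessor */

	static int my_cop_open(struct mm_struct *mm)
	{
		int pid = use_cop(MY_COP_TYPE, mm);	/* allocate/enable a coprocessor PID */

		if (pid < 0)
			return pid;	/* e.g. -ENODEV without CPU_FTR_ICSWX, or -ENOMEM */
		/* program 'pid' into the coprocessor window so the coprocessor
		 * can authenticate icswx transactions issued by this mm */
		return 0;
	}

	static void my_cop_release(struct mm_struct *mm)
	{
		drop_cop(MY_COP_TYPE, mm);	/* PID is freed once the acop mask is empty */
	}
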
70#define COP_PID_NONE 0
71#define COP_PID_MIN (COP_PID_NONE + 1)
72#define COP_PID_MAX (0xFFFF)
73
74static DEFINE_SPINLOCK(mmu_context_acop_lock);
75static DEFINE_IDA(cop_ida);
76
77void switch_cop(struct mm_struct *next)
78{
79 mtspr(SPRN_PID, next->context.cop_pid);
80 mtspr(SPRN_ACOP, next->context.acop);
81}
82
83static int new_cop_pid(struct ida *ida, int min_id, int max_id,
84 spinlock_t *lock)
85{
86 int index;
87 int err;
88
89again:
90 if (!ida_pre_get(ida, GFP_KERNEL))
91 return -ENOMEM;
92
93 spin_lock(lock);
94 err = ida_get_new_above(ida, min_id, &index);
95 spin_unlock(lock);
96
97 if (err == -EAGAIN)
98 goto again;
99 else if (err)
100 return err;
101
102 if (index > max_id) {
103 spin_lock(lock);
104 ida_remove(ida, index);
105 spin_unlock(lock);
106 return -ENOMEM;
107 }
108
109 return index;
110}
111
112static void sync_cop(void *arg)
113{
114 struct mm_struct *mm = arg;
115
116 if (mm == current->active_mm)
117 switch_cop(current->active_mm);
118}
119
120/**
121 * Start using a coprocessor.
122 * @acop: mask of coprocessor to be used.
 123 * @mm: The mm to associate the coprocessor with. Most likely the current mm.
124 *
125 * Return a positive PID if successful. Negative errno otherwise.
126 * The returned PID will be fed to the coprocessor to determine if an
127 * icswx transaction is authenticated.
128 */
129int use_cop(unsigned long acop, struct mm_struct *mm)
130{
131 int ret;
132
133 if (!cpu_has_feature(CPU_FTR_ICSWX))
134 return -ENODEV;
135
136 if (!mm || !acop)
137 return -EINVAL;
138
139 /* We need to make sure mm_users doesn't change */
140 down_read(&mm->mmap_sem);
141 spin_lock(mm->context.cop_lockp);
142
143 if (mm->context.cop_pid == COP_PID_NONE) {
144 ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
145 &mmu_context_acop_lock);
146 if (ret < 0)
147 goto out;
148
149 mm->context.cop_pid = ret;
150 }
151 mm->context.acop |= acop;
152
153 sync_cop(mm);
154
155 /*
156 * If this is a threaded process then there might be other threads
157 * running. We need to send an IPI to force them to pick up any
158 * change in PID and ACOP.
159 */
160 if (atomic_read(&mm->mm_users) > 1)
161 smp_call_function(sync_cop, mm, 1);
162
163 ret = mm->context.cop_pid;
164
165out:
166 spin_unlock(mm->context.cop_lockp);
167 up_read(&mm->mmap_sem);
168
169 return ret;
170}
171EXPORT_SYMBOL_GPL(use_cop);
172
173/**
174 * Stop using a coprocessor.
175 * @acop: mask of coprocessor to be stopped.
 176 * @mm: The mm the coprocessor is associated with.
177 */
178void drop_cop(unsigned long acop, struct mm_struct *mm)
179{
180 int free_pid = COP_PID_NONE;
181
182 if (!cpu_has_feature(CPU_FTR_ICSWX))
183 return;
184
185 if (WARN_ON_ONCE(!mm))
186 return;
187
188 /* We need to make sure mm_users doesn't change */
189 down_read(&mm->mmap_sem);
190 spin_lock(mm->context.cop_lockp);
191
192 mm->context.acop &= ~acop;
193
194 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
195 free_pid = mm->context.cop_pid;
196 mm->context.cop_pid = COP_PID_NONE;
197 }
198
199 sync_cop(mm);
200
201 /*
202 * If this is a threaded process then there might be other threads
203 * running. We need to send an IPI to force them to pick up any
204 * change in PID and ACOP.
205 */
206 if (atomic_read(&mm->mm_users) > 1)
207 smp_call_function(sync_cop, mm, 1);
208
209 if (free_pid != COP_PID_NONE) {
210 spin_lock(&mmu_context_acop_lock);
211 ida_remove(&cop_ida, free_pid);
212 spin_unlock(&mmu_context_acop_lock);
213 }
214
215 spin_unlock(mm->context.cop_lockp);
216 up_read(&mm->mmap_sem);
217}
218EXPORT_SYMBOL_GPL(drop_cop);
219
220#endif /* CONFIG_PPC_ICSWX */
221
26static DEFINE_SPINLOCK(mmu_context_lock); 222static DEFINE_SPINLOCK(mmu_context_lock);
27static DEFINE_IDA(mmu_context_ida); 223static DEFINE_IDA(mmu_context_ida);
28 224
@@ -31,7 +227,6 @@ static DEFINE_IDA(mmu_context_ida);
31 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, 227 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
32 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 228 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
33 */ 229 */
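
[editor's note] The arithmetic behind the "19" in the comment above, assuming (as the part of the comment just above this hunk states) that the proto-VSID space provides about 2^35 segments: 2^35 segments * 2^28 bytes/segment = 2^63 bytes of proto-VSID space, and 2^63 / 2^44 bytes per context = 2^19 contexts, hence MAX_CONTEXT = 2^19 - 1.
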
34#define NO_CONTEXT 0
35#define MAX_CONTEXT ((1UL << 19) - 1) 230#define MAX_CONTEXT ((1UL << 19) - 1)
36 231
37int __init_new_context(void) 232int __init_new_context(void)
@@ -79,6 +274,16 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
79 slice_set_user_psize(mm, mmu_virtual_psize); 274 slice_set_user_psize(mm, mmu_virtual_psize);
80 subpage_prot_init_new_context(mm); 275 subpage_prot_init_new_context(mm);
81 mm->context.id = index; 276 mm->context.id = index;
277#ifdef CONFIG_PPC_ICSWX
278 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
279 if (!mm->context.cop_lockp) {
280 __destroy_context(index);
281 subpage_prot_free(mm);
282 mm->context.id = MMU_NO_CONTEXT;
283 return -ENOMEM;
284 }
285 spin_lock_init(mm->context.cop_lockp);
286#endif /* CONFIG_PPC_ICSWX */
82 287
83 return 0; 288 return 0;
84} 289}
@@ -93,7 +298,12 @@ EXPORT_SYMBOL_GPL(__destroy_context);
93 298
94void destroy_context(struct mm_struct *mm) 299void destroy_context(struct mm_struct *mm)
95{ 300{
301#ifdef CONFIG_PPC_ICSWX
302 drop_cop(mm->context.acop, mm);
303 kfree(mm->context.cop_lockp);
304 mm->context.cop_lockp = NULL;
305#endif /* CONFIG_PPC_ICSWX */
96 __destroy_context(mm->context.id); 306 __destroy_context(mm->context.id);
97 subpage_prot_free(mm); 307 subpage_prot_free(mm);
98 mm->context.id = NO_CONTEXT; 308 mm->context.id = MMU_NO_CONTEXT;
99} 309}
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index c0aab52da3a5..336807de550e 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -338,12 +338,14 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
338 return NOTIFY_OK; 338 return NOTIFY_OK;
339 339
340 switch (action) { 340 switch (action) {
341 case CPU_ONLINE: 341 case CPU_UP_PREPARE:
342 case CPU_ONLINE_FROZEN: 342 case CPU_UP_PREPARE_FROZEN:
343 pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); 343 pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
344 stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); 344 stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
345 break; 345 break;
346#ifdef CONFIG_HOTPLUG_CPU 346#ifdef CONFIG_HOTPLUG_CPU
347 case CPU_UP_CANCELED:
348 case CPU_UP_CANCELED_FROZEN:
347 case CPU_DEAD: 349 case CPU_DEAD:
348 case CPU_DEAD_FROZEN: 350 case CPU_DEAD_FROZEN:
349 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); 351 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
@@ -407,7 +409,17 @@ void __init mmu_context_init(void)
407 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { 409 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
408 first_context = 1; 410 first_context = 1;
409 last_context = 65535; 411 last_context = 65535;
410 } else { 412 } else
413#ifdef CONFIG_PPC_BOOK3E_MMU
414 if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
415 u32 mmucfg = mfspr(SPRN_MMUCFG);
416 u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
417 >> MMUCFG_PIDSIZE_SHIFT;
418 first_context = 1;
419 last_context = (1UL << (pid_bits + 1)) - 1;
420 } else
421#endif
422 {
411 first_context = 1; 423 first_context = 1;
412 last_context = 255; 424 last_context = 255;
413 } 425 }
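
[editor's note] A quick worked example of the new Book3E branch above; the concrete PIDSIZE value is illustrative, not taken from the patch. If MMUCFG[PIDSIZE] reads 13, the code computes last_context = (1UL << (13 + 1)) - 1 = 16383, i.e. 14-bit PIDs with context 0 left reserved (first_context = 1).
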
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5ec1dad2a19d..2164006fe170 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -311,14 +311,13 @@ EXPORT_SYMBOL_GPL(of_node_to_nid);
311static int __init find_min_common_depth(void) 311static int __init find_min_common_depth(void)
312{ 312{
313 int depth; 313 int depth;
314 struct device_node *rtas_root;
315 struct device_node *chosen; 314 struct device_node *chosen;
315 struct device_node *root;
316 const char *vec5; 316 const char *vec5;
317 317
318 rtas_root = of_find_node_by_path("/rtas"); 318 root = of_find_node_by_path("/rtas");
319 319 if (!root)
320 if (!rtas_root) 320 root = of_find_node_by_path("/");
321 return -1;
322 321
323 /* 322 /*
324 * This property is a set of 32-bit integers, each representing 323 * This property is a set of 32-bit integers, each representing
@@ -332,7 +331,7 @@ static int __init find_min_common_depth(void)
332 * NUMA boundary and the following are progressively less significant 331 * NUMA boundary and the following are progressively less significant
333 * boundaries. There can be more than one level of NUMA. 332 * boundaries. There can be more than one level of NUMA.
334 */ 333 */
335 distance_ref_points = of_get_property(rtas_root, 334 distance_ref_points = of_get_property(root,
336 "ibm,associativity-reference-points", 335 "ibm,associativity-reference-points",
337 &distance_ref_points_depth); 336 &distance_ref_points_depth);
338 337
@@ -376,11 +375,11 @@ static int __init find_min_common_depth(void)
376 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; 375 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
377 } 376 }
378 377
379 of_node_put(rtas_root); 378 of_node_put(root);
380 return depth; 379 return depth;
381 380
382err: 381err:
383 of_node_put(rtas_root); 382 of_node_put(root);
384 return -1; 383 return -1;
385} 384}
386 385
@@ -1453,7 +1452,7 @@ int arch_update_cpu_topology(void)
1453 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1452 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1454 struct sys_device *sysdev; 1453 struct sys_device *sysdev;
1455 1454
1456 for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { 1455 for_each_cpu(cpu,&cpu_associativity_changes_mask) {
1457 vphn_get_associativity(cpu, associativity); 1456 vphn_get_associativity(cpu, associativity);
1458 nid = associativity_to_nid(associativity); 1457 nid = associativity_to_nid(associativity);
1459 1458
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8dc41c0157fe..51f87956f8f8 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -133,7 +133,15 @@ ioremap(phys_addr_t addr, unsigned long size)
133EXPORT_SYMBOL(ioremap); 133EXPORT_SYMBOL(ioremap);
134 134
135void __iomem * 135void __iomem *
136ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) 136ioremap_wc(phys_addr_t addr, unsigned long size)
137{
138 return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
139 __builtin_return_address(0));
140}
141EXPORT_SYMBOL(ioremap_wc);
142
143void __iomem *
144ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
137{ 145{
138 /* writeable implies dirty for kernel addresses */ 146 /* writeable implies dirty for kernel addresses */
139 if (flags & _PAGE_RW) 147 if (flags & _PAGE_RW)
@@ -152,7 +160,7 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
152 160
153 return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 161 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
154} 162}
155EXPORT_SYMBOL(ioremap_flags); 163EXPORT_SYMBOL(ioremap_prot);
156 164
157void __iomem * 165void __iomem *
158__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) 166__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 88927a05cdc2..6e595f6496d4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -255,7 +255,17 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
255 return __ioremap_caller(addr, size, flags, caller); 255 return __ioremap_caller(addr, size, flags, caller);
256} 256}
257 257
258void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size, 258void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
259{
260 unsigned long flags = _PAGE_NO_CACHE;
261 void *caller = __builtin_return_address(0);
262
263 if (ppc_md.ioremap)
264 return ppc_md.ioremap(addr, size, flags, caller);
265 return __ioremap_caller(addr, size, flags, caller);
266}
267
268void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
259 unsigned long flags) 269 unsigned long flags)
260{ 270{
261 void *caller = __builtin_return_address(0); 271 void *caller = __builtin_return_address(0);
@@ -311,7 +321,8 @@ void iounmap(volatile void __iomem *token)
311} 321}
312 322
313EXPORT_SYMBOL(ioremap); 323EXPORT_SYMBOL(ioremap);
314EXPORT_SYMBOL(ioremap_flags); 324EXPORT_SYMBOL(ioremap_wc);
325EXPORT_SYMBOL(ioremap_prot);
315EXPORT_SYMBOL(__ioremap); 326EXPORT_SYMBOL(__ioremap);
316EXPORT_SYMBOL(__ioremap_at); 327EXPORT_SYMBOL(__ioremap_at);
317EXPORT_SYMBOL(iounmap); 328EXPORT_SYMBOL(iounmap);
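
[editor's note] Since these hunks export ioremap_wc() and rename ioremap_flags() to ioremap_prot(), here is a minimal, hypothetical caller-side sketch of the new names; the function names and the framebuffer use case are illustrative only and not part of the patch.

	/* Hypothetical driver code exercising the renamed/new mapping helpers. */
	#include <linux/io.h>

	static void __iomem *fb_base;

	static int map_framebuffer(phys_addr_t phys, unsigned long size)
	{
		/* request a write-combining style mapping, e.g. for a framebuffer */
		fb_base = ioremap_wc(phys, size);
		if (!fb_base)
			return -ENOMEM;
		return 0;
	}

	static void unmap_framebuffer(void)
	{
		iounmap(fb_base);	/* unchanged: same iounmap() as before */
		fb_base = NULL;
	}
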
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 1d98ecc8eecd..e22276cb67a4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -24,6 +24,7 @@
24#include <asm/firmware.h> 24#include <asm/firmware.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <asm/udbg.h> 26#include <asm/udbg.h>
27#include <asm/code-patching.h>
27 28
28 29
29extern void slb_allocate_realmode(unsigned long ea); 30extern void slb_allocate_realmode(unsigned long ea);
@@ -166,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
166 int esid_1t_count; 167 int esid_1t_count;
167 168
168 /* System is not 1T segment size capable. */ 169 /* System is not 1T segment size capable. */
169 if (!cpu_has_feature(CPU_FTR_1T_SEGMENT)) 170 if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
170 return (GET_ESID(addr1) == GET_ESID(addr2)); 171 return (GET_ESID(addr1) == GET_ESID(addr2));
171 172
172 esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) + 173 esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
@@ -201,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
201 */ 202 */
202 hard_irq_disable(); 203 hard_irq_disable();
203 offset = get_paca()->slb_cache_ptr; 204 offset = get_paca()->slb_cache_ptr;
204 if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && 205 if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
205 offset <= SLB_CACHE_ENTRIES) { 206 offset <= SLB_CACHE_ENTRIES) {
206 int i; 207 int i;
207 asm volatile("isync" : : : "memory"); 208 asm volatile("isync" : : : "memory");
@@ -249,9 +250,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
249static inline void patch_slb_encoding(unsigned int *insn_addr, 250static inline void patch_slb_encoding(unsigned int *insn_addr,
250 unsigned int immed) 251 unsigned int immed)
251{ 252{
252 *insn_addr = (*insn_addr & 0xffff0000) | immed; 253 int insn = (*insn_addr & 0xffff0000) | immed;
253 flush_icache_range((unsigned long)insn_addr, 4+ 254 patch_instruction(insn_addr, insn);
254 (unsigned long)insn_addr);
255} 255}
256 256
257void slb_set_size(u16 size) 257void slb_set_size(u16 size)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 95ce35581696..ef653dc95b65 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -58,7 +58,7 @@ _GLOBAL(slb_miss_kernel_load_linear)
58 li r11,0 58 li r11,0
59BEGIN_FTR_SECTION 59BEGIN_FTR_SECTION
60 b slb_finish_load 60 b slb_finish_load
61END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) 61END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
62 b slb_finish_load_1T 62 b slb_finish_load_1T
63 63
641: 641:
@@ -87,7 +87,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
876: 876:
88BEGIN_FTR_SECTION 88BEGIN_FTR_SECTION
89 b slb_finish_load 89 b slb_finish_load
90END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) 90END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
91 b slb_finish_load_1T 91 b slb_finish_load_1T
92 92
930: /* user address: proto-VSID = context << 15 | ESID. First check 930: /* user address: proto-VSID = context << 15 | ESID. First check
@@ -138,11 +138,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
138 ld r9,PACACONTEXTID(r13) 138 ld r9,PACACONTEXTID(r13)
139BEGIN_FTR_SECTION 139BEGIN_FTR_SECTION
140 cmpldi r10,0x1000 140 cmpldi r10,0x1000
141END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 141END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
142 rldimi r10,r9,USER_ESID_BITS,0 142 rldimi r10,r9,USER_ESID_BITS,0
143BEGIN_FTR_SECTION 143BEGIN_FTR_SECTION
144 bge slb_finish_load_1T 144 bge slb_finish_load_1T
145END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) 145END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
146 b slb_finish_load 146 b slb_finish_load
147 147
1488: /* invalid EA */ 1488: /* invalid EA */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 446a01842a73..41e31642a86a 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -243,7 +243,7 @@ void __init stabs_alloc(void)
243{ 243{
244 int cpu; 244 int cpu;
245 245
246 if (cpu_has_feature(CPU_FTR_SLB)) 246 if (mmu_has_feature(MMU_FTR_SLB))
247 return; 247 return;
248 248
249 for_each_possible_cpu(cpu) { 249 for_each_possible_cpu(cpu) {
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index aa46e9d1e771..19395f18b1db 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -87,7 +87,7 @@ static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
87 mpic_setup_this_cpu(); 87 mpic_setup_this_cpu();
88} 88}
89 89
90static void __cpuinit smp_iss4xx_kick_cpu(int cpu) 90static int __cpuinit smp_iss4xx_kick_cpu(int cpu)
91{ 91{
92 struct device_node *cpunode = of_get_cpu_node(cpu, NULL); 92 struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
93 const u64 *spin_table_addr_prop; 93 const u64 *spin_table_addr_prop;
@@ -104,7 +104,7 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
104 NULL); 104 NULL);
105 if (spin_table_addr_prop == NULL) { 105 if (spin_table_addr_prop == NULL) {
106 pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); 106 pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
107 return; 107 return -ENOENT;
108 } 108 }
109 109
110 /* Assume it's mapped as part of the linear mapping. This is a bit 110 /* Assume it's mapped as part of the linear mapping. This is a bit
@@ -117,6 +117,8 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
117 smp_wmb(); 117 smp_wmb();
118 spin_table[1] = __pa(start_secondary_47x); 118 spin_table[1] = __pa(start_secondary_47x);
119 mb(); 119 mb();
120
121 return 0;
120} 122}
121 123
122static struct smp_ops_t iss_smp_ops = { 124static struct smp_ops_t iss_smp_ops = {
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index cfc4b2009982..9f09319352c0 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -61,7 +61,7 @@ irq_to_pic_bit(unsigned int irq)
61static void 61static void
62cpld_mask_irq(struct irq_data *d) 62cpld_mask_irq(struct irq_data *d)
63{ 63{
64 unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq; 64 unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d);
65 void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); 65 void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
66 66
67 out_8(pic_mask, 67 out_8(pic_mask,
@@ -71,7 +71,7 @@ cpld_mask_irq(struct irq_data *d)
71static void 71static void
72cpld_unmask_irq(struct irq_data *d) 72cpld_unmask_irq(struct irq_data *d)
73{ 73{
74 unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq; 74 unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d);
75 void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); 75 void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
76 76
77 out_8(pic_mask, 77 out_8(pic_mask,
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
97 status |= (ignore | mask); 97 status |= (ignore | mask);
98 98
99 if (status == 0xff) 99 if (status == 0xff)
100 return NO_IRQ_IGNORE; 100 return NO_IRQ;
101 101
102 cpld_irq = ffz(status) + offset; 102 cpld_irq = ffz(status) + offset;
103 103
@@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
109{ 109{
110 irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, 110 irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
111 &cpld_regs->pci_mask); 111 &cpld_regs->pci_mask);
112 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { 112 if (irq != NO_IRQ) {
113 generic_handle_irq(irq); 113 generic_handle_irq(irq);
114 return; 114 return;
115 } 115 }
116 116
117 irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, 117 irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
118 &cpld_regs->misc_mask); 118 &cpld_regs->misc_mask);
119 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { 119 if (irq != NO_IRQ) {
120 generic_handle_irq(irq); 120 generic_handle_irq(irq);
121 return; 121 return;
122 } 122 }
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 57a6a349e932..96f85e5e0cd3 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -56,7 +56,7 @@ static void media5200_irq_unmask(struct irq_data *d)
56 56
57 spin_lock_irqsave(&media5200_irq.lock, flags); 57 spin_lock_irqsave(&media5200_irq.lock, flags);
58 val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); 58 val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
59 val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq); 59 val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d));
60 out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); 60 out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
61 spin_unlock_irqrestore(&media5200_irq.lock, flags); 61 spin_unlock_irqrestore(&media5200_irq.lock, flags);
62} 62}
@@ -68,7 +68,7 @@ static void media5200_irq_mask(struct irq_data *d)
68 68
69 spin_lock_irqsave(&media5200_irq.lock, flags); 69 spin_lock_irqsave(&media5200_irq.lock, flags);
70 val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); 70 val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
71 val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq)); 71 val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)));
72 out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); 72 out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
73 spin_unlock_irqrestore(&media5200_irq.lock, flags); 73 spin_unlock_irqrestore(&media5200_irq.lock, flags);
74} 74}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1dd15400f6f0..1a9a49570579 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -157,48 +157,30 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
157 */ 157 */
158static void mpc52xx_extirq_mask(struct irq_data *d) 158static void mpc52xx_extirq_mask(struct irq_data *d)
159{ 159{
160 int irq; 160 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
161 int l2irq;
162
163 irq = irq_map[d->irq].hwirq;
164 l2irq = irq & MPC52xx_IRQ_L2_MASK;
165
166 io_be_clrbit(&intr->ctrl, 11 - l2irq); 161 io_be_clrbit(&intr->ctrl, 11 - l2irq);
167} 162}
168 163
169static void mpc52xx_extirq_unmask(struct irq_data *d) 164static void mpc52xx_extirq_unmask(struct irq_data *d)
170{ 165{
171 int irq; 166 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
172 int l2irq;
173
174 irq = irq_map[d->irq].hwirq;
175 l2irq = irq & MPC52xx_IRQ_L2_MASK;
176
177 io_be_setbit(&intr->ctrl, 11 - l2irq); 167 io_be_setbit(&intr->ctrl, 11 - l2irq);
178} 168}
179 169
180static void mpc52xx_extirq_ack(struct irq_data *d) 170static void mpc52xx_extirq_ack(struct irq_data *d)
181{ 171{
182 int irq; 172 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
183 int l2irq;
184
185 irq = irq_map[d->irq].hwirq;
186 l2irq = irq & MPC52xx_IRQ_L2_MASK;
187
188 io_be_setbit(&intr->ctrl, 27-l2irq); 173 io_be_setbit(&intr->ctrl, 27-l2irq);
189} 174}
190 175
191static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) 176static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
192{ 177{
193 u32 ctrl_reg, type; 178 u32 ctrl_reg, type;
194 int irq; 179 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
195 int l2irq;
196 void *handler = handle_level_irq; 180 void *handler = handle_level_irq;
197 181
198 irq = irq_map[d->irq].hwirq; 182 pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__,
199 l2irq = irq & MPC52xx_IRQ_L2_MASK; 183 (int) irqd_to_hwirq(d), l2irq, flow_type);
200
201 pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
202 184
203 switch (flow_type) { 185 switch (flow_type) {
204 case IRQF_TRIGGER_HIGH: type = 0; break; 186 case IRQF_TRIGGER_HIGH: type = 0; break;
@@ -237,23 +219,13 @@ static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type)
237 219
238static void mpc52xx_main_mask(struct irq_data *d) 220static void mpc52xx_main_mask(struct irq_data *d)
239{ 221{
240 int irq; 222 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
241 int l2irq;
242
243 irq = irq_map[d->irq].hwirq;
244 l2irq = irq & MPC52xx_IRQ_L2_MASK;
245
246 io_be_setbit(&intr->main_mask, 16 - l2irq); 223 io_be_setbit(&intr->main_mask, 16 - l2irq);
247} 224}
248 225
249static void mpc52xx_main_unmask(struct irq_data *d) 226static void mpc52xx_main_unmask(struct irq_data *d)
250{ 227{
251 int irq; 228 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
252 int l2irq;
253
254 irq = irq_map[d->irq].hwirq;
255 l2irq = irq & MPC52xx_IRQ_L2_MASK;
256
257 io_be_clrbit(&intr->main_mask, 16 - l2irq); 229 io_be_clrbit(&intr->main_mask, 16 - l2irq);
258} 230}
259 231
@@ -270,23 +242,13 @@ static struct irq_chip mpc52xx_main_irqchip = {
270 */ 242 */
271static void mpc52xx_periph_mask(struct irq_data *d) 243static void mpc52xx_periph_mask(struct irq_data *d)
272{ 244{
273 int irq; 245 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
274 int l2irq;
275
276 irq = irq_map[d->irq].hwirq;
277 l2irq = irq & MPC52xx_IRQ_L2_MASK;
278
279 io_be_setbit(&intr->per_mask, 31 - l2irq); 246 io_be_setbit(&intr->per_mask, 31 - l2irq);
280} 247}
281 248
282static void mpc52xx_periph_unmask(struct irq_data *d) 249static void mpc52xx_periph_unmask(struct irq_data *d)
283{ 250{
284 int irq; 251 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
285 int l2irq;
286
287 irq = irq_map[d->irq].hwirq;
288 l2irq = irq & MPC52xx_IRQ_L2_MASK;
289
290 io_be_clrbit(&intr->per_mask, 31 - l2irq); 252 io_be_clrbit(&intr->per_mask, 31 - l2irq);
291} 253}
292 254
@@ -303,34 +265,19 @@ static struct irq_chip mpc52xx_periph_irqchip = {
303 */ 265 */
304static void mpc52xx_sdma_mask(struct irq_data *d) 266static void mpc52xx_sdma_mask(struct irq_data *d)
305{ 267{
306 int irq; 268 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
307 int l2irq;
308
309 irq = irq_map[d->irq].hwirq;
310 l2irq = irq & MPC52xx_IRQ_L2_MASK;
311
312 io_be_setbit(&sdma->IntMask, l2irq); 269 io_be_setbit(&sdma->IntMask, l2irq);
313} 270}
314 271
315static void mpc52xx_sdma_unmask(struct irq_data *d) 272static void mpc52xx_sdma_unmask(struct irq_data *d)
316{ 273{
317 int irq; 274 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
318 int l2irq;
319
320 irq = irq_map[d->irq].hwirq;
321 l2irq = irq & MPC52xx_IRQ_L2_MASK;
322
323 io_be_clrbit(&sdma->IntMask, l2irq); 275 io_be_clrbit(&sdma->IntMask, l2irq);
324} 276}
325 277
326static void mpc52xx_sdma_ack(struct irq_data *d) 278static void mpc52xx_sdma_ack(struct irq_data *d)
327{ 279{
328 int irq; 280 int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
329 int l2irq;
330
331 irq = irq_map[d->irq].hwirq;
332 l2irq = irq & MPC52xx_IRQ_L2_MASK;
333
334 out_be32(&sdma->IntPend, 1 << l2irq); 281 out_be32(&sdma->IntPend, 1 << l2irq);
335} 282}
336 283
@@ -539,7 +486,7 @@ void __init mpc52xx_init_irq(void)
539unsigned int mpc52xx_get_irq(void) 486unsigned int mpc52xx_get_irq(void)
540{ 487{
541 u32 status; 488 u32 status;
542 int irq = NO_IRQ_IGNORE; 489 int irq;
543 490
544 status = in_be32(&intr->enc_status); 491 status = in_be32(&intr->enc_status);
545 if (status & 0x00000400) { /* critical */ 492 if (status & 0x00000400) { /* critical */
@@ -562,6 +509,8 @@ unsigned int mpc52xx_get_irq(void)
562 } else { 509 } else {
563 irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); 510 irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
564 } 511 }
512 } else {
513 return NO_IRQ;
565 } 514 }
566 515
567 return irq_linear_revmap(mpc52xx_irqhost, irq); 516 return irq_linear_revmap(mpc52xx_irqhost, irq);
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 4a4eb6ffa12f..8ccf9ed62fe2 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -42,7 +42,7 @@ struct pq2ads_pci_pic {
42static void pq2ads_pci_mask_irq(struct irq_data *d) 42static void pq2ads_pci_mask_irq(struct irq_data *d)
43{ 43{
44 struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); 44 struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
45 int irq = NUM_IRQS - virq_to_hw(d->irq) - 1; 45 int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
46 46
47 if (irq != -1) { 47 if (irq != -1) {
48 unsigned long flags; 48 unsigned long flags;
@@ -58,7 +58,7 @@ static void pq2ads_pci_mask_irq(struct irq_data *d)
58static void pq2ads_pci_unmask_irq(struct irq_data *d) 58static void pq2ads_pci_unmask_irq(struct irq_data *d)
59{ 59{
60 struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); 60 struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
61 int irq = NUM_IRQS - virq_to_hw(d->irq) - 1; 61 int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
62 62
63 if (irq != -1) { 63 if (irq != -1) {
64 unsigned long flags; 64 unsigned long flags;
@@ -112,16 +112,8 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
112 return 0; 112 return 0;
113} 113}
114 114
115static void pci_host_unmap(struct irq_host *h, unsigned int virq)
116{
117 /* remove chip and handler */
118 irq_set_chip_data(virq, NULL);
119 irq_set_chip(virq, NULL);
120}
121
122static struct irq_host_ops pci_pic_host_ops = { 115static struct irq_host_ops pci_pic_host_ops = {
123 .map = pci_pic_host_map, 116 .map = pci_pic_host_map,
124 .unmap = pci_host_unmap,
125}; 117};
126 118
127int __init pq2ads_pci_init_irq(void) 119int __init pq2ads_pci_init_irq(void)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 0d00ff9d05a0..d6a93a10c0f5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -41,7 +41,7 @@ extern void __early_start(void);
41#define NUM_BOOT_ENTRY 8 41#define NUM_BOOT_ENTRY 8
42#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) 42#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
43 43
44static void __init 44static int __init
45smp_85xx_kick_cpu(int nr) 45smp_85xx_kick_cpu(int nr)
46{ 46{
47 unsigned long flags; 47 unsigned long flags;
@@ -60,7 +60,7 @@ smp_85xx_kick_cpu(int nr)
60 60
61 if (cpu_rel_addr == NULL) { 61 if (cpu_rel_addr == NULL) {
62 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); 62 printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
63 return; 63 return -ENOENT;
64 } 64 }
65 65
66 /* 66 /*
@@ -107,6 +107,8 @@ smp_85xx_kick_cpu(int nr)
107 iounmap(bptr_vaddr); 107 iounmap(bptr_vaddr);
108 108
109 pr_debug("waited %d msecs for CPU #%d.\n", n, nr); 109 pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
110
111 return 0;
110} 112}
111 113
112static void __init 114static void __init
@@ -233,8 +235,10 @@ void __init mpc85xx_smp_init(void)
233 smp_85xx_ops.message_pass = smp_mpic_message_pass; 235 smp_85xx_ops.message_pass = smp_mpic_message_pass;
234 } 236 }
235 237
236 if (cpu_has_feature(CPU_FTR_DBELL)) 238 if (cpu_has_feature(CPU_FTR_DBELL)) {
237 smp_85xx_ops.message_pass = doorbell_message_pass; 239 smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass;
240 smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
241 }
238 242
239 BUG_ON(!smp_85xx_ops.message_pass); 243 BUG_ON(!smp_85xx_ops.message_pass);
240 244
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index db864623b4ae..12cb9bb2cc68 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -48,8 +48,6 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
48 [8] = {0, IRQ_TYPE_LEVEL_HIGH}, 48 [8] = {0, IRQ_TYPE_LEVEL_HIGH},
49}; 49};
50 50
51#define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
52
53static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); 51static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
54 52
55static void __iomem *socrates_fpga_pic_iobase; 53static void __iomem *socrates_fpga_pic_iobase;
@@ -110,11 +108,9 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
110static void socrates_fpga_pic_ack(struct irq_data *d) 108static void socrates_fpga_pic_ack(struct irq_data *d)
111{ 109{
112 unsigned long flags; 110 unsigned long flags;
113 unsigned int hwirq, irq_line; 111 unsigned int irq_line, hwirq = irqd_to_hwirq(d);
114 uint32_t mask; 112 uint32_t mask;
115 113
116 hwirq = socrates_fpga_irq_to_hw(d->irq);
117
118 irq_line = fpga_irqs[hwirq].irq_line; 114 irq_line = fpga_irqs[hwirq].irq_line;
119 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 115 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
120 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 116 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -127,12 +123,10 @@ static void socrates_fpga_pic_ack(struct irq_data *d)
127static void socrates_fpga_pic_mask(struct irq_data *d) 123static void socrates_fpga_pic_mask(struct irq_data *d)
128{ 124{
129 unsigned long flags; 125 unsigned long flags;
130 unsigned int hwirq; 126 unsigned int hwirq = irqd_to_hwirq(d);
131 int irq_line; 127 int irq_line;
132 u32 mask; 128 u32 mask;
133 129
134 hwirq = socrates_fpga_irq_to_hw(d->irq);
135
136 irq_line = fpga_irqs[hwirq].irq_line; 130 irq_line = fpga_irqs[hwirq].irq_line;
137 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 131 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
138 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 132 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -145,12 +139,10 @@ static void socrates_fpga_pic_mask(struct irq_data *d)
145static void socrates_fpga_pic_mask_ack(struct irq_data *d) 139static void socrates_fpga_pic_mask_ack(struct irq_data *d)
146{ 140{
147 unsigned long flags; 141 unsigned long flags;
148 unsigned int hwirq; 142 unsigned int hwirq = irqd_to_hwirq(d);
149 int irq_line; 143 int irq_line;
150 u32 mask; 144 u32 mask;
151 145
152 hwirq = socrates_fpga_irq_to_hw(d->irq);
153
154 irq_line = fpga_irqs[hwirq].irq_line; 146 irq_line = fpga_irqs[hwirq].irq_line;
155 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 147 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
156 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 148 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -164,12 +156,10 @@ static void socrates_fpga_pic_mask_ack(struct irq_data *d)
164static void socrates_fpga_pic_unmask(struct irq_data *d) 156static void socrates_fpga_pic_unmask(struct irq_data *d)
165{ 157{
166 unsigned long flags; 158 unsigned long flags;
167 unsigned int hwirq; 159 unsigned int hwirq = irqd_to_hwirq(d);
168 int irq_line; 160 int irq_line;
169 u32 mask; 161 u32 mask;
170 162
171 hwirq = socrates_fpga_irq_to_hw(d->irq);
172
173 irq_line = fpga_irqs[hwirq].irq_line; 163 irq_line = fpga_irqs[hwirq].irq_line;
174 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 164 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
175 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 165 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -182,12 +172,10 @@ static void socrates_fpga_pic_unmask(struct irq_data *d)
182static void socrates_fpga_pic_eoi(struct irq_data *d) 172static void socrates_fpga_pic_eoi(struct irq_data *d)
183{ 173{
184 unsigned long flags; 174 unsigned long flags;
185 unsigned int hwirq; 175 unsigned int hwirq = irqd_to_hwirq(d);
186 int irq_line; 176 int irq_line;
187 u32 mask; 177 u32 mask;
188 178
189 hwirq = socrates_fpga_irq_to_hw(d->irq);
190
191 irq_line = fpga_irqs[hwirq].irq_line; 179 irq_line = fpga_irqs[hwirq].irq_line;
192 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 180 raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
193 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 181 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -201,12 +189,10 @@ static int socrates_fpga_pic_set_type(struct irq_data *d,
201 unsigned int flow_type) 189 unsigned int flow_type)
202{ 190{
203 unsigned long flags; 191 unsigned long flags;
204 unsigned int hwirq; 192 unsigned int hwirq = irqd_to_hwirq(d);
205 int polarity; 193 int polarity;
206 u32 mask; 194 u32 mask;
207 195
208 hwirq = socrates_fpga_irq_to_hw(d->irq);
209
210 if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) 196 if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE)
211 return -EINVAL; 197 return -EINVAL;
212 198
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 0beec7d5566b..94594e58594c 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -46,8 +46,6 @@
46#define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0) 46#define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0)
47#define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1) 47#define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1)
48 48
49#define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
50
51 49
52static DEFINE_RAW_SPINLOCK(gef_pic_lock); 50static DEFINE_RAW_SPINLOCK(gef_pic_lock);
53 51
@@ -113,11 +111,9 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
113static void gef_pic_mask(struct irq_data *d) 111static void gef_pic_mask(struct irq_data *d)
114{ 112{
115 unsigned long flags; 113 unsigned long flags;
116 unsigned int hwirq; 114 unsigned int hwirq = irqd_to_hwirq(d);
117 u32 mask; 115 u32 mask;
118 116
119 hwirq = gef_irq_to_hw(d->irq);
120
121 raw_spin_lock_irqsave(&gef_pic_lock, flags); 117 raw_spin_lock_irqsave(&gef_pic_lock, flags);
122 mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); 118 mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
123 mask &= ~(1 << hwirq); 119 mask &= ~(1 << hwirq);
@@ -136,11 +132,9 @@ static void gef_pic_mask_ack(struct irq_data *d)
136static void gef_pic_unmask(struct irq_data *d) 132static void gef_pic_unmask(struct irq_data *d)
137{ 133{
138 unsigned long flags; 134 unsigned long flags;
139 unsigned int hwirq; 135 unsigned int hwirq = irqd_to_hwirq(d);
140 u32 mask; 136 u32 mask;
141 137
142 hwirq = gef_irq_to_hw(d->irq);
143
144 raw_spin_lock_irqsave(&gef_pic_lock, flags); 138 raw_spin_lock_irqsave(&gef_pic_lock, flags);
145 mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); 139 mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
146 mask |= (1 << hwirq); 140 mask |= (1 << hwirq);
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 018cc67be426..a896511690c2 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -66,7 +66,7 @@ static void __init mpc8610_suspend_init(void)
66 return; 66 return;
67 } 67 }
68 68
69 ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL); 69 ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL);
70 if (ret) { 70 if (ret) {
71 pr_err("%s: can't request pixis event IRQ: %d\n", 71 pr_err("%s: can't request pixis event IRQ: %d\n",
72 __func__, ret); 72 __func__, ret);
@@ -105,45 +105,77 @@ machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
105 105
106#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 106#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
107 107
108static u32 get_busfreq(void) 108/*
109{ 109 * DIU Area Descriptor
110 struct device_node *node; 110 *
111 111 * The MPC8610 reference manual shows the bits of the AD register in
112 u32 fs_busfreq = 0; 112 * little-endian order, which causes the BLUE_C field to be split into two
113 node = of_find_node_by_type(NULL, "cpu"); 113 * parts. To simplify the definition of the MAKE_AD() macro, we define the
114 if (node) { 114 * fields in big-endian order and byte-swap the result.
115 unsigned int size; 115 *
116 const unsigned int *prop = 116 * So even though the registers don't look like they're in the
117 of_get_property(node, "bus-frequency", &size); 117 * same bit positions as they are on the P1022, the same value is written to
118 if (prop) 118 * the AD register on the MPC8610 and on the P1022.
119 fs_busfreq = *prop; 119 */
120 of_node_put(node); 120#define AD_BYTE_F 0x10000000
121 }; 121#define AD_ALPHA_C_MASK 0x0E000000
122 return fs_busfreq; 122#define AD_ALPHA_C_SHIFT 25
123} 123#define AD_BLUE_C_MASK 0x01800000
124#define AD_BLUE_C_SHIFT 23
125#define AD_GREEN_C_MASK 0x00600000
126#define AD_GREEN_C_SHIFT 21
127#define AD_RED_C_MASK 0x00180000
128#define AD_RED_C_SHIFT 19
129#define AD_PALETTE 0x00040000
130#define AD_PIXEL_S_MASK 0x00030000
131#define AD_PIXEL_S_SHIFT 16
132#define AD_COMP_3_MASK 0x0000F000
133#define AD_COMP_3_SHIFT 12
134#define AD_COMP_2_MASK 0x00000F00
135#define AD_COMP_2_SHIFT 8
136#define AD_COMP_1_MASK 0x000000F0
137#define AD_COMP_1_SHIFT 4
138#define AD_COMP_0_MASK 0x0000000F
139#define AD_COMP_0_SHIFT 0
140
141#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
142 cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
143 (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
144 (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
145 (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
146 (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))
124 147
125unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel, 148unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
126 int monitor_port) 149 int monitor_port)
127{ 150{
128 static const unsigned long pixelformat[][3] = { 151 static const unsigned long pixelformat[][3] = {
129 {0x88882317, 0x88083218, 0x65052119}, 152 {
130 {0x88883316, 0x88082219, 0x65053118}, 153 MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8),
154 MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0),
155 MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0)
156 },
157 {
158 MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8),
159 MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0),
160 MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0)
161 },
131 }; 162 };
132 unsigned int pix_fmt, arch_monitor; 163 unsigned int arch_monitor;
133 164
165 /* The DVI port is mis-wired on revision 1 of this board. */
134 arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1; 166 arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1;
135 /* DVI port for board version 0x01 */ 167
136 168 switch (bits_per_pixel) {
137 if (bits_per_pixel == 32) 169 case 32:
138 pix_fmt = pixelformat[arch_monitor][0]; 170 return pixelformat[arch_monitor][0];
139 else if (bits_per_pixel == 24) 171 case 24:
140 pix_fmt = pixelformat[arch_monitor][1]; 172 return pixelformat[arch_monitor][1];
141 else if (bits_per_pixel == 16) 173 case 16:
142 pix_fmt = pixelformat[arch_monitor][2]; 174 return pixelformat[arch_monitor][2];
143 else 175 default:
144 pix_fmt = pixelformat[1][0]; 176 pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
145 177 return 0;
146 return pix_fmt; 178 }
147} 179}
148 180
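
[editor's note] A sanity check on the MAKE_AD() conversion above, worked out by hand rather than taken from the patch description: MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8) sums to 0x17238888 before the swap (0x10000000 | 3<<25 | 2<<23 | 1<<21 | 0<<19 | 3<<16 | 8<<12 | 8<<8 | 8<<4 | 8), and cpu_to_le32() of that value on big-endian PowerPC yields the byte sequence 0x88 0x88 0x23 0x17, i.e. the 0x88882317 constant that the first entry of the old hard-coded table used.
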
149void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base) 181void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
@@ -190,8 +222,7 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
190 } 222 }
191 223
192 /* Pixel Clock configuration */ 224 /* Pixel Clock configuration */
193 pr_debug("DIU: Bus Frequency = %d\n", get_busfreq()); 225 speed_ccb = fsl_get_sys_freq();
194 speed_ccb = get_busfreq();
195 226
196 /* Calculate the pixel clock with the smallest error */ 227 /* Calculate the pixel clock with the smallest error */
197 /* calculate the following in steps to avoid overflow */ 228 /* calculate the following in steps to avoid overflow */
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index eacea0e3fcc8..af09baee22cb 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -56,7 +56,7 @@ smp_86xx_release_core(int nr)
56} 56}
57 57
58 58
59static void __init 59static int __init
60smp_86xx_kick_cpu(int nr) 60smp_86xx_kick_cpu(int nr)
61{ 61{
62 unsigned int save_vector; 62 unsigned int save_vector;
@@ -65,7 +65,7 @@ smp_86xx_kick_cpu(int nr)
65 unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100); 65 unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
66 66
67 if (nr < 0 || nr >= NR_CPUS) 67 if (nr < 0 || nr >= NR_CPUS)
68 return; 68 return -ENOENT;
69 69
70 pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr); 70 pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
71 71
@@ -92,6 +92,8 @@ smp_86xx_kick_cpu(int nr)
92 local_irq_restore(flags); 92 local_irq_restore(flags);
93 93
94 pr_debug("wait CPU #%d for %d msecs.\n", nr, n); 94 pr_debug("wait CPU #%d for %d msecs.\n", nr, n);
95
96 return 0;
95} 97}
96 98
97 99
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 9ecce995dd4b..1e121088826f 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -150,7 +150,7 @@ void __init mpc8xx_calibrate_decr(void)
150 */ 150 */
151 cpu = of_find_node_by_type(NULL, "cpu"); 151 cpu = of_find_node_by_type(NULL, "cpu");
152 virq= irq_of_parse_and_map(cpu, 0); 152 virq= irq_of_parse_and_map(cpu, 0);
153 irq = irq_map[virq].hwirq; 153 irq = virq_to_hw(virq);
154 154
155 sys_tmr2 = immr_map(im_sit); 155 sys_tmr2 = immr_map(im_sit);
156 out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | 156 out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index f7b07720aa30..f970ca2b180c 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,7 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
20source "arch/powerpc/platforms/44x/Kconfig" 20source "arch/powerpc/platforms/44x/Kconfig"
21source "arch/powerpc/platforms/40x/Kconfig" 21source "arch/powerpc/platforms/40x/Kconfig"
22source "arch/powerpc/platforms/amigaone/Kconfig" 22source "arch/powerpc/platforms/amigaone/Kconfig"
23source "arch/powerpc/platforms/wsp/Kconfig"
23 24
24config KVM_GUEST 25config KVM_GUEST
25 bool "KVM Guest support" 26 bool "KVM Guest support"
@@ -56,16 +57,19 @@ config UDBG_RTAS_CONSOLE
56 depends on PPC_RTAS 57 depends on PPC_RTAS
57 default n 58 default n
58 59
60config PPC_SMP_MUXED_IPI
61 bool
62 help
63	  Select this option if your platform supports SMP and your
64	  interrupt controller provides fewer than 4 interrupts to each
65	  CPU. This will enable the generic code to multiplex the 4
66	  messages onto one IPI.
67
59config PPC_UDBG_BEAT 68config PPC_UDBG_BEAT
60 bool "BEAT based debug console" 69 bool "BEAT based debug console"
61 depends on PPC_CELLEB 70 depends on PPC_CELLEB
62 default n 71 default n
63 72
64config XICS
65 depends on PPC_PSERIES
66 bool
67 default y
68
69config IPIC 73config IPIC
70 bool 74 bool
71 default n 75 default n
@@ -147,14 +151,27 @@ config PPC_970_NAP
147 bool 151 bool
148 default n 152 default n
149 153
154config PPC_P7_NAP
155 bool
156 default n
157
150config PPC_INDIRECT_IO 158config PPC_INDIRECT_IO
151 bool 159 bool
152 select GENERIC_IOMAP 160 select GENERIC_IOMAP
153 default n 161
162config PPC_INDIRECT_PIO
163 bool
164 select PPC_INDIRECT_IO
165
166config PPC_INDIRECT_MMIO
167 bool
168 select PPC_INDIRECT_IO
169
170config PPC_IO_WORKAROUNDS
171 bool
154 172
155config GENERIC_IOMAP 173config GENERIC_IOMAP
156 bool 174 bool
157 default n
158 175
159source "drivers/cpufreq/Kconfig" 176source "drivers/cpufreq/Kconfig"
160 177
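
The PPC_SMP_MUXED_IPI help text added above describes multiplexing the four kernel IPI messages onto a single interrupt per CPU. The following standalone C sketch shows the general scheme; it is an illustration only, the function and variable names are hypothetical and this is not the kernel's implementation.

/*
 * Sketch of muxed IPIs (hypothetical names): the sender sets a bit in a
 * per-CPU pending word and raises the one available hardware IPI; the
 * receiver demuxes by atomically clearing the word and handling every
 * message bit that was set.
 */
#include <stdio.h>

enum ipi_msg { MSG_CALL_FUNC, MSG_RESCHEDULE, MSG_CALL_FUNC_SINGLE, MSG_DEBUG, NR_MSGS };

static unsigned long ipi_pending[4];            /* one word per CPU */

static void raise_hw_ipi(int cpu)               /* platform hook: one interrupt only */
{
        (void)cpu;
}

static void muxed_ipi_send(int cpu, enum ipi_msg msg)
{
        __atomic_fetch_or(&ipi_pending[cpu], 1UL << msg, __ATOMIC_RELEASE);
        raise_hw_ipi(cpu);
}

static void muxed_ipi_demux(int cpu)            /* called from the IPI handler */
{
        unsigned long bits = __atomic_exchange_n(&ipi_pending[cpu], 0, __ATOMIC_ACQUIRE);
        int msg;

        for (msg = 0; msg < NR_MSGS; msg++)
                if (bits & (1UL << msg))
                        printf("cpu%d: handle message %d\n", cpu, msg);
}

int main(void)
{
        muxed_ipi_send(1, MSG_RESCHEDULE);
        muxed_ipi_send(1, MSG_CALL_FUNC);
        muxed_ipi_demux(1);
        return 0;
}

In the series itself this role is filled by smp_muxed_ipi_message_pass() on the send side and smp_ipi_demux() on the receive side, with the platform supplying only a cause_ipi hook, as the iSeries and powermac hunks later in this diff show.
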
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 111138c55f9c..2165b65876f9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -73,6 +73,7 @@ config PPC_BOOK3S_64
73config PPC_BOOK3E_64 73config PPC_BOOK3E_64
74 bool "Embedded processors" 74 bool "Embedded processors"
75 select PPC_FPU # Make it a choice ? 75 select PPC_FPU # Make it a choice ?
76 select PPC_SMP_MUXED_IPI
76 77
77endchoice 78endchoice
78 79
@@ -107,6 +108,10 @@ config POWER4
107 depends on PPC64 && PPC_BOOK3S 108 depends on PPC64 && PPC_BOOK3S
108 def_bool y 109 def_bool y
109 110
111config PPC_A2
112 bool
113 depends on PPC_BOOK3E_64
114
110config TUNE_CELL 115config TUNE_CELL
111 bool "Optimize for Cell Broadband Engine" 116 bool "Optimize for Cell Broadband Engine"
112 depends on PPC64 && PPC_BOOK3S 117 depends on PPC64 && PPC_BOOK3S
@@ -174,6 +179,7 @@ config FSL_BOOKE
174config PPC_FSL_BOOK3E 179config PPC_FSL_BOOK3E
175 bool 180 bool
176 select FSL_EMB_PERFMON 181 select FSL_EMB_PERFMON
182 select PPC_SMP_MUXED_IPI
177 default y if FSL_BOOKE 183 default y if FSL_BOOKE
178 184
179config PTE_64BIT 185config PTE_64BIT
@@ -226,6 +232,24 @@ config VSX
226 232
227 If in doubt, say Y here. 233 If in doubt, say Y here.
228 234
235config PPC_ICSWX
236 bool "Support for PowerPC icswx coprocessor instruction"
237 depends on POWER4
238 default n
239 ---help---
240
241 This option enables kernel support for the PowerPC Initiate
242 Coprocessor Store Word (icswx) coprocessor instruction on POWER7
243 or newer processors.
244
245 This option is only useful if you have a processor that supports
246 the icswx coprocessor instruction. It does not have any effect
247 on processors without the icswx coprocessor instruction.
248
249 This option slightly increases kernel memory usage.
250
251 If in doubt, say N here.
252
229config SPE 253config SPE
230 bool "SPE Support" 254 bool "SPE Support"
231 depends on E200 || (E500 && !PPC_E500MC) 255 depends on E200 || (E500 && !PPC_E500MC)
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index fdb9f0b0d7a8..73e2116cfeed 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/
22obj-$(CONFIG_PPC_PS3) += ps3/ 22obj-$(CONFIG_PPC_PS3) += ps3/
23obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ 23obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
24obj-$(CONFIG_AMIGAONE) += amigaone/ 24obj-$(CONFIG_AMIGAONE) += amigaone/
25obj-$(CONFIG_PPC_WSP) += wsp/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 81239ebed83f..67d5009b4e86 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -6,7 +6,8 @@ config PPC_CELL_COMMON
6 bool 6 bool
7 select PPC_CELL 7 select PPC_CELL
8 select PPC_DCR_MMIO 8 select PPC_DCR_MMIO
9 select PPC_INDIRECT_IO 9 select PPC_INDIRECT_PIO
10 select PPC_INDIRECT_MMIO
10 select PPC_NATIVE 11 select PPC_NATIVE
11 select PPC_RTAS 12 select PPC_RTAS
12 select IRQ_EDGE_EOI_HANDLER 13 select IRQ_EDGE_EOI_HANDLER
@@ -15,6 +16,7 @@ config PPC_CELL_NATIVE
15 bool 16 bool
16 select PPC_CELL_COMMON 17 select PPC_CELL_COMMON
17 select MPIC 18 select MPIC
19 select PPC_IO_WORKAROUNDS
18 select IBM_NEW_EMAC_EMAC4 20 select IBM_NEW_EMAC_EMAC4
19 select IBM_NEW_EMAC_RGMII 21 select IBM_NEW_EMAC_RGMII
20 select IBM_NEW_EMAC_ZMII #test only 22 select IBM_NEW_EMAC_ZMII #test only
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 83fafe922641..a4a89350bcfc 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,7 @@
1obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o 1obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
2 2
3obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ 3obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
4 pmu.o io-workarounds.o spider-pci.o 4 pmu.o spider-pci.o
5obj-$(CONFIG_CBE_RAS) += ras.o 5obj-$(CONFIG_CBE_RAS) += ras.o
6 6
7obj-$(CONFIG_CBE_THERM) += cbe_thermal.o 7obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
@@ -39,11 +39,10 @@ obj-y += celleb_setup.o \
39 celleb_pci.o celleb_scc_epci.o \ 39 celleb_pci.o celleb_scc_epci.o \
40 celleb_scc_pciex.o \ 40 celleb_scc_pciex.o \
41 celleb_scc_uhc.o \ 41 celleb_scc_uhc.o \
42 io-workarounds.o spider-pci.o \ 42 spider-pci.o beat.o beat_htab.o \
43 beat.o beat_htab.o beat_hvCall.o \ 43 beat_hvCall.o beat_interrupt.o \
44 beat_interrupt.o beat_iommu.o 44 beat_iommu.o
45 45
46obj-$(CONFIG_SMP) += beat_smp.o
47obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o 46obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
48obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o 47obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
49obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o 48obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index bb5ebf8fa80b..ac06903e136a 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -113,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
113 pr_devel("axon_msi: woff %x roff %x msi %x\n", 113 pr_devel("axon_msi: woff %x roff %x msi %x\n",
114 write_offset, msic->read_offset, msi); 114 write_offset, msic->read_offset, msi);
115 115
116 if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) { 116 if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
117 generic_handle_irq(msi); 117 generic_handle_irq(msi);
118 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); 118 msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
119 } else { 119 } else {
@@ -320,6 +320,7 @@ static struct irq_chip msic_irq_chip = {
320static int msic_host_map(struct irq_host *h, unsigned int virq, 320static int msic_host_map(struct irq_host *h, unsigned int virq,
321 irq_hw_number_t hw) 321 irq_hw_number_t hw)
322{ 322{
323 irq_set_chip_data(virq, h->host_data);
323 irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); 324 irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
324 325
325 return 0; 326 return 0;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 4cb9e147c307..55015e1f6939 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -148,16 +148,6 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
148} 148}
149 149
150/* 150/*
151 * Update binding hardware IRQ number (hw) and Virtuql
152 * IRQ number (virq). This is called only once for a given mapping.
153 */
154static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq,
155 irq_hw_number_t hw)
156{
157 beat_construct_and_connect_irq_plug(virq, hw);
158}
159
160/*
161 * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), 151 * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
162 * to pass away to irq_create_mapping(). 152 * to pass away to irq_create_mapping().
163 * 153 *
@@ -184,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
184 174
185static struct irq_host_ops beatic_pic_host_ops = { 175static struct irq_host_ops beatic_pic_host_ops = {
186 .map = beatic_pic_host_map, 176 .map = beatic_pic_host_map,
187 .remap = beatic_pic_host_remap,
188 .unmap = beatic_pic_host_unmap, 177 .unmap = beatic_pic_host_unmap,
189 .xlate = beatic_pic_host_xlate, 178 .xlate = beatic_pic_host_xlate,
190 .match = beatic_pic_host_match, 179 .match = beatic_pic_host_match,
@@ -257,22 +246,6 @@ void __init beatic_init_IRQ(void)
257 irq_set_default_host(beatic_host); 246 irq_set_default_host(beatic_host);
258} 247}
259 248
260#ifdef CONFIG_SMP
261
262/* Nullified to compile with SMP mode */
263void beatic_setup_cpu(int cpu)
264{
265}
266
267void beatic_cause_IPI(int cpu, int mesg)
268{
269}
270
271void beatic_request_IPIs(void)
272{
273}
274#endif /* CONFIG_SMP */
275
276void beatic_deinit_IRQ(void) 249void beatic_deinit_IRQ(void)
277{ 250{
278 int i; 251 int i;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
index b470fd0051f1..a7e52f91a078 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ b/arch/powerpc/platforms/cell/beat_interrupt.h
@@ -24,9 +24,6 @@
24 24
25extern void beatic_init_IRQ(void); 25extern void beatic_init_IRQ(void);
26extern unsigned int beatic_get_irq(void); 26extern unsigned int beatic_get_irq(void);
27extern void beatic_cause_IPI(int cpu, int mesg);
28extern void beatic_request_IPIs(void);
29extern void beatic_setup_cpu(int);
30extern void beatic_deinit_IRQ(void); 27extern void beatic_deinit_IRQ(void);
31 28
32#endif 29#endif
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
deleted file mode 100644
index 26efc204c47f..000000000000
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * SMP support for Celleb platform. (Incomplete)
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on arch/powerpc/platforms/cell/smp.c:
7 * Dave Engebretsen, Peter Bergner, and
8 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
9 * Plus various changes from other IBM teams...
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 */
25
26#undef DEBUG
27
28#include <linux/kernel.h>
29#include <linux/smp.h>
30#include <linux/interrupt.h>
31#include <linux/init.h>
32#include <linux/threads.h>
33#include <linux/cpu.h>
34
35#include <asm/irq.h>
36#include <asm/smp.h>
37#include <asm/machdep.h>
38#include <asm/udbg.h>
39
40#include "beat_interrupt.h"
41
42#ifdef DEBUG
43#define DBG(fmt...) udbg_printf(fmt)
44#else
45#define DBG(fmt...)
46#endif
47
48/*
49 * The primary thread of each non-boot processor is recorded here before
50 * smp init.
51 */
52/* static cpumask_t of_spin_map; */
53
54/**
55 * smp_startup_cpu() - start the given cpu
56 *
57 * At boot time, there is nothing to do for primary threads which were
58 * started from Open Firmware. For anything else, call RTAS with the
59 * appropriate start location.
60 *
61 * Returns:
62 * 0 - failure
63 * 1 - success
64 */
65static inline int __devinit smp_startup_cpu(unsigned int lcpu)
66{
67 return 0;
68}
69
70static void smp_beatic_message_pass(int target, int msg)
71{
72 unsigned int i;
73
74 if (target < NR_CPUS) {
75 beatic_cause_IPI(target, msg);
76 } else {
77 for_each_online_cpu(i) {
78 if (target == MSG_ALL_BUT_SELF
79 && i == smp_processor_id())
80 continue;
81 beatic_cause_IPI(i, msg);
82 }
83 }
84}
85
86static int __init smp_beatic_probe(void)
87{
88 return cpus_weight(cpu_possible_map);
89}
90
91static void __devinit smp_beatic_setup_cpu(int cpu)
92{
93 beatic_setup_cpu(cpu);
94}
95
96static void __devinit smp_celleb_kick_cpu(int nr)
97{
98 BUG_ON(nr < 0 || nr >= NR_CPUS);
99
100 if (!smp_startup_cpu(nr))
101 return;
102}
103
104static int smp_celleb_cpu_bootable(unsigned int nr)
105{
106 return 1;
107}
108static struct smp_ops_t bpa_beatic_smp_ops = {
109 .message_pass = smp_beatic_message_pass,
110 .probe = smp_beatic_probe,
111 .kick_cpu = smp_celleb_kick_cpu,
112 .setup_cpu = smp_beatic_setup_cpu,
113 .cpu_bootable = smp_celleb_cpu_bootable,
114};
115
116/* This is called very early */
117void __init smp_init_celleb(void)
118{
119 DBG(" -> smp_init_celleb()\n");
120
121 smp_ops = &bpa_beatic_smp_ops;
122
123 DBG(" <- smp_init_celleb()\n");
124}
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index dbc338f187a2..f3917e7a5b44 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -45,8 +45,8 @@ static struct cbe_thread_map
45 unsigned int cbe_id; 45 unsigned int cbe_id;
46} cbe_thread_map[NR_CPUS]; 46} cbe_thread_map[NR_CPUS];
47 47
48static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE }; 48static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
49static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE; 49static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
50 50
51static struct cbe_regs_map *cbe_find_map(struct device_node *np) 51static struct cbe_regs_map *cbe_find_map(struct device_node *np)
52{ 52{
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
159 159
160u32 cbe_node_to_cpu(int node) 160u32 cbe_node_to_cpu(int node)
161{ 161{
162 return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t)); 162 return cpumask_first(&cbe_local_mask[node]);
163
163} 164}
164EXPORT_SYMBOL_GPL(cbe_node_to_cpu); 165EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
165 166
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void)
268 thread->regs = map; 269 thread->regs = map;
269 thread->cbe_id = cbe_id; 270 thread->cbe_id = cbe_id;
270 map->be_node = thread->be_node; 271 map->be_node = thread->be_node;
271 cpu_set(i, cbe_local_mask[cbe_id]); 272 cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
272 if(thread->thread_id == 0) 273 if(thread->thread_id == 0)
273 cpu_set(i, cbe_first_online_cpu); 274 cpumask_set_cpu(i, &cbe_first_online_cpu);
274 } 275 }
275 } 276 }
276 277
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 404d1fc04d59..5822141aa63f 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -41,7 +41,6 @@
41#include <asm/pci-bridge.h> 41#include <asm/pci-bridge.h>
42#include <asm/ppc-pci.h> 42#include <asm/ppc-pci.h>
43 43
44#include "io-workarounds.h"
45#include "celleb_pci.h" 44#include "celleb_pci.h"
46 45
47#define MAX_PCI_DEVICES 32 46#define MAX_PCI_DEVICES 32
@@ -320,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
320 319
321 size = 256; 320 size = 256;
322 config = &private->fake_config[devno][fn]; 321 config = &private->fake_config[devno][fn];
323 *config = alloc_maybe_bootmem(size, GFP_KERNEL); 322 *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
324 if (*config == NULL) { 323 if (*config == NULL) {
325 printk(KERN_ERR "PCI: " 324 printk(KERN_ERR "PCI: "
326 "not enough memory for fake configuration space\n"); 325 "not enough memory for fake configuration space\n");
@@ -331,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
331 330
332 size = sizeof(struct celleb_pci_resource); 331 size = sizeof(struct celleb_pci_resource);
333 res = &private->res[devno][fn]; 332 res = &private->res[devno][fn];
334 *res = alloc_maybe_bootmem(size, GFP_KERNEL); 333 *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
335 if (*res == NULL) { 334 if (*res == NULL) {
336 printk(KERN_ERR 335 printk(KERN_ERR
337 "PCI: not enough memory for resource data space\n"); 336 "PCI: not enough memory for resource data space\n");
@@ -432,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev,
432static void __init celleb_alloc_private_mem(struct pci_controller *hose) 431static void __init celleb_alloc_private_mem(struct pci_controller *hose)
433{ 432{
434 hose->private_data = 433 hose->private_data =
435 alloc_maybe_bootmem(sizeof(struct celleb_pci_private), 434 zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
436 GFP_KERNEL); 435 GFP_KERNEL);
437} 436}
438 437
@@ -469,18 +468,6 @@ static struct of_device_id celleb_phb_match[] __initdata = {
469 }, 468 },
470}; 469};
471 470
472static int __init celleb_io_workaround_init(struct pci_controller *phb,
473 struct celleb_phb_spec *phb_spec)
474{
475 if (phb_spec->ops) {
476 iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init,
477 phb_spec->iowa_data);
478 io_workaround_init();
479 }
480
481 return 0;
482}
483
484int __init celleb_setup_phb(struct pci_controller *phb) 471int __init celleb_setup_phb(struct pci_controller *phb)
485{ 472{
486 struct device_node *dev = phb->dn; 473 struct device_node *dev = phb->dn;
@@ -500,7 +487,11 @@ int __init celleb_setup_phb(struct pci_controller *phb)
500 if (rc) 487 if (rc)
501 return 1; 488 return 1;
502 489
503 return celleb_io_workaround_init(phb, phb_spec); 490 if (phb_spec->ops)
491 iowa_register_bus(phb, phb_spec->ops,
492 phb_spec->iowa_init,
493 phb_spec->iowa_data);
494 return 0;
504} 495}
505 496
506int celleb_pci_probe_mode(struct pci_bus *bus) 497int celleb_pci_probe_mode(struct pci_bus *bus)
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
index 4cba1523ec50..a801fcc5f389 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ b/arch/powerpc/platforms/cell/celleb_pci.h
@@ -26,8 +26,9 @@
26#include <asm/pci-bridge.h> 26#include <asm/pci-bridge.h>
27#include <asm/prom.h> 27#include <asm/prom.h>
28#include <asm/ppc-pci.h> 28#include <asm/ppc-pci.h>
29#include <asm/io-workarounds.h>
29 30
30#include "io-workarounds.h" 31struct iowa_bus;
31 32
32struct celleb_phb_spec { 33struct celleb_phb_spec {
33 int (*setup)(struct device_node *, struct pci_controller *); 34 int (*setup)(struct device_node *, struct pci_controller *);
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index e53845579770..d58d9bae4b9b 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void)
128 spu_management_ops = &spu_management_of_ops; 128 spu_management_ops = &spu_management_of_ops;
129#endif 129#endif
130 130
131#ifdef CONFIG_SMP
132 smp_init_celleb();
133#endif
134
135 celleb_setup_arch_common(); 131 celleb_setup_arch_common();
136} 132}
137 133
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 44cfd1bef89b..449c08c15862 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -196,8 +196,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id)
196{ 196{
197 int ipi = (int)(long)dev_id; 197 int ipi = (int)(long)dev_id;
198 198
199 smp_message_recv(ipi); 199 switch(ipi) {
200 200 case PPC_MSG_CALL_FUNCTION:
201 generic_smp_call_function_interrupt();
202 break;
203 case PPC_MSG_RESCHEDULE:
204 scheduler_ipi();
205 break;
206 case PPC_MSG_CALL_FUNC_SINGLE:
207 generic_smp_call_function_single_interrupt();
208 break;
209 case PPC_MSG_DEBUGGER_BREAK:
210 debug_ipi_action(0, NULL);
211 break;
212 }
201 return IRQ_HANDLED; 213 return IRQ_HANDLED;
202} 214}
203static void iic_request_ipi(int ipi, const char *name) 215static void iic_request_ipi(int ipi, const char *name)
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index d31c594cfdf3..51e290126bc1 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -42,7 +42,6 @@
42#include "interrupt.h" 42#include "interrupt.h"
43#include "pervasive.h" 43#include "pervasive.h"
44#include "ras.h" 44#include "ras.h"
45#include "io-workarounds.h"
46 45
47static void qpace_show_cpuinfo(struct seq_file *m) 46static void qpace_show_cpuinfo(struct seq_file *m)
48{ 47{
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index fd57bfe00edf..c73cf4c43fc2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -51,11 +51,11 @@
51#include <asm/udbg.h> 51#include <asm/udbg.h>
52#include <asm/mpic.h> 52#include <asm/mpic.h>
53#include <asm/cell-regs.h> 53#include <asm/cell-regs.h>
54#include <asm/io-workarounds.h>
54 55
55#include "interrupt.h" 56#include "interrupt.h"
56#include "pervasive.h" 57#include "pervasive.h"
57#include "ras.h" 58#include "ras.h"
58#include "io-workarounds.h"
59 59
60#ifdef DEBUG 60#ifdef DEBUG
61#define DBG(fmt...) udbg_printf(fmt) 61#define DBG(fmt...) udbg_printf(fmt)
@@ -136,8 +136,6 @@ static int __devinit cell_setup_phb(struct pci_controller *phb)
136 136
137 iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, 137 iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init,
138 (void *)SPIDER_PCI_REG_BASE); 138 (void *)SPIDER_PCI_REG_BASE);
139 io_workaround_init();
140
141 return 0; 139 return 0;
142} 140}
143 141
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index f774530075b7..d176e6148e3f 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
77 unsigned int pcpu; 77 unsigned int pcpu;
78 int start_cpu; 78 int start_cpu;
79 79
80 if (cpu_isset(lcpu, of_spin_map)) 80 if (cpumask_test_cpu(lcpu, &of_spin_map))
81 /* Already started by OF and sitting in spin loop */ 81 /* Already started by OF and sitting in spin loop */
82 return 1; 82 return 1;
83 83
@@ -103,27 +103,11 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
103 return 1; 103 return 1;
104} 104}
105 105
106static void smp_iic_message_pass(int target, int msg)
107{
108 unsigned int i;
109
110 if (target < NR_CPUS) {
111 iic_cause_IPI(target, msg);
112 } else {
113 for_each_online_cpu(i) {
114 if (target == MSG_ALL_BUT_SELF
115 && i == smp_processor_id())
116 continue;
117 iic_cause_IPI(i, msg);
118 }
119 }
120}
121
122static int __init smp_iic_probe(void) 106static int __init smp_iic_probe(void)
123{ 107{
124 iic_request_IPIs(); 108 iic_request_IPIs();
125 109
126 return cpus_weight(cpu_possible_map); 110 return cpumask_weight(cpu_possible_mask);
127} 111}
128 112
129static void __devinit smp_cell_setup_cpu(int cpu) 113static void __devinit smp_cell_setup_cpu(int cpu)
@@ -137,12 +121,12 @@ static void __devinit smp_cell_setup_cpu(int cpu)
137 mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); 121 mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
138} 122}
139 123
140static void __devinit smp_cell_kick_cpu(int nr) 124static int __devinit smp_cell_kick_cpu(int nr)
141{ 125{
142 BUG_ON(nr < 0 || nr >= NR_CPUS); 126 BUG_ON(nr < 0 || nr >= NR_CPUS);
143 127
144 if (!smp_startup_cpu(nr)) 128 if (!smp_startup_cpu(nr))
145 return; 129 return -ENOENT;
146 130
147 /* 131 /*
148 * The processor is currently spinning, waiting for the 132 * The processor is currently spinning, waiting for the
@@ -150,6 +134,8 @@ static void __devinit smp_cell_kick_cpu(int nr)
150 * the processor will continue on to secondary_start 134 * the processor will continue on to secondary_start
151 */ 135 */
152 paca[nr].cpu_start = 1; 136 paca[nr].cpu_start = 1;
137
138 return 0;
153} 139}
154 140
155static int smp_cell_cpu_bootable(unsigned int nr) 141static int smp_cell_cpu_bootable(unsigned int nr)
@@ -166,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
166 return 1; 152 return 1;
167} 153}
168static struct smp_ops_t bpa_iic_smp_ops = { 154static struct smp_ops_t bpa_iic_smp_ops = {
169 .message_pass = smp_iic_message_pass, 155 .message_pass = iic_cause_IPI,
170 .probe = smp_iic_probe, 156 .probe = smp_iic_probe,
171 .kick_cpu = smp_cell_kick_cpu, 157 .kick_cpu = smp_cell_kick_cpu,
172 .setup_cpu = smp_cell_setup_cpu, 158 .setup_cpu = smp_cell_setup_cpu,
@@ -186,13 +172,12 @@ void __init smp_init_cell(void)
186 if (cpu_has_feature(CPU_FTR_SMT)) { 172 if (cpu_has_feature(CPU_FTR_SMT)) {
187 for_each_present_cpu(i) { 173 for_each_present_cpu(i) {
188 if (cpu_thread_in_core(i) == 0) 174 if (cpu_thread_in_core(i) == 0)
189 cpu_set(i, of_spin_map); 175 cpumask_set_cpu(i, &of_spin_map);
190 } 176 }
191 } else { 177 } else
192 of_spin_map = cpu_present_map; 178 cpumask_copy(&of_spin_map, cpu_present_mask);
193 }
194 179
195 cpu_clear(boot_cpuid, of_spin_map); 180 cpumask_clear_cpu(boot_cpuid, &of_spin_map);
196 181
197 /* Non-lpar has additional take/give timebase */ 182 /* Non-lpar has additional take/give timebase */
198 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { 183 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index ca7731c0b595..f1f7878893f3 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -27,8 +27,7 @@
27 27
28#include <asm/ppc-pci.h> 28#include <asm/ppc-pci.h>
29#include <asm/pci-bridge.h> 29#include <asm/pci-bridge.h>
30 30#include <asm/io-workarounds.h>
31#include "io-workarounds.h"
32 31
33#define SPIDER_PCI_DISABLE_PREFETCH 32#define SPIDER_PCI_DISABLE_PREFETCH
34 33
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index c5cf50e6b45a..442c28c00f88 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -68,9 +68,9 @@ struct spider_pic {
68}; 68};
69static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; 69static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
70 70
71static struct spider_pic *spider_virq_to_pic(unsigned int virq) 71static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d)
72{ 72{
73 return irq_map[virq].host->host_data; 73 return irq_data_get_irq_chip_data(d);
74} 74}
75 75
76static void __iomem *spider_get_irq_config(struct spider_pic *pic, 76static void __iomem *spider_get_irq_config(struct spider_pic *pic,
@@ -81,24 +81,24 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic,
81 81
82static void spider_unmask_irq(struct irq_data *d) 82static void spider_unmask_irq(struct irq_data *d)
83{ 83{
84 struct spider_pic *pic = spider_virq_to_pic(d->irq); 84 struct spider_pic *pic = spider_irq_data_to_pic(d);
85 void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq); 85 void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
86 86
87 out_be32(cfg, in_be32(cfg) | 0x30000000u); 87 out_be32(cfg, in_be32(cfg) | 0x30000000u);
88} 88}
89 89
90static void spider_mask_irq(struct irq_data *d) 90static void spider_mask_irq(struct irq_data *d)
91{ 91{
92 struct spider_pic *pic = spider_virq_to_pic(d->irq); 92 struct spider_pic *pic = spider_irq_data_to_pic(d);
93 void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq); 93 void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
94 94
95 out_be32(cfg, in_be32(cfg) & ~0x30000000u); 95 out_be32(cfg, in_be32(cfg) & ~0x30000000u);
96} 96}
97 97
98static void spider_ack_irq(struct irq_data *d) 98static void spider_ack_irq(struct irq_data *d)
99{ 99{
100 struct spider_pic *pic = spider_virq_to_pic(d->irq); 100 struct spider_pic *pic = spider_irq_data_to_pic(d);
101 unsigned int src = irq_map[d->irq].hwirq; 101 unsigned int src = irqd_to_hwirq(d);
102 102
103 /* Reset edge detection logic if necessary 103 /* Reset edge detection logic if necessary
104 */ 104 */
@@ -116,8 +116,8 @@ static void spider_ack_irq(struct irq_data *d)
116static int spider_set_irq_type(struct irq_data *d, unsigned int type) 116static int spider_set_irq_type(struct irq_data *d, unsigned int type)
117{ 117{
118 unsigned int sense = type & IRQ_TYPE_SENSE_MASK; 118 unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
119 struct spider_pic *pic = spider_virq_to_pic(d->irq); 119 struct spider_pic *pic = spider_irq_data_to_pic(d);
120 unsigned int hw = irq_map[d->irq].hwirq; 120 unsigned int hw = irqd_to_hwirq(d);
121 void __iomem *cfg = spider_get_irq_config(pic, hw); 121 void __iomem *cfg = spider_get_irq_config(pic, hw);
122 u32 old_mask; 122 u32 old_mask;
123 u32 ic; 123 u32 ic;
@@ -171,6 +171,7 @@ static struct irq_chip spider_pic = {
171static int spider_host_map(struct irq_host *h, unsigned int virq, 171static int spider_host_map(struct irq_host *h, unsigned int virq,
172 irq_hw_number_t hw) 172 irq_hw_number_t hw)
173{ 173{
174 irq_set_chip_data(virq, h->host_data);
174 irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); 175 irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
175 176
176 /* Set default irq type */ 177 /* Set default irq type */
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 65203857b0ce..32cb4e66d2cd 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
141 * runqueue. The context will be rescheduled on the proper node 141 * runqueue. The context will be rescheduled on the proper node
142 * if it is timesliced or preempted. 142 * if it is timesliced or preempted.
143 */ 143 */
144 ctx->cpus_allowed = current->cpus_allowed; 144 cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
145 145
146 /* Save the current cpu id for spu interrupt routing. */ 146 /* Save the current cpu id for spu interrupt routing. */
147 ctx->last_ran = raw_smp_processor_id(); 147 ctx->last_ran = raw_smp_processor_id();
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 02cafecc90e3..a800122e4dda 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -30,10 +30,12 @@
30#include <asm/mpic.h> 30#include <asm/mpic.h>
31#include <asm/rtas.h> 31#include <asm/rtas.h>
32 32
33static void __devinit smp_chrp_kick_cpu(int nr) 33static int __devinit smp_chrp_kick_cpu(int nr)
34{ 34{
35 *(unsigned long *)KERNELBASE = nr; 35 *(unsigned long *)KERNELBASE = nr;
36 asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); 36 asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
37
38 return 0;
37} 39}
38 40
39static void __devinit smp_chrp_setup_cpu(int cpu_nr) 41static void __devinit smp_chrp_setup_cpu(int cpu_nr)
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 12aa62b6f227..f61a2dd96b99 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -48,7 +48,7 @@
48 48
49static void flipper_pic_mask_and_ack(struct irq_data *d) 49static void flipper_pic_mask_and_ack(struct irq_data *d)
50{ 50{
51 int irq = virq_to_hw(d->irq); 51 int irq = irqd_to_hwirq(d);
52 void __iomem *io_base = irq_data_get_irq_chip_data(d); 52 void __iomem *io_base = irq_data_get_irq_chip_data(d);
53 u32 mask = 1 << irq; 53 u32 mask = 1 << irq;
54 54
@@ -59,7 +59,7 @@ static void flipper_pic_mask_and_ack(struct irq_data *d)
59 59
60static void flipper_pic_ack(struct irq_data *d) 60static void flipper_pic_ack(struct irq_data *d)
61{ 61{
62 int irq = virq_to_hw(d->irq); 62 int irq = irqd_to_hwirq(d);
63 void __iomem *io_base = irq_data_get_irq_chip_data(d); 63 void __iomem *io_base = irq_data_get_irq_chip_data(d);
64 64
65 /* this is at least needed for RSW */ 65 /* this is at least needed for RSW */
@@ -68,7 +68,7 @@ static void flipper_pic_ack(struct irq_data *d)
68 68
69static void flipper_pic_mask(struct irq_data *d) 69static void flipper_pic_mask(struct irq_data *d)
70{ 70{
71 int irq = virq_to_hw(d->irq); 71 int irq = irqd_to_hwirq(d);
72 void __iomem *io_base = irq_data_get_irq_chip_data(d); 72 void __iomem *io_base = irq_data_get_irq_chip_data(d);
73 73
74 clrbits32(io_base + FLIPPER_IMR, 1 << irq); 74 clrbits32(io_base + FLIPPER_IMR, 1 << irq);
@@ -76,7 +76,7 @@ static void flipper_pic_mask(struct irq_data *d)
76 76
77static void flipper_pic_unmask(struct irq_data *d) 77static void flipper_pic_unmask(struct irq_data *d)
78{ 78{
79 int irq = virq_to_hw(d->irq); 79 int irq = irqd_to_hwirq(d);
80 void __iomem *io_base = irq_data_get_irq_chip_data(d); 80 void __iomem *io_base = irq_data_get_irq_chip_data(d);
81 81
82 setbits32(io_base + FLIPPER_IMR, 1 << irq); 82 setbits32(io_base + FLIPPER_IMR, 1 << irq);
@@ -107,12 +107,6 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
107 return 0; 107 return 0;
108} 108}
109 109
110static void flipper_pic_unmap(struct irq_host *h, unsigned int irq)
111{
112 irq_set_chip_data(irq, NULL);
113 irq_set_chip(irq, NULL);
114}
115
116static int flipper_pic_match(struct irq_host *h, struct device_node *np) 110static int flipper_pic_match(struct irq_host *h, struct device_node *np)
117{ 111{
118 return 1; 112 return 1;
@@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np)
121 115
122static struct irq_host_ops flipper_irq_host_ops = { 116static struct irq_host_ops flipper_irq_host_ops = {
123 .map = flipper_pic_map, 117 .map = flipper_pic_map,
124 .unmap = flipper_pic_unmap,
125 .match = flipper_pic_match, 118 .match = flipper_pic_match,
126}; 119};
127 120
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 2bdddfc9d520..e4919170c6bc 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -43,7 +43,7 @@
43 43
44static void hlwd_pic_mask_and_ack(struct irq_data *d) 44static void hlwd_pic_mask_and_ack(struct irq_data *d)
45{ 45{
46 int irq = virq_to_hw(d->irq); 46 int irq = irqd_to_hwirq(d);
47 void __iomem *io_base = irq_data_get_irq_chip_data(d); 47 void __iomem *io_base = irq_data_get_irq_chip_data(d);
48 u32 mask = 1 << irq; 48 u32 mask = 1 << irq;
49 49
@@ -53,7 +53,7 @@ static void hlwd_pic_mask_and_ack(struct irq_data *d)
53 53
54static void hlwd_pic_ack(struct irq_data *d) 54static void hlwd_pic_ack(struct irq_data *d)
55{ 55{
56 int irq = virq_to_hw(d->irq); 56 int irq = irqd_to_hwirq(d);
57 void __iomem *io_base = irq_data_get_irq_chip_data(d); 57 void __iomem *io_base = irq_data_get_irq_chip_data(d);
58 58
59 out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); 59 out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
@@ -61,7 +61,7 @@ static void hlwd_pic_ack(struct irq_data *d)
61 61
62static void hlwd_pic_mask(struct irq_data *d) 62static void hlwd_pic_mask(struct irq_data *d)
63{ 63{
64 int irq = virq_to_hw(d->irq); 64 int irq = irqd_to_hwirq(d);
65 void __iomem *io_base = irq_data_get_irq_chip_data(d); 65 void __iomem *io_base = irq_data_get_irq_chip_data(d);
66 66
67 clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); 67 clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
@@ -69,7 +69,7 @@ static void hlwd_pic_mask(struct irq_data *d)
69 69
70static void hlwd_pic_unmask(struct irq_data *d) 70static void hlwd_pic_unmask(struct irq_data *d)
71{ 71{
72 int irq = virq_to_hw(d->irq); 72 int irq = irqd_to_hwirq(d);
73 void __iomem *io_base = irq_data_get_irq_chip_data(d); 73 void __iomem *io_base = irq_data_get_irq_chip_data(d);
74 74
75 setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); 75 setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
@@ -100,15 +100,8 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
100 return 0; 100 return 0;
101} 101}
102 102
103static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq)
104{
105 irq_set_chip_data(irq, NULL);
106 irq_set_chip(irq, NULL);
107}
108
109static struct irq_host_ops hlwd_irq_host_ops = { 103static struct irq_host_ops hlwd_irq_host_ops = {
110 .map = hlwd_pic_map, 104 .map = hlwd_pic_map,
111 .unmap = hlwd_pic_unmap,
112}; 105};
113 106
114static unsigned int __hlwd_pic_get_irq(struct irq_host *h) 107static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index e5bc9f75d474..b57cda3a0817 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -1,7 +1,9 @@
1config PPC_ISERIES 1config PPC_ISERIES
2 bool "IBM Legacy iSeries" 2 bool "IBM Legacy iSeries"
3 depends on PPC64 && PPC_BOOK3S 3 depends on PPC64 && PPC_BOOK3S
4 select PPC_INDIRECT_IO 4 select PPC_SMP_MUXED_IPI
5 select PPC_INDIRECT_PIO
6 select PPC_INDIRECT_MMIO
5 select PPC_PCI_CHOICE if EXPERT 7 select PPC_PCI_CHOICE if EXPERT
6 8
7menu "iSeries device drivers" 9menu "iSeries device drivers"
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 32a56c6dfa72..29c02f36b32f 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -31,6 +31,7 @@
31#include <asm/thread_info.h> 31#include <asm/thread_info.h>
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33#include <asm/cputable.h> 33#include <asm/cputable.h>
34#include <asm/mmu.h>
34 35
35#include "exception.h" 36#include "exception.h"
36 37
@@ -60,29 +61,31 @@ system_reset_iSeries:
60/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */ 61/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
61/* In the UP case we'll yield() later, and we will not access the paca anyway */ 62/* In the UP case we'll yield() later, and we will not access the paca anyway */
62#ifdef CONFIG_SMP 63#ifdef CONFIG_SMP
631: 64iSeries_secondary_wait_paca:
64 HMT_LOW 65 HMT_LOW
65 LOAD_REG_ADDR(r23, __secondary_hold_spinloop) 66 LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
66 ld r23,0(r23) 67 ld r23,0(r23)
67 sync
68 LOAD_REG_ADDR(r3,current_set)
69 sldi r28,r24,3 /* get current_set[cpu#] */
70 ldx r3,r3,r28
71 addi r1,r3,THREAD_SIZE
72 subi r1,r1,STACK_FRAME_OVERHEAD
73 68
74 cmpwi 0,r23,0 /* Keep poking the Hypervisor until */ 69 cmpdi 0,r23,0
75 bne 2f /* we're released */ 70 bne 2f /* go on when the master is ready */
76 /* Let the Hypervisor know we are alive */ 71
72 /* Keep poking the Hypervisor until we're released */
77 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 73 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
78 lis r3,0x8002 74 lis r3,0x8002
79 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ 75 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
80 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 76 li r0,-1 /* r0=-1 indicates a Hypervisor call */
81 sc /* Invoke the hypervisor via a system call */ 77 sc /* Invoke the hypervisor via a system call */
82 b 1b 78 b iSeries_secondary_wait_paca
83#endif
84 79
852: 802:
81 HMT_MEDIUM
82 sync
83
84 LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
85 lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
86 cmpld 0,r24,r3 /* is our cpu number allocated? */
87 bge iSeries_secondary_yield /* no, yield forever */
88
86 /* Load our paca now that it's been allocated */ 89 /* Load our paca now that it's been allocated */
87 LOAD_REG_ADDR(r13, paca) 90 LOAD_REG_ADDR(r13, paca)
88 ld r13,0(r13) 91 ld r13,0(r13)
@@ -93,10 +96,24 @@ system_reset_iSeries:
93 ori r23,r23,MSR_RI 96 ori r23,r23,MSR_RI
94 mtmsrd r23 /* RI on */ 97 mtmsrd r23 /* RI on */
95 98
96 HMT_LOW 99iSeries_secondary_smp_loop:
97#ifdef CONFIG_SMP
98 lbz r23,PACAPROCSTART(r13) /* Test if this processor 100 lbz r23,PACAPROCSTART(r13) /* Test if this processor
99 * should start */ 101 * should start */
102 cmpwi 0,r23,0
103 bne 3f /* go on when we are told */
104
105 HMT_LOW
106 /* Let the Hypervisor know we are alive */
107 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
108 lis r3,0x8002
109 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
110 li r0,-1 /* r0=-1 indicates a Hypervisor call */
111 sc /* Invoke the hypervisor via a system call */
112 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
113 b iSeries_secondary_smp_loop /* wait for signal to start */
114
1153:
116 HMT_MEDIUM
100 sync 117 sync
101 LOAD_REG_ADDR(r3,current_set) 118 LOAD_REG_ADDR(r3,current_set)
102 sldi r28,r24,3 /* get current_set[cpu#] */ 119 sldi r28,r24,3 /* get current_set[cpu#] */
@@ -104,27 +121,22 @@ system_reset_iSeries:
104 addi r1,r3,THREAD_SIZE 121 addi r1,r3,THREAD_SIZE
105 subi r1,r1,STACK_FRAME_OVERHEAD 122 subi r1,r1,STACK_FRAME_OVERHEAD
106 123
107 cmpwi 0,r23,0
108 beq iSeries_secondary_smp_loop /* Loop until told to go */
109 b __secondary_start /* Loop until told to go */ 124 b __secondary_start /* Loop until told to go */
110iSeries_secondary_smp_loop: 125#endif /* CONFIG_SMP */
111 /* Let the Hypervisor know we are alive */ 126
112 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 127iSeries_secondary_yield:
113 lis r3,0x8002
114 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
115#else /* CONFIG_SMP */
116 /* Yield the processor. This is required for non-SMP kernels 128 /* Yield the processor. This is required for non-SMP kernels
117 which are running on multi-threaded machines. */ 129 which are running on multi-threaded machines. */
130 HMT_LOW
118 lis r3,0x8000 131 lis r3,0x8000
119 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ 132 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
120 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ 133 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
121 li r4,0 /* "yield timed" */ 134 li r4,0 /* "yield timed" */
122 li r5,-1 /* "yield forever" */ 135 li r5,-1 /* "yield forever" */
123#endif /* CONFIG_SMP */
124 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 136 li r0,-1 /* r0=-1 indicates a Hypervisor call */
125 sc /* Invoke the hypervisor via a system call */ 137 sc /* Invoke the hypervisor via a system call */
126 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ 138 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
127 b 2b /* If SMP not configured, secondaries 139 b iSeries_secondary_yield /* If SMP not configured, secondaries
128 * loop forever */ 140 * loop forever */
129 141
130/*** ISeries-LPAR interrupt handlers ***/ 142/*** ISeries-LPAR interrupt handlers ***/
@@ -157,7 +169,7 @@ BEGIN_FTR_SECTION
157FTR_SECTION_ELSE 169FTR_SECTION_ELSE
158 EXCEPTION_PROLOG_1(PACA_EXGEN) 170 EXCEPTION_PROLOG_1(PACA_EXGEN)
159 EXCEPTION_PROLOG_ISERIES_1 171 EXCEPTION_PROLOG_ISERIES_1
160ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) 172ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
161 b data_access_common 173 b data_access_common
162 174
163.do_stab_bolted_iSeries: 175.do_stab_bolted_iSeries:
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 52a6889832c7..b2103453eb01 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -42,7 +42,6 @@
42#include "irq.h" 42#include "irq.h"
43#include "pci.h" 43#include "pci.h"
44#include "call_pci.h" 44#include "call_pci.h"
45#include "smp.h"
46 45
47#ifdef CONFIG_PCI 46#ifdef CONFIG_PCI
48 47
@@ -171,7 +170,7 @@ static void iseries_enable_IRQ(struct irq_data *d)
171{ 170{
172 u32 bus, dev_id, function, mask; 171 u32 bus, dev_id, function, mask;
173 const u32 sub_bus = 0; 172 const u32 sub_bus = 0;
174 unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; 173 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
175 174
176 /* The IRQ has already been locked by the caller */ 175 /* The IRQ has already been locked by the caller */
177 bus = REAL_IRQ_TO_BUS(rirq); 176 bus = REAL_IRQ_TO_BUS(rirq);
@@ -188,7 +187,7 @@ static unsigned int iseries_startup_IRQ(struct irq_data *d)
188{ 187{
189 u32 bus, dev_id, function, mask; 188 u32 bus, dev_id, function, mask;
190 const u32 sub_bus = 0; 189 const u32 sub_bus = 0;
191 unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; 190 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
192 191
193 bus = REAL_IRQ_TO_BUS(rirq); 192 bus = REAL_IRQ_TO_BUS(rirq);
194 function = REAL_IRQ_TO_FUNC(rirq); 193 function = REAL_IRQ_TO_FUNC(rirq);
@@ -234,7 +233,7 @@ static void iseries_shutdown_IRQ(struct irq_data *d)
234{ 233{
235 u32 bus, dev_id, function, mask; 234 u32 bus, dev_id, function, mask;
236 const u32 sub_bus = 0; 235 const u32 sub_bus = 0;
237 unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; 236 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
238 237
239 /* irq should be locked by the caller */ 238 /* irq should be locked by the caller */
240 bus = REAL_IRQ_TO_BUS(rirq); 239 bus = REAL_IRQ_TO_BUS(rirq);
@@ -257,7 +256,7 @@ static void iseries_disable_IRQ(struct irq_data *d)
257{ 256{
258 u32 bus, dev_id, function, mask; 257 u32 bus, dev_id, function, mask;
259 const u32 sub_bus = 0; 258 const u32 sub_bus = 0;
260 unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; 259 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
261 260
262 /* The IRQ has already been locked by the caller */ 261 /* The IRQ has already been locked by the caller */
263 bus = REAL_IRQ_TO_BUS(rirq); 262 bus = REAL_IRQ_TO_BUS(rirq);
@@ -271,7 +270,7 @@ static void iseries_disable_IRQ(struct irq_data *d)
271 270
272static void iseries_end_IRQ(struct irq_data *d) 271static void iseries_end_IRQ(struct irq_data *d)
273{ 272{
274 unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq; 273 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
275 274
276 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), 275 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
277 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); 276 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
@@ -316,7 +315,7 @@ unsigned int iSeries_get_irq(void)
316#ifdef CONFIG_SMP 315#ifdef CONFIG_SMP
317 if (get_lppaca()->int_dword.fields.ipi_cnt) { 316 if (get_lppaca()->int_dword.fields.ipi_cnt) {
318 get_lppaca()->int_dword.fields.ipi_cnt = 0; 317 get_lppaca()->int_dword.fields.ipi_cnt = 0;
319 iSeries_smp_message_recv(); 318 smp_ipi_demux();
320 } 319 }
321#endif /* CONFIG_SMP */ 320#endif /* CONFIG_SMP */
322 if (hvlpevent_is_pending()) 321 if (hvlpevent_is_pending())
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 2946ae10fbfd..c25a0815c26b 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -249,7 +249,7 @@ static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
249 unsigned long i; 249 unsigned long i;
250 unsigned long mem_blocks = 0; 250 unsigned long mem_blocks = 0;
251 251
252 if (cpu_has_feature(CPU_FTR_SLB)) 252 if (mmu_has_feature(MMU_FTR_SLB))
253 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, 253 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
254 max_entries); 254 max_entries);
255 else 255 else
@@ -634,7 +634,7 @@ static int __init iseries_probe(void)
634 634
635 hpte_init_iSeries(); 635 hpte_init_iSeries();
636 /* iSeries does not support 16M pages */ 636 /* iSeries does not support 16M pages */
637 cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE; 637 cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
638 638
639 return 1; 639 return 1;
640} 640}
@@ -685,6 +685,11 @@ void * __init iSeries_early_setup(void)
685 powerpc_firmware_features |= FW_FEATURE_ISERIES; 685 powerpc_firmware_features |= FW_FEATURE_ISERIES;
686 powerpc_firmware_features |= FW_FEATURE_LPAR; 686 powerpc_firmware_features |= FW_FEATURE_LPAR;
687 687
688#ifdef CONFIG_SMP
689 /* On iSeries we know we can never have more than 64 cpus */
690 nr_cpu_ids = max(nr_cpu_ids, 64);
691#endif
692
688 iSeries_fixup_klimit(); 693 iSeries_fixup_klimit();
689 694
690 /* 695 /*
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 6c6029914dbc..e3265adde5d3 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -42,57 +42,23 @@
42#include <asm/cputable.h> 42#include <asm/cputable.h>
43#include <asm/system.h> 43#include <asm/system.h>
44 44
45#include "smp.h" 45static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
46
47static unsigned long iSeries_smp_message[NR_CPUS];
48
49void iSeries_smp_message_recv(void)
50{
51 int cpu = smp_processor_id();
52 int msg;
53
54 if (num_online_cpus() < 2)
55 return;
56
57 for (msg = 0; msg < 4; msg++)
58 if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
59 smp_message_recv(msg);
60}
61
62static inline void smp_iSeries_do_message(int cpu, int msg)
63{ 46{
64 set_bit(msg, &iSeries_smp_message[cpu]);
65 HvCall_sendIPI(&(paca[cpu])); 47 HvCall_sendIPI(&(paca[cpu]));
66} 48}
67 49
68static void smp_iSeries_message_pass(int target, int msg)
69{
70 int i;
71
72 if (target < NR_CPUS)
73 smp_iSeries_do_message(target, msg);
74 else {
75 for_each_online_cpu(i) {
76 if ((target == MSG_ALL_BUT_SELF) &&
77 (i == smp_processor_id()))
78 continue;
79 smp_iSeries_do_message(i, msg);
80 }
81 }
82}
83
84static int smp_iSeries_probe(void) 50static int smp_iSeries_probe(void)
85{ 51{
86 return cpumask_weight(cpu_possible_mask); 52 return cpumask_weight(cpu_possible_mask);
87} 53}
88 54
89static void smp_iSeries_kick_cpu(int nr) 55static int smp_iSeries_kick_cpu(int nr)
90{ 56{
91 BUG_ON((nr < 0) || (nr >= NR_CPUS)); 57 BUG_ON((nr < 0) || (nr >= NR_CPUS));
92 58
93 /* Verify that our partition has a processor nr */ 59 /* Verify that our partition has a processor nr */
94 if (lppaca_of(nr).dyn_proc_status >= 2) 60 if (lppaca_of(nr).dyn_proc_status >= 2)
95 return; 61 return -ENOENT;
96 62
97 /* The processor is currently spinning, waiting 63 /* The processor is currently spinning, waiting
98 * for the cpu_start field to become non-zero 64 * for the cpu_start field to become non-zero
@@ -100,6 +66,8 @@ static void smp_iSeries_kick_cpu(int nr)
100 * continue on to secondary_start in iSeries_head.S 66 * continue on to secondary_start in iSeries_head.S
101 */ 67 */
102 paca[nr].cpu_start = 1; 68 paca[nr].cpu_start = 1;
69
70 return 0;
103} 71}
104 72
105static void __devinit smp_iSeries_setup_cpu(int nr) 73static void __devinit smp_iSeries_setup_cpu(int nr)
@@ -107,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
107} 75}
108 76
109static struct smp_ops_t iSeries_smp_ops = { 77static struct smp_ops_t iSeries_smp_ops = {
110 .message_pass = smp_iSeries_message_pass, 78 .message_pass = smp_muxed_ipi_message_pass,
79 .cause_ipi = smp_iSeries_cause_ipi,
111 .probe = smp_iSeries_probe, 80 .probe = smp_iSeries_probe,
112 .kick_cpu = smp_iSeries_kick_cpu, 81 .kick_cpu = smp_iSeries_kick_cpu,
113 .setup_cpu = smp_iSeries_setup_cpu, 82 .setup_cpu = smp_iSeries_setup_cpu,
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h
deleted file mode 100644
index d501f7de01e7..000000000000
--- a/arch/powerpc/platforms/iseries/smp.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _PLATFORMS_ISERIES_SMP_H
2#define _PLATFORMS_ISERIES_SMP_H
3
4extern void iSeries_smp_message_recv(void);
5
6#endif /* _PLATFORMS_ISERIES_SMP_H */
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 1e1a0873e1dd..1afd10f67858 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -18,4 +18,13 @@ config PPC_PMAC64
18 select PPC_970_NAP 18 select PPC_970_NAP
19 default y 19 default y
20 20
21 21config PPC_PMAC32_PSURGE
22 bool "Support for powersurge upgrade cards" if EXPERT
23 depends on SMP && PPC32 && PPC_PMAC
24 select PPC_SMP_MUXED_IPI
25 default y
26 help
27 The powersurge cpu boards can be used in the generation
28 of powermacs that have a socket for an upgradeable cpu card,
29 including the 7500, 8500, 9500, 9600. Support exists for
30 both dual and quad socket upgrade cards.
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 7c18a1607d1c..9089b0421191 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -84,7 +84,7 @@ static void __pmac_retrigger(unsigned int irq_nr)
84 84
85static void pmac_mask_and_ack_irq(struct irq_data *d) 85static void pmac_mask_and_ack_irq(struct irq_data *d)
86{ 86{
87 unsigned int src = irq_map[d->irq].hwirq; 87 unsigned int src = irqd_to_hwirq(d);
88 unsigned long bit = 1UL << (src & 0x1f); 88 unsigned long bit = 1UL << (src & 0x1f);
89 int i = src >> 5; 89 int i = src >> 5;
90 unsigned long flags; 90 unsigned long flags;
@@ -106,7 +106,7 @@ static void pmac_mask_and_ack_irq(struct irq_data *d)
106 106
107static void pmac_ack_irq(struct irq_data *d) 107static void pmac_ack_irq(struct irq_data *d)
108{ 108{
109 unsigned int src = irq_map[d->irq].hwirq; 109 unsigned int src = irqd_to_hwirq(d);
110 unsigned long bit = 1UL << (src & 0x1f); 110 unsigned long bit = 1UL << (src & 0x1f);
111 int i = src >> 5; 111 int i = src >> 5;
112 unsigned long flags; 112 unsigned long flags;
@@ -152,7 +152,7 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
152static unsigned int pmac_startup_irq(struct irq_data *d) 152static unsigned int pmac_startup_irq(struct irq_data *d)
153{ 153{
154 unsigned long flags; 154 unsigned long flags;
155 unsigned int src = irq_map[d->irq].hwirq; 155 unsigned int src = irqd_to_hwirq(d);
156 unsigned long bit = 1UL << (src & 0x1f); 156 unsigned long bit = 1UL << (src & 0x1f);
157 int i = src >> 5; 157 int i = src >> 5;
158 158
@@ -169,7 +169,7 @@ static unsigned int pmac_startup_irq(struct irq_data *d)
169static void pmac_mask_irq(struct irq_data *d) 169static void pmac_mask_irq(struct irq_data *d)
170{ 170{
171 unsigned long flags; 171 unsigned long flags;
172 unsigned int src = irq_map[d->irq].hwirq; 172 unsigned int src = irqd_to_hwirq(d);
173 173
174 raw_spin_lock_irqsave(&pmac_pic_lock, flags); 174 raw_spin_lock_irqsave(&pmac_pic_lock, flags);
175 __clear_bit(src, ppc_cached_irq_mask); 175 __clear_bit(src, ppc_cached_irq_mask);
@@ -180,7 +180,7 @@ static void pmac_mask_irq(struct irq_data *d)
180static void pmac_unmask_irq(struct irq_data *d) 180static void pmac_unmask_irq(struct irq_data *d)
181{ 181{
182 unsigned long flags; 182 unsigned long flags;
183 unsigned int src = irq_map[d->irq].hwirq; 183 unsigned int src = irqd_to_hwirq(d);
184 184
185 raw_spin_lock_irqsave(&pmac_pic_lock, flags); 185 raw_spin_lock_irqsave(&pmac_pic_lock, flags);
186 __set_bit(src, ppc_cached_irq_mask); 186 __set_bit(src, ppc_cached_irq_mask);
@@ -193,7 +193,7 @@ static int pmac_retrigger(struct irq_data *d)
193 unsigned long flags; 193 unsigned long flags;
194 194
195 raw_spin_lock_irqsave(&pmac_pic_lock, flags); 195 raw_spin_lock_irqsave(&pmac_pic_lock, flags);
196 __pmac_retrigger(irq_map[d->irq].hwirq); 196 __pmac_retrigger(irqd_to_hwirq(d));
197 raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); 197 raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
198 return 1; 198 return 1;
199} 199}
@@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void)
239 unsigned long bits = 0; 239 unsigned long bits = 0;
240 unsigned long flags; 240 unsigned long flags;
241 241
242#ifdef CONFIG_SMP 242#ifdef CONFIG_PPC_PMAC32_PSURGE
243 void psurge_smp_message_recv(void); 243 /* IPI's are a hack on the powersurge -- Cort */
244 244 if (smp_processor_id() != 0) {
245 /* IPI's are a hack on the powersurge -- Cort */ 245 return psurge_secondary_virq;
246 if ( smp_processor_id() != 0 ) {
247 psurge_smp_message_recv();
248 return NO_IRQ_IGNORE; /* ignore, already handled */
249 } 246 }
250#endif /* CONFIG_SMP */ 247#endif /* CONFIG_PPC_PMAC32_PSURGE */
251 raw_spin_lock_irqsave(&pmac_pic_lock, flags); 248 raw_spin_lock_irqsave(&pmac_pic_lock, flags);
252 for (irq = max_real_irqs; (irq -= 32) >= 0; ) { 249 for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
253 int i = irq >> 5; 250 int i = irq >> 5;
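[Editor's note] The pic.c hunks above replace the open-coded irq_map[d->irq].hwirq lookup with the irqd_to_hwirq() accessor. As a rough illustration only (not part of the patch), a mask callback written against that accessor has the following shape; the two-word mask cache is hypothetical and stands in for the real PIC registers, and at most 64 sources are assumed:

    #include <linux/irq.h>
    #include <asm/irq.h>

    /* Hypothetical cached mask image, 32 interrupt sources per word. */
    static u32 example_mask_cache[2];

    static void example_mask_irq(struct irq_data *d)
    {
            /* hardware source number, not the Linux virq */
            unsigned int src = irqd_to_hwirq(d);

            example_mask_cache[src >> 5] &= ~(1u << (src & 0x1f));
    }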
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
deleted file mode 100644
index d622a8345aaa..000000000000
--- a/arch/powerpc/platforms/powermac/pic.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __PPC_PLATFORMS_PMAC_PIC_H
2#define __PPC_PLATFORMS_PMAC_PIC_H
3
4#include <linux/irq.h>
5
6extern struct irq_chip pmac_pic;
7
8extern void pmac_pic_init(void);
9extern int pmac_get_irq(void);
10
11#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 20468f49aec0..8327cce2bdb0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,6 +33,7 @@ extern void pmac_setup_pci_dma(void);
33extern void pmac_check_ht_link(void); 33extern void pmac_check_ht_link(void);
34 34
35extern void pmac_setup_smp(void); 35extern void pmac_setup_smp(void);
36extern int psurge_secondary_virq;
36extern void low_cpu_die(void) __attribute__((noreturn)); 37extern void low_cpu_die(void) __attribute__((noreturn));
37 38
38extern int pmac_nvram_init(void); 39extern int pmac_nvram_init(void);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index bc5f0dc6ae1e..db092d7c4c5b 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze);
70static u64 timebase; 70static u64 timebase;
71static int tb_req; 71static int tb_req;
72 72
73#ifdef CONFIG_PPC32 73#ifdef CONFIG_PPC_PMAC32_PSURGE
74 74
75/* 75/*
76 * Powersurge (old powermac SMP) support. 76 * Powersurge (old powermac SMP) support.
@@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start;
124/* what sort of powersurge board we have */ 124/* what sort of powersurge board we have */
125static int psurge_type = PSURGE_NONE; 125static int psurge_type = PSURGE_NONE;
126 126
127/* irq for secondary cpus to report */
128static struct irq_host *psurge_host;
129int psurge_secondary_virq;
130
127/* 131/*
128 * Set and clear IPIs for powersurge. 132 * Set and clear IPIs for powersurge.
129 */ 133 */
@@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu)
156/* 160/*
157 * On powersurge (old SMP powermac architecture) we don't have 161 * On powersurge (old SMP powermac architecture) we don't have
158 * separate IPIs for separate messages like openpic does. Instead 162 * separate IPIs for separate messages like openpic does. Instead
159 * we have a bitmap for each processor, where a 1 bit means that 163 * use the generic demux helpers
160 * the corresponding message is pending for that processor.
161 * Ideally each cpu's entry would be in a different cache line.
162 * -- paulus. 164 * -- paulus.
163 */ 165 */
164static unsigned long psurge_smp_message[NR_CPUS]; 166static irqreturn_t psurge_ipi_intr(int irq, void *d)
165
166void psurge_smp_message_recv(void)
167{ 167{
168 int cpu = smp_processor_id(); 168 psurge_clr_ipi(smp_processor_id());
169 int msg; 169 smp_ipi_demux();
170 170
171 /* clear interrupt */ 171 return IRQ_HANDLED;
172 psurge_clr_ipi(cpu); 172}
173
174 if (num_online_cpus() < 2)
175 return;
176 173
177 /* make sure there is a message there */ 174static void smp_psurge_cause_ipi(int cpu, unsigned long data)
178 for (msg = 0; msg < 4; msg++) 175{
179 if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) 176 psurge_set_ipi(cpu);
180 smp_message_recv(msg);
181} 177}
182 178
183irqreturn_t psurge_primary_intr(int irq, void *d) 179static int psurge_host_map(struct irq_host *h, unsigned int virq,
180 irq_hw_number_t hw)
184{ 181{
185 psurge_smp_message_recv(); 182 irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
186 return IRQ_HANDLED; 183
184 return 0;
187} 185}
188 186
189static void smp_psurge_message_pass(int target, int msg) 187struct irq_host_ops psurge_host_ops = {
188 .map = psurge_host_map,
189};
190
191static int psurge_secondary_ipi_init(void)
190{ 192{
191 int i; 193 int rc = -ENOMEM;
192 194
193 if (num_online_cpus() < 2) 195 psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
194 return; 196 &psurge_host_ops, 0);
195 197
196 for_each_online_cpu(i) { 198 if (psurge_host)
197 if (target == MSG_ALL 199 psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
198 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) 200
199 || target == i) { 201 if (psurge_secondary_virq)
200 set_bit(msg, &psurge_smp_message[i]); 202 rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
201 psurge_set_ipi(i); 203 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
202 } 204
203 } 205 if (rc)
206 pr_err("Failed to setup secondary cpu IPI\n");
207
208 return rc;
204} 209}
205 210
206/* 211/*
@@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void)
311 ncpus = 2; 316 ncpus = 2;
312 } 317 }
313 318
319 if (psurge_secondary_ipi_init())
320 return 1;
321
314 psurge_start = ioremap(PSURGE_START, 4); 322 psurge_start = ioremap(PSURGE_START, 4);
315 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); 323 psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
316 324
@@ -329,7 +337,7 @@ static int __init smp_psurge_probe(void)
329 return ncpus; 337 return ncpus;
330} 338}
331 339
332static void __init smp_psurge_kick_cpu(int nr) 340static int __init smp_psurge_kick_cpu(int nr)
333{ 341{
334 unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; 342 unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
335 unsigned long a, flags; 343 unsigned long a, flags;
@@ -394,11 +402,13 @@ static void __init smp_psurge_kick_cpu(int nr)
394 psurge_set_ipi(1); 402 psurge_set_ipi(1);
395 403
396 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); 404 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
405
406 return 0;
397} 407}
398 408
399static struct irqaction psurge_irqaction = { 409static struct irqaction psurge_irqaction = {
400 .handler = psurge_primary_intr, 410 .handler = psurge_ipi_intr,
401 .flags = IRQF_DISABLED, 411 .flags = IRQF_DISABLED|IRQF_PERCPU,
402 .name = "primary IPI", 412 .name = "primary IPI",
403}; 413};
404 414
@@ -437,14 +447,15 @@ void __init smp_psurge_give_timebase(void)
437 447
438/* PowerSurge-style Macs */ 448/* PowerSurge-style Macs */
439struct smp_ops_t psurge_smp_ops = { 449struct smp_ops_t psurge_smp_ops = {
440 .message_pass = smp_psurge_message_pass, 450 .message_pass = smp_muxed_ipi_message_pass,
451 .cause_ipi = smp_psurge_cause_ipi,
441 .probe = smp_psurge_probe, 452 .probe = smp_psurge_probe,
442 .kick_cpu = smp_psurge_kick_cpu, 453 .kick_cpu = smp_psurge_kick_cpu,
443 .setup_cpu = smp_psurge_setup_cpu, 454 .setup_cpu = smp_psurge_setup_cpu,
444 .give_timebase = smp_psurge_give_timebase, 455 .give_timebase = smp_psurge_give_timebase,
445 .take_timebase = smp_psurge_take_timebase, 456 .take_timebase = smp_psurge_take_timebase,
446}; 457};
447#endif /* CONFIG_PPC32 - actually powersurge support */ 458#endif /* CONFIG_PPC_PMAC32_PSURGE */
448 459
449/* 460/*
450 * Core 99 and later support 461 * Core 99 and later support
@@ -791,14 +802,14 @@ static int __init smp_core99_probe(void)
791 return ncpus; 802 return ncpus;
792} 803}
793 804
794static void __devinit smp_core99_kick_cpu(int nr) 805static int __devinit smp_core99_kick_cpu(int nr)
795{ 806{
796 unsigned int save_vector; 807 unsigned int save_vector;
797 unsigned long target, flags; 808 unsigned long target, flags;
798 unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); 809 unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);
799 810
800 if (nr < 0 || nr > 3) 811 if (nr < 0 || nr > 3)
801 return; 812 return -ENOENT;
802 813
803 if (ppc_md.progress) 814 if (ppc_md.progress)
804 ppc_md.progress("smp_core99_kick_cpu", 0x346); 815 ppc_md.progress("smp_core99_kick_cpu", 0x346);
@@ -830,6 +841,8 @@ static void __devinit smp_core99_kick_cpu(int nr)
830 841
831 local_irq_restore(flags); 842 local_irq_restore(flags);
832 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); 843 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
844
845 return 0;
833} 846}
834 847
835static void __devinit smp_core99_setup_cpu(int cpu_nr) 848static void __devinit smp_core99_setup_cpu(int cpu_nr)
@@ -1002,7 +1015,7 @@ void __init pmac_setup_smp(void)
1002 of_node_put(np); 1015 of_node_put(np);
1003 smp_ops = &core99_smp_ops; 1016 smp_ops = &core99_smp_ops;
1004 } 1017 }
1005#ifdef CONFIG_PPC32 1018#ifdef CONFIG_PPC_PMAC32_PSURGE
1006 else { 1019 else {
1007 /* We have to set bits in cpu_possible_mask here since the 1020 /* We have to set bits in cpu_possible_mask here since the
1008 * secondary CPU(s) aren't in the device tree. Various 1021 * secondary CPU(s) aren't in the device tree. Various
@@ -1015,7 +1028,7 @@ void __init pmac_setup_smp(void)
1015 set_cpu_possible(cpu, true); 1028 set_cpu_possible(cpu, true);
1016 smp_ops = &psurge_smp_ops; 1029 smp_ops = &psurge_smp_ops;
1017 } 1030 }
1018#endif /* CONFIG_PPC32 */ 1031#endif /* CONFIG_PPC_PMAC32_PSURGE */
1019 1032
1020#ifdef CONFIG_HOTPLUG_CPU 1033#ifdef CONFIG_HOTPLUG_CPU
1021 ppc_md.cpu_die = pmac_cpu_die; 1034 ppc_md.cpu_die = pmac_cpu_die;
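[Editor's note] The smp.c changes above move the PowerSurge IPI path onto the generic muxed-IPI scheme: .message_pass becomes the common smp_muxed_ipi_message_pass(), the platform only provides a .cause_ipi hook, and the single hardware IPI handler acks the doorbell and calls smp_ipi_demux(). A stripped-down sketch of that shape follows; the doorbell helpers are hypothetical placeholders for psurge_set_ipi()/psurge_clr_ipi():

    #include <linux/interrupt.h>
    #include <linux/smp.h>
    #include <asm/smp.h>

    /* Hypothetical doorbell accessors; real hardware pokes MMIO registers. */
    static void example_doorbell_raise(int cpu) { }
    static void example_doorbell_clear(int cpu) { }

    /* .cause_ipi hook: the message itself is latched by generic code. */
    static void example_cause_ipi(int cpu, unsigned long data)
    {
            example_doorbell_raise(cpu);
    }

    /* One shared IPI handler: ack the hardware, let the core demultiplex. */
    static irqreturn_t example_ipi_intr(int irq, void *dev_id)
    {
            example_doorbell_clear(smp_processor_id());
            smp_ipi_demux();
            return IRQ_HANDLED;
    }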
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index f2f6413b81d3..600ed2c0ed59 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -197,7 +197,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
197 result = irq_set_chip_data(*virq, pd); 197 result = irq_set_chip_data(*virq, pd);
198 198
199 if (result) { 199 if (result) {
200 pr_debug("%s:%d: set_irq_chip_data failed\n", 200 pr_debug("%s:%d: irq_set_chip_data failed\n",
201 __func__, __LINE__); 201 __func__, __LINE__);
202 goto fail_set; 202 goto fail_set;
203 } 203 }
@@ -659,11 +659,6 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
659static void dump_bmp(struct ps3_private* pd) {}; 659static void dump_bmp(struct ps3_private* pd) {};
660#endif /* defined(DEBUG) */ 660#endif /* defined(DEBUG) */
661 661
662static void ps3_host_unmap(struct irq_host *h, unsigned int virq)
663{
664 irq_set_chip_data(virq, NULL);
665}
666
667static int ps3_host_map(struct irq_host *h, unsigned int virq, 662static int ps3_host_map(struct irq_host *h, unsigned int virq,
668 irq_hw_number_t hwirq) 663 irq_hw_number_t hwirq)
669{ 664{
@@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np)
683 678
684static struct irq_host_ops ps3_host_ops = { 679static struct irq_host_ops ps3_host_ops = {
685 .map = ps3_host_map, 680 .map = ps3_host_map,
686 .unmap = ps3_host_unmap,
687 .match = ps3_host_match, 681 .match = ps3_host_match,
688}; 682};
689 683
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 51ffde40af2b..4c44794faac0 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -39,7 +39,7 @@
39#define MSG_COUNT 4 39#define MSG_COUNT 4
40static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); 40static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
41 41
42static void do_message_pass(int target, int msg) 42static void ps3_smp_message_pass(int cpu, int msg)
43{ 43{
44 int result; 44 int result;
45 unsigned int virq; 45 unsigned int virq;
@@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg)
49 return; 49 return;
50 } 50 }
51 51
52 virq = per_cpu(ps3_ipi_virqs, target)[msg]; 52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
53 result = ps3_send_event_locally(virq); 53 result = ps3_send_event_locally(virq);
54 54
55 if (result) 55 if (result)
56 DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" 56 DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
57 " (%d)\n", __func__, __LINE__, target, msg, result); 57 " (%d)\n", __func__, __LINE__, cpu, msg, result);
58}
59
60static void ps3_smp_message_pass(int target, int msg)
61{
62 int cpu;
63
64 if (target < NR_CPUS)
65 do_message_pass(target, msg);
66 else if (target == MSG_ALL_BUT_SELF) {
67 for_each_online_cpu(cpu)
68 if (cpu != smp_processor_id())
69 do_message_pass(cpu, msg);
70 } else {
71 for_each_online_cpu(cpu)
72 do_message_pass(cpu, msg);
73 }
74} 58}
75 59
76static int ps3_smp_probe(void) 60static int ps3_smp_probe(void)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 39a472e9e80f..375a9f92158d 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu)
197 * The current HV requires the spu shadow regs to be mapped with the 197 * The current HV requires the spu shadow regs to be mapped with the
198 * PTE page protection bits set as read-only (PP=3). This implementation 198 * PTE page protection bits set as read-only (PP=3). This implementation
199 * uses the low level __ioremap() to bypass the page protection settings 199 * uses the low level __ioremap() to bypass the page protection settings
200 * inforced by ioremap_flags() to get the needed PTE bits set for the 200 * inforced by ioremap_prot() to get the needed PTE bits set for the
201 * shadow regs. 201 * shadow regs.
202 */ 202 */
203 203
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu)
214 goto fail_ioremap; 214 goto fail_ioremap;
215 } 215 }
216 216
217 spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys, 217 spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
218 LS_SIZE, _PAGE_NO_CACHE); 218 LS_SIZE, _PAGE_NO_CACHE);
219 219
220 if (!spu->local_store) { 220 if (!spu->local_store) {
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5b3da4b4ea79..71af4c5d6c05 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,7 +3,10 @@ config PPC_PSERIES
3 bool "IBM pSeries & new (POWER5-based) iSeries" 3 bool "IBM pSeries & new (POWER5-based) iSeries"
4 select MPIC 4 select MPIC
5 select PCI_MSI 5 select PCI_MSI
6 select XICS 6 select PPC_XICS
7 select PPC_ICP_NATIVE
8 select PPC_ICP_HV
9 select PPC_ICS_RTAS
7 select PPC_I8259 10 select PPC_I8259
8 select PPC_RTAS 11 select PPC_RTAS
9 select PPC_RTAS_DAEMON 12 select PPC_RTAS_DAEMON
@@ -47,6 +50,24 @@ config SCANLOG
47 tristate "Scanlog dump interface" 50 tristate "Scanlog dump interface"
48 depends on RTAS_PROC && PPC_PSERIES 51 depends on RTAS_PROC && PPC_PSERIES
49 52
53config IO_EVENT_IRQ
54 bool "IO Event Interrupt support"
55 depends on PPC_PSERIES
56 default y
57 help
 58	  Select this option if you want to enable support for IO Event
59 interrupts. IO event interrupt is a mechanism provided by RTAS
60 to return information about hardware error and non-error events
61 which may need OS attention. RTAS returns events for multiple
62 event types and scopes. Device drivers can register their handlers
63 to receive events.
64
65 This option will only enable the IO event platform code. You
66 will still need to enable or compile the actual drivers
 67	  that use this infrastructure to handle IO event interrupts.
68
69 Say Y if you are unsure.
70
50config LPARCFG 71config LPARCFG
51 bool "LPAR Configuration Data" 72 bool "LPAR Configuration Data"
52 depends on PPC_PSERIES || PPC_ISERIES 73 depends on PPC_PSERIES || PPC_ISERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fc5237810ece..3556e402cbf5 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -5,7 +5,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
5 setup.o iommu.o event_sources.o ras.o \ 5 setup.o iommu.o event_sources.o ras.o \
6 firmware.o power.o dlpar.o mobility.o 6 firmware.o power.o dlpar.o mobility.o
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_XICS) += xics.o
9obj-$(CONFIG_SCANLOG) += scanlog.o 8obj-$(CONFIG_SCANLOG) += scanlog.o
10obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o 9obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
11obj-$(CONFIG_KEXEC) += kexec.o 10obj-$(CONFIG_KEXEC) += kexec.o
@@ -22,6 +21,7 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
22obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o 21obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
23obj-$(CONFIG_CMM) += cmm.o 22obj-$(CONFIG_CMM) += cmm.o
24obj-$(CONFIG_DTL) += dtl.o 23obj-$(CONFIG_DTL) += dtl.o
24obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
25 25
26ifeq ($(CONFIG_PPC_PSERIES),y) 26ifeq ($(CONFIG_PPC_PSERIES),y)
27obj-$(CONFIG_SUSPEND) += suspend.o 27obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index c371bc06434b..e9190073bb97 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -52,10 +52,10 @@ static u8 dtl_event_mask = 0x7;
52 52
53 53
54/* 54/*
55 * Size of per-cpu log buffers. Default is just under 16 pages worth. 55 * Size of per-cpu log buffers. Firmware requires that the buffer does
56 * not cross a 4k boundary.
56 */ 57 */
57static int dtl_buf_entries = (16 * 85); 58static int dtl_buf_entries = N_DISPATCH_LOG;
58
59 59
60#ifdef CONFIG_VIRT_CPU_ACCOUNTING 60#ifdef CONFIG_VIRT_CPU_ACCOUNTING
61struct dtl_ring { 61struct dtl_ring {
@@ -151,7 +151,7 @@ static int dtl_start(struct dtl *dtl)
151 151
152 /* Register our dtl buffer with the hypervisor. The HV expects the 152 /* Register our dtl buffer with the hypervisor. The HV expects the
153 * buffer size to be passed in the second word of the buffer */ 153 * buffer size to be passed in the second word of the buffer */
154 ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry); 154 ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
155 155
156 hwcpu = get_hard_smp_processor_id(dtl->cpu); 156 hwcpu = get_hard_smp_processor_id(dtl->cpu);
157 addr = __pa(dtl->buf); 157 addr = __pa(dtl->buf);
@@ -196,13 +196,15 @@ static int dtl_enable(struct dtl *dtl)
196 long int rc; 196 long int rc;
197 struct dtl_entry *buf = NULL; 197 struct dtl_entry *buf = NULL;
198 198
199 if (!dtl_cache)
200 return -ENOMEM;
201
199 /* only allow one reader */ 202 /* only allow one reader */
200 if (dtl->buf) 203 if (dtl->buf)
201 return -EBUSY; 204 return -EBUSY;
202 205
203 n_entries = dtl_buf_entries; 206 n_entries = dtl_buf_entries;
204 buf = kmalloc_node(n_entries * sizeof(struct dtl_entry), 207 buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
205 GFP_KERNEL, cpu_to_node(dtl->cpu));
206 if (!buf) { 208 if (!buf) {
207 printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", 209 printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
208 __func__, dtl->cpu); 210 __func__, dtl->cpu);
@@ -223,7 +225,7 @@ static int dtl_enable(struct dtl *dtl)
223 spin_unlock(&dtl->lock); 225 spin_unlock(&dtl->lock);
224 226
225 if (rc) 227 if (rc)
226 kfree(buf); 228 kmem_cache_free(dtl_cache, buf);
227 return rc; 229 return rc;
228} 230}
229 231
@@ -231,7 +233,7 @@ static void dtl_disable(struct dtl *dtl)
231{ 233{
232 spin_lock(&dtl->lock); 234 spin_lock(&dtl->lock);
233 dtl_stop(dtl); 235 dtl_stop(dtl);
234 kfree(dtl->buf); 236 kmem_cache_free(dtl_cache, dtl->buf);
235 dtl->buf = NULL; 237 dtl->buf = NULL;
236 dtl->buf_entries = 0; 238 dtl->buf_entries = 0;
237 spin_unlock(&dtl->lock); 239 spin_unlock(&dtl->lock);
@@ -365,7 +367,7 @@ static int dtl_init(void)
365 367
366 event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, 368 event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
367 dtl_dir, &dtl_event_mask); 369 dtl_dir, &dtl_event_mask);
368 buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600, 370 buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
369 dtl_dir, &dtl_buf_entries); 371 dtl_dir, &dtl_buf_entries);
370 372
371 if (!event_mask_file || !buf_entries_file) { 373 if (!event_mask_file || !buf_entries_file) {
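[Editor's note] The dtl.c change replaces kmalloc'd buffers with allocations from dtl_cache so that each per-cpu dispatch trace log meets the firmware requirement of not crossing a 4k boundary. The trick is a kmem_cache whose object size equals its alignment. A minimal sketch, assuming a 4k buffer (the real code uses DISPATCH_LOG_BYTES):

    #include <linux/slab.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    #define EXAMPLE_DTL_BYTES 4096  /* assumed; stands in for DISPATCH_LOG_BYTES */

    static struct kmem_cache *example_dtl_cache;

    static int __init example_dtl_cache_init(void)
    {
            /*
             * size == align means every object starts on a 4k boundary and is
             * exactly 4k long, so no buffer can straddle a 4k boundary.
             */
            example_dtl_cache = kmem_cache_create("example_dtl", EXAMPLE_DTL_BYTES,
                                                  EXAMPLE_DTL_BYTES, 0, NULL);
            return example_dtl_cache ? 0 : -ENOMEM;
    }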
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 89649173d3a3..46b55cf563e3 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -93,6 +93,7 @@ static int ibm_slot_error_detail;
93static int ibm_get_config_addr_info; 93static int ibm_get_config_addr_info;
94static int ibm_get_config_addr_info2; 94static int ibm_get_config_addr_info2;
95static int ibm_configure_bridge; 95static int ibm_configure_bridge;
96static int ibm_configure_pe;
96 97
97int eeh_subsystem_enabled; 98int eeh_subsystem_enabled;
98EXPORT_SYMBOL(eeh_subsystem_enabled); 99EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -261,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
261 pci_regs_buf[0] = 0; 262 pci_regs_buf[0] = 0;
262 263
263 rtas_pci_enable(pdn, EEH_THAW_MMIO); 264 rtas_pci_enable(pdn, EEH_THAW_MMIO);
265 rtas_configure_bridge(pdn);
266 eeh_restore_bars(pdn);
264 loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); 267 loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
265 268
266 rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); 269 rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
@@ -448,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
448 raw_spin_unlock_irqrestore(&confirm_error_lock, flags); 451 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
449} 452}
450 453
454void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
455{
456 struct device_node *dn;
457
458 for_each_child_of_node(parent, dn) {
459 if (PCI_DN(dn)) {
460
461 struct pci_dev *dev = PCI_DN(dn)->pcidev;
462
463 if (dev && dev->driver)
464 *freset |= dev->needs_freset;
465
466 __eeh_set_pe_freset(dn, freset);
467 }
468 }
469}
470
471void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
472{
473 struct pci_dev *dev;
474 dn = find_device_pe(dn);
475
476 /* Back up one, since config addrs might be shared */
477 if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
478 dn = dn->parent;
479
480 dev = PCI_DN(dn)->pcidev;
481 if (dev)
482 *freset |= dev->needs_freset;
483
484 __eeh_set_pe_freset(dn, freset);
485}
486
451/** 487/**
452 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze 488 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
453 * @dn device node 489 * @dn device node
@@ -692,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
692 if (pdn->eeh_pe_config_addr) 728 if (pdn->eeh_pe_config_addr)
693 config_addr = pdn->eeh_pe_config_addr; 729 config_addr = pdn->eeh_pe_config_addr;
694 730
695 rc = rtas_call(ibm_set_slot_reset,4,1, NULL, 731 rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
696 config_addr, 732 config_addr,
697 BUID_HI(pdn->phb->buid), 733 BUID_HI(pdn->phb->buid),
698 BUID_LO(pdn->phb->buid), 734 BUID_LO(pdn->phb->buid),
699 state); 735 state);
700 if (rc) 736
701 printk (KERN_WARNING "EEH: Unable to reset the failed slot," 737 /* Fundamental-reset not supported on this PE, try hot-reset */
702 " (%d) #RST=%d dn=%s\n", 738 if (rc == -8 && state == 3) {
703 rc, state, pdn->node->full_name); 739 rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
740 config_addr,
741 BUID_HI(pdn->phb->buid),
742 BUID_LO(pdn->phb->buid), 1);
743 if (rc)
744 printk(KERN_WARNING
745 "EEH: Unable to reset the failed slot,"
746 " #RST=%d dn=%s\n",
747 rc, pdn->node->full_name);
748 }
704} 749}
705 750
706/** 751/**
@@ -736,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
736/** 781/**
737 * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second 782 * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
738 * @pdn: pci device node to be reset. 783 * @pdn: pci device node to be reset.
739 *
740 * Return 0 if success, else a non-zero value.
741 */ 784 */
742 785
743static void __rtas_set_slot_reset(struct pci_dn *pdn) 786static void __rtas_set_slot_reset(struct pci_dn *pdn)
744{ 787{
745 struct pci_dev *dev = pdn->pcidev; 788 unsigned int freset = 0;
746 789
747 /* Determine type of EEH reset required by device, 790 /* Determine type of EEH reset required for
748 * default hot reset or fundamental reset 791 * Partitionable Endpoint, a hot-reset (1)
749 */ 792 * or a fundamental reset (3).
750 if (dev && dev->needs_freset) 793 * A fundamental reset required by any device under
794 * Partitionable Endpoint trumps hot-reset.
795 */
796 eeh_set_pe_freset(pdn->node, &freset);
797
798 if (freset)
751 rtas_pci_slot_reset(pdn, 3); 799 rtas_pci_slot_reset(pdn, 3);
752 else 800 else
753 rtas_pci_slot_reset(pdn, 1); 801 rtas_pci_slot_reset(pdn, 1);
@@ -895,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn)
895{ 943{
896 int config_addr; 944 int config_addr;
897 int rc; 945 int rc;
946 int token;
898 947
899 /* Use PE configuration address, if present */ 948 /* Use PE configuration address, if present */
900 config_addr = pdn->eeh_config_addr; 949 config_addr = pdn->eeh_config_addr;
901 if (pdn->eeh_pe_config_addr) 950 if (pdn->eeh_pe_config_addr)
902 config_addr = pdn->eeh_pe_config_addr; 951 config_addr = pdn->eeh_pe_config_addr;
903 952
904 rc = rtas_call(ibm_configure_bridge,3,1, NULL, 953 /* Use new configure-pe function, if supported */
954 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
955 token = ibm_configure_pe;
956 else
957 token = ibm_configure_bridge;
958
959 rc = rtas_call(token, 3, 1, NULL,
905 config_addr, 960 config_addr,
906 BUID_HI(pdn->phb->buid), 961 BUID_HI(pdn->phb->buid),
907 BUID_LO(pdn->phb->buid)); 962 BUID_LO(pdn->phb->buid));
@@ -1077,6 +1132,7 @@ void __init eeh_init(void)
1077 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); 1132 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
1078 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); 1133 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
1079 ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); 1134 ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
1135 ibm_configure_pe = rtas_token("ibm,configure-pe");
1080 1136
1081 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) 1137 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
1082 return; 1138 return;
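[Editor's note] The new eeh_set_pe_freset() walk ORs together the needs_freset flags of every device under the partitionable endpoint, so one device requesting a fundamental reset upgrades the whole PE from a hot reset. On the driver side the request is just a flag set at probe time; a hedged sketch of a hypothetical driver doing so:

    #include <linux/pci.h>

    /* Hypothetical probe routine: ask EEH recovery for a fundamental reset. */
    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            pdev->needs_freset = 1;  /* picked up by eeh_set_pe_freset() */
            return 0;
    }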
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index b8d70f5d9aa9..1b6cb10589e0 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
328 struct pci_bus *frozen_bus; 328 struct pci_bus *frozen_bus;
329 int rc = 0; 329 int rc = 0;
330 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 330 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
331 const char *location, *pci_str, *drv_str; 331 const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
332 332
333 frozen_dn = find_device_pe(event->dn); 333 frozen_dn = find_device_pe(event->dn);
334 if (!frozen_dn) { 334 if (!frozen_dn) {
@@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
364 frozen_pdn = PCI_DN(frozen_dn); 364 frozen_pdn = PCI_DN(frozen_dn);
365 frozen_pdn->eeh_freeze_count++; 365 frozen_pdn->eeh_freeze_count++;
366 366
367 if (frozen_pdn->pcidev) { 367 pci_str = eeh_pci_name(event->dev);
368 pci_str = pci_name (frozen_pdn->pcidev); 368 drv_str = pcid_name(event->dev);
369 drv_str = pcid_name (frozen_pdn->pcidev);
370 } else {
371 pci_str = eeh_pci_name(event->dev);
372 drv_str = pcid_name (event->dev);
373 }
374 369
375 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) 370 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
376 goto excess_failures; 371 goto excess_failures;
@@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
378 printk(KERN_WARNING 373 printk(KERN_WARNING
379 "EEH: This PCI device has failed %d times in the last hour:\n", 374 "EEH: This PCI device has failed %d times in the last hour:\n",
380 frozen_pdn->eeh_freeze_count); 375 frozen_pdn->eeh_freeze_count);
376
377 if (frozen_pdn->pcidev) {
378 bus_pci_str = pci_name(frozen_pdn->pcidev);
379 bus_drv_str = pcid_name(frozen_pdn->pcidev);
380 printk(KERN_WARNING
381 "EEH: Bus location=%s driver=%s pci addr=%s\n",
382 location, bus_drv_str, bus_pci_str);
383 }
384
381 printk(KERN_WARNING 385 printk(KERN_WARNING
382 "EEH: location=%s driver=%s pci addr=%s\n", 386 "EEH: Device location=%s driver=%s pci addr=%s\n",
383 location, drv_str, pci_str); 387 location, drv_str, pci_str);
384 388
385 /* Walk the various device drivers attached to this slot through 389 /* Walk the various device drivers attached to this slot through
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ef8c45489e20..46f13a3c5d09 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/interrupt.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
23#include <linux/cpu.h> 24#include <linux/cpu.h>
24#include <asm/system.h> 25#include <asm/system.h>
@@ -28,7 +29,7 @@
28#include <asm/machdep.h> 29#include <asm/machdep.h>
29#include <asm/vdso_datapage.h> 30#include <asm/vdso_datapage.h>
30#include <asm/pSeries_reconfig.h> 31#include <asm/pSeries_reconfig.h>
31#include "xics.h" 32#include <asm/xics.h>
32#include "plpar_wrappers.h" 33#include "plpar_wrappers.h"
33#include "offline_states.h" 34#include "offline_states.h"
34 35
@@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np)
280 } 281 }
281 282
282 for_each_cpu(cpu, tmp) { 283 for_each_cpu(cpu, tmp) {
283 BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); 284 BUG_ON(cpu_present(cpu));
284 set_cpu_present(cpu, true); 285 set_cpu_present(cpu, true);
285 set_hard_smp_processor_id(cpu, *intserv++); 286 set_hard_smp_processor_id(cpu, *intserv++);
286 } 287 }
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
new file mode 100644
index 000000000000..c829e6067d54
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -0,0 +1,231 @@
1/*
2 * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/errno.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15#include <linux/of.h>
16#include <linux/list.h>
17#include <linux/notifier.h>
18
19#include <asm/machdep.h>
20#include <asm/rtas.h>
21#include <asm/irq.h>
22#include <asm/io_event_irq.h>
23
24#include "pseries.h"
25
26/*
27 * IO event interrupt is a mechanism provided by RTAS to return
28 * information about hardware error and non-error events. Device
29 * drivers can register their event handlers to receive events.
30 * Device drivers are expected to use atomic_notifier_chain_register()
31 * and atomic_notifier_chain_unregister() to register and unregister
32 * their event handlers. Since multiple IO event types and scopes
33 * share an IO event interrupt, the event handlers are called one
34 * by one until the IO event is claimed by one of the handlers.
35 * The event handlers are expected to return NOTIFY_OK if the
36 * event is handled by the event handler or NOTIFY_DONE if the
37 * event does not belong to the handler.
38 *
39 * Usage:
40 *
41 * Notifier function:
42 * #include <asm/io_event_irq.h>
43 * int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
44 * p = (struct pseries_io_event_sect_data *) data;
45 * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
46 * :
47 * :
48 * return NOTIFY_OK;
49 * }
50 * struct notifier_block event_nb = {
51 * .notifier_call = event_handler,
52 * }
53 *
54 * Registration:
55 * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
56 *
57 * Unregistration:
58 * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
59 */
60
61ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
62EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
63
64static int ioei_check_exception_token;
65
66/* pSeries event log format */
67
68/* Two bytes ASCII section IDs */
69#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
70#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
71#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
72#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
73#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
74#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
75#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
76#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
77#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
78#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
79#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
80#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
81#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
82#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
83#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
84#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
85
86/* Vendor specific Platform Event Log Format, Version 6, section header */
87struct pseries_elog_section {
88 uint16_t id; /* 0x00 2-byte ASCII section ID */
89 uint16_t length; /* 0x02 Section length in bytes */
90 uint8_t version; /* 0x04 Section version */
91 uint8_t subtype; /* 0x05 Section subtype */
92 uint16_t creator_component; /* 0x06 Creator component ID */
93 uint8_t data[]; /* 0x08 Start of section data */
94};
95
96static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
97
98/**
99 * Find data portion of a specific section in RTAS extended event log.
100 * @elog: RTAS error/event log.
 101 * @sect_id: section ID.
102 *
103 * Return:
104 * pointer to the section data of the specified section
105 * NULL if not found
106 */
107static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
108 uint16_t sect_id)
109{
110 struct rtas_ext_event_log_v6 *xelog =
111 (struct rtas_ext_event_log_v6 *) elog->buffer;
112 struct pseries_elog_section *sect;
113 unsigned char *p, *log_end;
114
115 /* Check that we understand the format */
116 if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
117 xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
118 xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
119 return NULL;
120
121 log_end = elog->buffer + elog->extended_log_length;
122 p = xelog->vendor_log;
123 while (p < log_end) {
124 sect = (struct pseries_elog_section *)p;
125 if (sect->id == sect_id)
126 return sect;
127 p += sect->length;
128 }
129 return NULL;
130}
131
132/**
133 * Find the data portion of an IO Event section from event log.
134 * @elog: RTAS error/event log.
135 *
136 * Return:
137 * pointer to a valid IO event section data. NULL if not found.
138 */
139static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
140{
141 struct pseries_elog_section *sect;
142
143 /* We should only ever get called for io-event interrupts, but if
144 * we do get called for another type then something went wrong so
145 * make some noise about it.
146 * RTAS_TYPE_IO only exists in extended event log version 6 or later.
147 * No need to check event log version.
148 */
149 if (unlikely(elog->type != RTAS_TYPE_IO)) {
150 printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
151 elog->type);
152 return NULL;
153 }
154
155 sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
156 if (unlikely(!sect)) {
157 printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
158 "log does not contain an IO Event section. "
159 "Could be a bug in system firmware!\n");
160 return NULL;
161 }
162 return (struct pseries_io_event *) &sect->data;
163}
164
165/*
166 * PAPR:
167 * - check-exception returns the first found error or event and clear that
168 * error or event so it is reported once.
 169 * - Each interrupt returns one event. If a platform chooses to report
170 * multiple events through a single interrupt, it must ensure that the
171 * interrupt remains asserted until check-exception has been used to
 172 *   process all outstanding events for that interrupt.
173 *
174 * Implementation notes:
175 * - Events must be processed in the order they are returned. Hence,
176 * sequential in nature.
177 * - The owner of an event is determined by combinations of scope,
178 * event type, and sub-type. There is no easy way to pre-sort clients
179 * by scope or event type alone. For example, Torrent ISR route change
 180 *   event is reported with scope 0x00 (Not Applicable) rather than
 181 *   0x3B (Torrent-hub). It is better to let the clients identify
 182 *   who owns the event.
183 */
184
185static irqreturn_t ioei_interrupt(int irq, void *dev_id)
186{
187 struct pseries_io_event *event;
188 int rtas_rc;
189
190 for (;;) {
191 rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
192 RTAS_VECTOR_EXTERNAL_INTERRUPT,
193 virq_to_hw(irq),
194 RTAS_IO_EVENTS, 1 /* Time Critical */,
195 __pa(ioei_rtas_buf),
196 RTAS_DATA_BUF_SIZE);
197 if (rtas_rc != 0)
198 break;
199
200 event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
201 if (!event)
202 continue;
203
204 atomic_notifier_call_chain(&pseries_ioei_notifier_list,
205 0, event);
206 }
207 return IRQ_HANDLED;
208}
209
210static int __init ioei_init(void)
211{
212 struct device_node *np;
213
214 ioei_check_exception_token = rtas_token("check-exception");
215 if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) {
216 pr_warning("IO Event IRQ not supported on this system !\n");
217 return -ENODEV;
218 }
219 np = of_find_node_by_path("/event-sources/ibm,io-events");
220 if (np) {
221 request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
222 of_node_put(np);
223 } else {
224 pr_err("io_event_irq: No ibm,io-events on system! "
225 "IO Event interrupt disabled.\n");
226 return -ENODEV;
227 }
228 return 0;
229}
230machine_subsys_initcall(pseries, ioei_init);
231
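[Editor's note] Per the usage notes in the new file's header comment, consumers hook pseries_ioei_notifier_list and either claim an event or pass it on. A minimal hypothetical client might look like the following; example_event_is_mine() is a stand-in for whatever scope/type check the real driver performs:

    #include <linux/notifier.h>
    #include <linux/types.h>
    #include <linux/init.h>
    #include <asm/io_event_irq.h>

    /* Stand-in for the driver's real scope/event-type check. */
    static bool example_event_is_mine(struct pseries_io_event *event)
    {
            return false;
    }

    static int example_ioei_handler(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            struct pseries_io_event *event = data;

            if (!example_event_is_mine(event))
                    return NOTIFY_DONE;     /* not ours; let other handlers look */

            /* ... handle the event ... */
            return NOTIFY_OK;               /* event handled */
    }

    static struct notifier_block example_ioei_nb = {
            .notifier_call = example_ioei_handler,
    };

    static int __init example_ioei_client_init(void)
    {
            return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
                                                  &example_ioei_nb);
    }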
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 6d5412a18b26..01faab9456ca 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -659,15 +659,18 @@ static void remove_ddw(struct device_node *np)
659{ 659{
660 struct dynamic_dma_window_prop *dwp; 660 struct dynamic_dma_window_prop *dwp;
661 struct property *win64; 661 struct property *win64;
662 const u32 *ddr_avail; 662 const u32 *ddw_avail;
663 u64 liobn; 663 u64 liobn;
664 int len, ret; 664 int len, ret;
665 665
666 ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len); 666 ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
667 win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); 667 win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
668 if (!win64 || !ddr_avail || len < 3 * sizeof(u32)) 668 if (!win64)
669 return; 669 return;
670 670
671 if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
672 goto delprop;
673
671 dwp = win64->value; 674 dwp = win64->value;
672 liobn = (u64)be32_to_cpu(dwp->liobn); 675 liobn = (u64)be32_to_cpu(dwp->liobn);
673 676
@@ -681,28 +684,29 @@ static void remove_ddw(struct device_node *np)
681 pr_debug("%s successfully cleared tces in window.\n", 684 pr_debug("%s successfully cleared tces in window.\n",
682 np->full_name); 685 np->full_name);
683 686
684 ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn); 687 ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
685 if (ret) 688 if (ret)
686 pr_warning("%s: failed to remove direct window: rtas returned " 689 pr_warning("%s: failed to remove direct window: rtas returned "
687 "%d to ibm,remove-pe-dma-window(%x) %llx\n", 690 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
688 np->full_name, ret, ddr_avail[2], liobn); 691 np->full_name, ret, ddw_avail[2], liobn);
689 else 692 else
690 pr_debug("%s: successfully removed direct window: rtas returned " 693 pr_debug("%s: successfully removed direct window: rtas returned "
691 "%d to ibm,remove-pe-dma-window(%x) %llx\n", 694 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
692 np->full_name, ret, ddr_avail[2], liobn); 695 np->full_name, ret, ddw_avail[2], liobn);
693}
694 696
697delprop:
698 ret = prom_remove_property(np, win64);
699 if (ret)
700 pr_warning("%s: failed to remove direct window property: %d\n",
701 np->full_name, ret);
702}
695 703
696static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn) 704static u64 find_existing_ddw(struct device_node *pdn)
697{ 705{
698 struct device_node *dn;
699 struct pci_dn *pcidn;
700 struct direct_window *window; 706 struct direct_window *window;
701 const struct dynamic_dma_window_prop *direct64; 707 const struct dynamic_dma_window_prop *direct64;
702 u64 dma_addr = 0; 708 u64 dma_addr = 0;
703 709
704 dn = pci_device_to_OF_node(dev);
705 pcidn = PCI_DN(dn);
706 spin_lock(&direct_window_list_lock); 710 spin_lock(&direct_window_list_lock);
707 /* check if we already created a window and dupe that config if so */ 711 /* check if we already created a window and dupe that config if so */
708 list_for_each_entry(window, &direct_window_list, list) { 712 list_for_each_entry(window, &direct_window_list, list) {
@@ -717,36 +721,40 @@ static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *
717 return dma_addr; 721 return dma_addr;
718} 722}
719 723
720static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn) 724static int find_existing_ddw_windows(void)
721{ 725{
722 struct device_node *dn;
723 struct pci_dn *pcidn;
724 int len; 726 int len;
727 struct device_node *pdn;
725 struct direct_window *window; 728 struct direct_window *window;
726 const struct dynamic_dma_window_prop *direct64; 729 const struct dynamic_dma_window_prop *direct64;
727 u64 dma_addr = 0;
728 730
729 dn = pci_device_to_OF_node(dev); 731 if (!firmware_has_feature(FW_FEATURE_LPAR))
730 pcidn = PCI_DN(dn); 732 return 0;
731 direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); 733
732 if (direct64) { 734 for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
735 direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
736 if (!direct64)
737 continue;
738
733 window = kzalloc(sizeof(*window), GFP_KERNEL); 739 window = kzalloc(sizeof(*window), GFP_KERNEL);
734 if (!window) { 740 if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
741 kfree(window);
735 remove_ddw(pdn); 742 remove_ddw(pdn);
736 } else { 743 continue;
737 window->device = pdn;
738 window->prop = direct64;
739 spin_lock(&direct_window_list_lock);
740 list_add(&window->list, &direct_window_list);
741 spin_unlock(&direct_window_list_lock);
742 dma_addr = direct64->dma_base;
743 } 744 }
745
746 window->device = pdn;
747 window->prop = direct64;
748 spin_lock(&direct_window_list_lock);
749 list_add(&window->list, &direct_window_list);
750 spin_unlock(&direct_window_list_lock);
744 } 751 }
745 752
746 return dma_addr; 753 return 0;
747} 754}
755machine_arch_initcall(pseries, find_existing_ddw_windows);
748 756
749static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail, 757static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
750 struct ddw_query_response *query) 758 struct ddw_query_response *query)
751{ 759{
752 struct device_node *dn; 760 struct device_node *dn;
@@ -767,15 +775,15 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
767 if (pcidn->eeh_pe_config_addr) 775 if (pcidn->eeh_pe_config_addr)
768 cfg_addr = pcidn->eeh_pe_config_addr; 776 cfg_addr = pcidn->eeh_pe_config_addr;
769 buid = pcidn->phb->buid; 777 buid = pcidn->phb->buid;
770 ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query, 778 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
771 cfg_addr, BUID_HI(buid), BUID_LO(buid)); 779 cfg_addr, BUID_HI(buid), BUID_LO(buid));
772 dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" 780 dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
773 " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid), 781 " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
774 BUID_LO(buid), ret); 782 BUID_LO(buid), ret);
775 return ret; 783 return ret;
776} 784}
777 785
778static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail, 786static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
779 struct ddw_create_response *create, int page_shift, 787 struct ddw_create_response *create, int page_shift,
780 int window_shift) 788 int window_shift)
781{ 789{
@@ -800,12 +808,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
800 808
801 do { 809 do {
802 /* extra outputs are LIOBN and dma-addr (hi, lo) */ 810 /* extra outputs are LIOBN and dma-addr (hi, lo) */
803 ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr, 811 ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
804 BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); 812 BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
805 } while (rtas_busy_delay(ret)); 813 } while (rtas_busy_delay(ret));
806 dev_info(&dev->dev, 814 dev_info(&dev->dev,
807 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " 815 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
808 "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1], 816 "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
809 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, 817 cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
810 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); 818 window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
811 819
@@ -831,18 +839,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
831 int page_shift; 839 int page_shift;
832 u64 dma_addr, max_addr; 840 u64 dma_addr, max_addr;
833 struct device_node *dn; 841 struct device_node *dn;
834 const u32 *uninitialized_var(ddr_avail); 842 const u32 *uninitialized_var(ddw_avail);
835 struct direct_window *window; 843 struct direct_window *window;
836 struct property *uninitialized_var(win64); 844 struct property *win64;
837 struct dynamic_dma_window_prop *ddwprop; 845 struct dynamic_dma_window_prop *ddwprop;
838 846
839 mutex_lock(&direct_window_init_mutex); 847 mutex_lock(&direct_window_init_mutex);
840 848
841 dma_addr = dupe_ddw_if_already_created(dev, pdn); 849 dma_addr = find_existing_ddw(pdn);
842 if (dma_addr != 0)
843 goto out_unlock;
844
845 dma_addr = dupe_ddw_if_kexec(dev, pdn);
846 if (dma_addr != 0) 850 if (dma_addr != 0)
847 goto out_unlock; 851 goto out_unlock;
848 852
@@ -854,8 +858,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
854 * for the given node in that order. 858 * for the given node in that order.
855 * the property is actually in the parent, not the PE 859 * the property is actually in the parent, not the PE
856 */ 860 */
857 ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); 861 ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
858 if (!ddr_avail || len < 3 * sizeof(u32)) 862 if (!ddw_avail || len < 3 * sizeof(u32))
859 goto out_unlock; 863 goto out_unlock;
860 864
861 /* 865 /*
@@ -865,7 +869,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
865 * of page sizes: supported and supported for migrate-dma. 869 * of page sizes: supported and supported for migrate-dma.
866 */ 870 */
867 dn = pci_device_to_OF_node(dev); 871 dn = pci_device_to_OF_node(dev);
868 ret = query_ddw(dev, ddr_avail, &query); 872 ret = query_ddw(dev, ddw_avail, &query);
869 if (ret != 0) 873 if (ret != 0)
870 goto out_unlock; 874 goto out_unlock;
871 875
@@ -907,13 +911,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
907 } 911 }
908 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); 912 win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
909 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); 913 win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
914 win64->length = sizeof(*ddwprop);
910 if (!win64->name || !win64->value) { 915 if (!win64->name || !win64->value) {
911 dev_info(&dev->dev, 916 dev_info(&dev->dev,
912 "couldn't allocate property name and value\n"); 917 "couldn't allocate property name and value\n");
913 goto out_free_prop; 918 goto out_free_prop;
914 } 919 }
915 920
916 ret = create_ddw(dev, ddr_avail, &create, page_shift, len); 921 ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
917 if (ret != 0) 922 if (ret != 0)
918 goto out_free_prop; 923 goto out_free_prop;
919 924
@@ -1021,13 +1026,16 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
1021 const void *dma_window = NULL; 1026 const void *dma_window = NULL;
1022 u64 dma_offset; 1027 u64 dma_offset;
1023 1028
1024 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 1029 if (!dev->dma_mask)
1025 return -EIO; 1030 return -EIO;
1026 1031
1032 if (!dev_is_pci(dev))
1033 goto check_mask;
1034
1035 pdev = to_pci_dev(dev);
1036
1027 /* only attempt to use a new window if 64-bit DMA is requested */ 1037 /* only attempt to use a new window if 64-bit DMA is requested */
1028 if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { 1038 if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
1029 pdev = to_pci_dev(dev);
1030
1031 dn = pci_device_to_OF_node(pdev); 1039 dn = pci_device_to_OF_node(pdev);
1032 dev_dbg(dev, "node is %s\n", dn->full_name); 1040 dev_dbg(dev, "node is %s\n", dn->full_name);
1033 1041
@@ -1054,12 +1062,17 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
1054 } 1062 }
1055 } 1063 }
1056 1064
1057 /* fall-through to iommu ops */ 1065 /* fall back on iommu ops, restore table pointer with ops */
1058 if (!ddw_enabled) { 1066 if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
1059 dev_info(dev, "Using 32-bit DMA via iommu\n"); 1067 dev_info(dev, "Restoring 32-bit DMA via iommu\n");
1060 set_dma_ops(dev, &dma_iommu_ops); 1068 set_dma_ops(dev, &dma_iommu_ops);
1069 pci_dma_dev_setup_pSeriesLP(pdev);
1061 } 1070 }
1062 1071
1072check_mask:
1073 if (!dma_supported(dev, dma_mask))
1074 return -EIO;
1075
1063 *dev->dma_mask = dma_mask; 1076 *dev->dma_mask = dma_mask;
1064 return 0; 1077 return 0;
1065} 1078}
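[Editor's note] Condensed, the reworked dma_set_mask_pSeriesLP() flow is: only PCI devices try the 64-bit direct window; on any failure the device is put back on the 32-bit iommu ops, and the generic dma_supported() check runs last for every device. A rough sketch under those assumptions (example_try_enable_ddw() is hypothetical and stands in for the find_existing_ddw()/enable_ddw() path; includes are approximate):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <asm/iommu.h>

    /* Hypothetical: returns a non-zero DMA offset if a direct window was set up. */
    static u64 example_try_enable_ddw(struct pci_dev *pdev)
    {
            return 0;
    }

    static int example_set_dma_mask(struct device *dev, u64 dma_mask)
    {
            bool ddw_enabled = false;

            if (dev_is_pci(dev) && dma_mask == DMA_BIT_MASK(64))
                    ddw_enabled = example_try_enable_ddw(to_pci_dev(dev)) != 0;

            /* fall back to (or stay on) the 32-bit iommu path */
            if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops)
                    set_dma_ops(dev, &dma_iommu_ops);

            if (!dma_supported(dev, dma_mask))
                    return -EIO;

            *dev->dma_mask = dma_mask;
            return 0;
    }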
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 77d38a5e2ff9..54cf3a4aa16b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -7,15 +7,18 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9 9
10#include <linux/kernel.h>
11#include <linux/interrupt.h>
12
10#include <asm/machdep.h> 13#include <asm/machdep.h>
11#include <asm/page.h> 14#include <asm/page.h>
12#include <asm/firmware.h> 15#include <asm/firmware.h>
13#include <asm/kexec.h> 16#include <asm/kexec.h>
14#include <asm/mpic.h> 17#include <asm/mpic.h>
18#include <asm/xics.h>
15#include <asm/smp.h> 19#include <asm/smp.h>
16 20
17#include "pseries.h" 21#include "pseries.h"
18#include "xics.h"
19#include "plpar_wrappers.h" 22#include "plpar_wrappers.h"
20 23
21static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) 24static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index ca5d5898d320..39e6e0a7b2fa 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -329,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
329 /* Make pHyp happy */ 329 /* Make pHyp happy */
330 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) 330 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
331 hpte_r &= ~_PAGE_COHERENT; 331 hpte_r &= ~_PAGE_COHERENT;
332 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
333 flags |= H_COALESCE_CAND;
332 334
333 lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); 335 lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
334 if (unlikely(lpar_rc == H_PTEG_FULL)) { 336 if (unlikely(lpar_rc == H_PTEG_FULL)) {
@@ -573,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
573 unsigned long i, pix, rc; 575 unsigned long i, pix, rc;
574 unsigned long flags = 0; 576 unsigned long flags = 0;
575 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 577 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
576 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); 578 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
577 unsigned long param[9]; 579 unsigned long param[9];
578 unsigned long va; 580 unsigned long va;
579 unsigned long hash, index, shift, hidx, slot; 581 unsigned long hash, index, shift, hidx, slot;
@@ -771,3 +773,47 @@ out:
771 local_irq_restore(flags); 773 local_irq_restore(flags);
772} 774}
773#endif 775#endif
776
777/**
778 * h_get_mpp
779 * H_GET_MPP hcall returns info in 7 parms
780 */
781int h_get_mpp(struct hvcall_mpp_data *mpp_data)
782{
783 int rc;
784 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
785
786 rc = plpar_hcall9(H_GET_MPP, retbuf);
787
788 mpp_data->entitled_mem = retbuf[0];
789 mpp_data->mapped_mem = retbuf[1];
790
791 mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
792 mpp_data->pool_num = retbuf[2] & 0xffff;
793
794 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
795 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
796 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
797
798 mpp_data->pool_size = retbuf[4];
799 mpp_data->loan_request = retbuf[5];
800 mpp_data->backing_mem = retbuf[6];
801
802 return rc;
803}
804EXPORT_SYMBOL(h_get_mpp);
805
806int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
807{
808 int rc;
809 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
810
811 rc = plpar_hcall9(H_GET_MPP_X, retbuf);
812
813 mpp_x_data->coalesced_bytes = retbuf[0];
814 mpp_x_data->pool_coalesced_bytes = retbuf[1];
815 mpp_x_data->pool_purr_cycles = retbuf[2];
816 mpp_x_data->pool_spurr_cycles = retbuf[3];
817
818 return rc;
819}
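[Editor's note] The new h_get_mpp()/h_get_mpp_x() wrappers unpack the packed H_GET_MPP return registers into the hvcall_mpp_data fields shown above. A hypothetical caller only has to check the hcall status and read the struct; a short sketch:

    #include <linux/kernel.h>
    #include <asm/hvcall.h>

    /* Hypothetical consumer of the wrapper added above. */
    static void example_report_mpp(void)
    {
            struct hvcall_mpp_data mpp;

            if (h_get_mpp(&mpp) != H_SUCCESS)
                    return;

            pr_info("entitled_mem=%lu mapped_mem=%lu pool=%u mem_weight=%u\n",
                    mpp.entitled_mem, mpp.mapped_mem, mpp.pool_num, mpp.mem_weight);
    }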
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d9801117124b..4bf21207d7d3 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
270 lbuf[1]); 270 lbuf[1]);
271} 271}
272 272
273static inline long plpar_eoi(unsigned long xirr)
274{
275 return plpar_hcall_norets(H_EOI, xirr);
276}
277
278static inline long plpar_cppr(unsigned long cppr)
279{
280 return plpar_hcall_norets(H_CPPR, cppr);
281}
282
283static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
284{
285 return plpar_hcall_norets(H_IPI, servernum, mfrr);
286}
287
288static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr)
289{
290 long rc;
291 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
292
293 rc = plpar_hcall(H_XIRR, retbuf, cppr);
294
295 *xirr_ret = retbuf[0];
296
297 return rc;
298}
299
300#endif /* _PSERIES_PLPAR_WRAPPERS_H */ 273#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index c55d7ad9c648..086d2ae4e06a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -122,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
 
 	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
 			   RTAS_VECTOR_EXTERNAL_INTERRUPT,
-			   irq_map[irq].hwirq,
+			   virq_to_hw(irq),
 			   RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
 			   critical, __pa(&ras_log_buf),
 			   rtas_get_error_log_max());
@@ -157,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
 
 	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
 			   RTAS_VECTOR_EXTERNAL_INTERRUPT,
-			   irq_map[irq].hwirq,
+			   virq_to_hw(irq),
 			   RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
 			   __pa(&ras_log_buf),
 			   rtas_get_error_log_max());
@@ -227,7 +227,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 	struct rtas_error_log *h, *errhdr = NULL;
 
 	if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
-		printk(KERN_ERR "FWNMI: corrupt r3\n");
+		printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
 		return NULL;
 	}
 
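
Both RAS handlers now translate the Linux virq back to the platform interrupt number with virq_to_hw() instead of peeking into the (now private) irq_map[] array. The same pattern is shown below with a made-up handler, purely for illustration and not part of the commit.

/* Hypothetical handler, illustration only. */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/irq.h>

static irqreturn_t example_isr(int virq, void *dev_id)
{
	unsigned long hwirq = virq_to_hw(virq);	/* Linux virq -> hardware irq */

	pr_debug("example: virq %d is hwirq %lu\n", virq, hwirq);
	return IRQ_HANDLED;
}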
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6c42cfde8415..593acceeff96 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -53,9 +53,9 @@
 #include <asm/irq.h>
 #include <asm/time.h>
 #include <asm/nvram.h>
-#include "xics.h"
 #include <asm/pmc.h>
 #include <asm/mpic.h>
+#include <asm/xics.h>
 #include <asm/ppc-pci.h>
 #include <asm/i8259.h>
 #include <asm/udbg.h>
@@ -205,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void)
 		mpic_assign_isu(mpic, n, isuaddr);
 	}
 
+	/* Setup top-level get_irq */
+	ppc_md.get_irq = mpic_get_irq;
+
 	/* All ISUs are setup, complete initialization */
 	mpic_init(mpic);
 
@@ -214,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void)
 
 static void __init pseries_xics_init_IRQ(void)
 {
-	xics_init_IRQ();
+	xics_init();
 	pseries_setup_i8259_cascade();
 }
 
@@ -238,7 +241,6 @@ static void __init pseries_discover_pic(void)
 		if (strstr(typep, "open-pic")) {
 			pSeries_mpic_node = of_node_get(np);
 			ppc_md.init_IRQ = pseries_mpic_init_IRQ;
-			ppc_md.get_irq = mpic_get_irq;
 			setup_kexec_cpu_down_mpic();
 			smp_init_pseries_mpic();
 			return;
@@ -276,6 +278,8 @@ static struct notifier_block pci_dn_reconfig_nb = {
 	.notifier_call = pci_dn_reconfig_notifier,
 };
 
+struct kmem_cache *dtl_cache;
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
  * Allocate space for the dispatch trace log for all possible cpus
@@ -287,18 +291,12 @@ static int alloc_dispatch_logs(void)
 	int cpu, ret;
 	struct paca_struct *pp;
 	struct dtl_entry *dtl;
-	struct kmem_cache *dtl_cache;
 
 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
 		return 0;
 
-	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
-					DISPATCH_LOG_BYTES, 0, NULL);
-	if (!dtl_cache) {
-		pr_warn("Failed to create dispatch trace log buffer cache\n");
-		pr_warn("Stolen time statistics will be unreliable\n");
+	if (!dtl_cache)
 		return 0;
-	}
 
 	for_each_possible_cpu(cpu) {
 		pp = &paca[cpu];
@@ -332,10 +330,27 @@ static int alloc_dispatch_logs(void)
 
 	return 0;
 }
-
-early_initcall(alloc_dispatch_logs);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline int alloc_dispatch_logs(void)
+{
+	return 0;
+}
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
+static int alloc_dispatch_log_kmem_cache(void)
+{
+	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+					DISPATCH_LOG_BYTES, 0, NULL);
+	if (!dtl_cache) {
+		pr_warn("Failed to create dispatch trace log buffer cache\n");
+		pr_warn("Stolen time statistics will be unreliable\n");
+		return 0;
+	}
+
+	return alloc_dispatch_logs();
+}
+early_initcall(alloc_dispatch_log_kmem_cache);
+
 static void __init pSeries_setup_arch(void)
 {
 	/* Discover PIC type and setup ppc_md accordingly */
@@ -403,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr)
 #define CMO_CHARACTERISTICS_TOKEN 44
 #define CMO_MAXLENGTH 1026
 
+void pSeries_coalesce_init(void)
+{
+	struct hvcall_mpp_x_data mpp_x_data;
+
+	if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
+		powerpc_firmware_features |= FW_FEATURE_XCMO;
+	else
+		powerpc_firmware_features &= ~FW_FEATURE_XCMO;
+}
+
 /**
  * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
  * handle that here. (Stolen from parse_system_parameter_string)
@@ -472,6 +497,7 @@ void pSeries_cmo_feature_init(void)
 		pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
 			 CMO_SecPSP);
 		powerpc_firmware_features |= FW_FEATURE_CMO;
+		pSeries_coalesce_init();
 	} else
 		pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
 			 CMO_SecPSP);
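
Two things change in setup.c: the dispatch-trace-log kmem cache is now created unconditionally from an early_initcall (alloc_dispatch_log_kmem_cache), with alloc_dispatch_logs() only populating it when CONFIG_VIRT_CPU_ACCOUNTING and SPLPAR apply, and pSeries_coalesce_init() probes H_GET_MPP_X to set or clear FW_FEATURE_XCMO. A hypothetical consumer of that feature bit might look like the sketch below; it is not part of the commit.

/* Illustration only: test the feature bit rather than guessing whether
 * H_GET_MPP_X exists on this platform. */
#include <linux/kernel.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>

static void example_show_coalescing(void)
{
	struct hvcall_mpp_x_data x;

	if (!firmware_has_feature(FW_FEATURE_XCMO))
		return;

	if (h_get_mpp_x(&x) == 0)
		pr_info("CMO: %lu bytes coalesced for this partition\n",
			(unsigned long)x.coalesced_bytes);
}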
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a509c5292a67..fbffd7e47ab8 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -44,10 +44,11 @@
 #include <asm/mpic.h>
 #include <asm/vdso_datapage.h>
 #include <asm/cputhreads.h>
+#include <asm/mpic.h>
+#include <asm/xics.h>
 
 #include "plpar_wrappers.h"
 #include "pseries.h"
-#include "xics.h"
 #include "offline_states.h"
 
 
@@ -136,7 +137,6 @@ out:
 	return 1;
 }
 
-#ifdef CONFIG_XICS
 static void __devinit smp_xics_setup_cpu(int cpu)
 {
 	if (cpu != boot_cpuid)
@@ -151,14 +151,13 @@ static void __devinit smp_xics_setup_cpu(int cpu)
 	set_default_offline_state(cpu);
 #endif
 }
-#endif /* CONFIG_XICS */
 
-static void __devinit smp_pSeries_kick_cpu(int nr)
+static int __devinit smp_pSeries_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
 
 	if (!smp_startup_cpu(nr))
-		return;
+		return -ENOENT;
 
 	/*
 	 * The processor is currently spinning, waiting for the
@@ -180,6 +179,8 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
 			"Ret= %ld\n", nr, rc);
 	}
 #endif
+
+	return 0;
 }
 
 static int smp_pSeries_cpu_bootable(unsigned int nr)
@@ -197,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
 
 	return 1;
 }
-#ifdef CONFIG_MPIC
+
 static struct smp_ops_t pSeries_mpic_smp_ops = {
 	.message_pass	= smp_mpic_message_pass,
 	.probe		= smp_mpic_probe,
 	.kick_cpu	= smp_pSeries_kick_cpu,
 	.setup_cpu	= smp_mpic_setup_cpu,
 };
-#endif
-#ifdef CONFIG_XICS
+
 static struct smp_ops_t pSeries_xics_smp_ops = {
-	.message_pass	= smp_xics_message_pass,
-	.probe		= smp_xics_probe,
+	.message_pass	= smp_muxed_ipi_message_pass,
+	.cause_ipi	= NULL,	/* Filled at runtime by xics_smp_probe() */
+	.probe		= xics_smp_probe,
 	.kick_cpu	= smp_pSeries_kick_cpu,
 	.setup_cpu	= smp_xics_setup_cpu,
 	.cpu_bootable	= smp_pSeries_cpu_bootable,
 };
-#endif
 
 /* This is called very early */
 static void __init smp_init_pseries(void)
@@ -245,14 +245,12 @@ static void __init smp_init_pseries(void)
 	pr_debug(" <- smp_init_pSeries()\n");
 }
 
-#ifdef CONFIG_MPIC
void __init smp_init_pseries_mpic(void)
 {
 	smp_ops = &pSeries_mpic_smp_ops;
 
 	smp_init_pseries();
 }
-#endif
 
 void __init smp_init_pseries_xics(void)
 {
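
With the XICS message_pass hook replaced by the generic smp_muxed_ipi_message_pass(), the only backend-specific piece left in the ops table is cause_ipi, which the comment says is filled in at runtime by xics_smp_probe(). The sketch below shows the assumed shape of that arrangement; the function names and the cause_ipi signature are guesses for illustration only, not taken from this commit.

/* Assumed shape, illustration only. */
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/smp.h>

static void example_cause_ipi(int cpu, unsigned long data)
{
	/* e.g. write the target cpu's QIRR byte, or issue an H_IPI hcall */
}

void __init example_xics_smp_probe(void)
{
	smp_ops->cause_ipi = example_cause_ipi;	/* hypothetical backend hook */
}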
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
deleted file mode 100644
index d6901334d66e..000000000000
--- a/arch/powerpc/platforms/pseries/xics.c
+++ /dev/null
@@ -1,949 +0,0 @@
1/*
2 * arch/powerpc/platforms/pseries/xics.c
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/types.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/irq.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h>
18#include <linux/init.h>
19#include <linux/radix-tree.h>
20#include <linux/cpu.h>
21#include <linux/msi.h>
22#include <linux/of.h>
23#include <linux/percpu.h>
24
25#include <asm/firmware.h>
26#include <asm/io.h>
27#include <asm/pgtable.h>
28#include <asm/smp.h>
29#include <asm/rtas.h>
30#include <asm/hvcall.h>
31#include <asm/machdep.h>
32
33#include "xics.h"
34#include "plpar_wrappers.h"
35
36static struct irq_host *xics_host;
37
38#define XICS_IPI 2
39#define XICS_IRQ_SPURIOUS 0
40
41/* Want a priority other than 0. Various HW issues require this. */
42#define DEFAULT_PRIORITY 5
43
44/*
45 * Mark IPIs as higher priority so we can take them inside interrupts that
46 * arent marked IRQF_DISABLED
47 */
48#define IPI_PRIORITY 4
49
50/* The least favored priority */
51#define LOWEST_PRIORITY 0xFF
52
53/* The number of priorities defined above */
54#define MAX_NUM_PRIORITIES 3
55
56static unsigned int default_server = 0xFF;
57static unsigned int default_distrib_server = 0;
58static unsigned int interrupt_server_size = 8;
59
60/* RTAS service tokens */
61static int ibm_get_xive;
62static int ibm_set_xive;
63static int ibm_int_on;
64static int ibm_int_off;
65
66struct xics_cppr {
67 unsigned char stack[MAX_NUM_PRIORITIES];
68 int index;
69};
70
71static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
72
73/* Direct hardware low level accessors */
74
75/* The part of the interrupt presentation layer that we care about */
76struct xics_ipl {
77 union {
78 u32 word;
79 u8 bytes[4];
80 } xirr_poll;
81 union {
82 u32 word;
83 u8 bytes[4];
84 } xirr;
85 u32 dummy;
86 union {
87 u32 word;
88 u8 bytes[4];
89 } qirr;
90};
91
92static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
93
94static inline unsigned int direct_xirr_info_get(void)
95{
96 int cpu = smp_processor_id();
97
98 return in_be32(&xics_per_cpu[cpu]->xirr.word);
99}
100
101static inline void direct_xirr_info_set(unsigned int value)
102{
103 int cpu = smp_processor_id();
104
105 out_be32(&xics_per_cpu[cpu]->xirr.word, value);
106}
107
108static inline void direct_cppr_info(u8 value)
109{
110 int cpu = smp_processor_id();
111
112 out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
113}
114
115static inline void direct_qirr_info(int n_cpu, u8 value)
116{
117 out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
118}
119
120
121/* LPAR low level accessors */
122
123static inline unsigned int lpar_xirr_info_get(unsigned char cppr)
124{
125 unsigned long lpar_rc;
126 unsigned long return_value;
127
128 lpar_rc = plpar_xirr(&return_value, cppr);
129 if (lpar_rc != H_SUCCESS)
130 panic(" bad return code xirr - rc = %lx\n", lpar_rc);
131 return (unsigned int)return_value;
132}
133
134static inline void lpar_xirr_info_set(unsigned int value)
135{
136 unsigned long lpar_rc;
137
138 lpar_rc = plpar_eoi(value);
139 if (lpar_rc != H_SUCCESS)
140 panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc,
141 value);
142}
143
144static inline void lpar_cppr_info(u8 value)
145{
146 unsigned long lpar_rc;
147
148 lpar_rc = plpar_cppr(value);
149 if (lpar_rc != H_SUCCESS)
150 panic("bad return code cppr - rc = %lx\n", lpar_rc);
151}
152
153static inline void lpar_qirr_info(int n_cpu , u8 value)
154{
155 unsigned long lpar_rc;
156
157 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
158 if (lpar_rc != H_SUCCESS)
159 panic("bad return code qirr - rc = %lx\n", lpar_rc);
160}
161
162
163/* Interface to generic irq subsystem */
164
165#ifdef CONFIG_SMP
166/*
167 * For the moment we only implement delivery to all cpus or one cpu.
168 *
169 * If the requested affinity is cpu_all_mask, we set global affinity.
170 * If not we set it to the first cpu in the mask, even if multiple cpus
171 * are set. This is so things like irqbalance (which set core and package
172 * wide affinities) do the right thing.
173 */
174static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
175 unsigned int strict_check)
176{
177
178 if (!distribute_irqs)
179 return default_server;
180
181 if (!cpumask_subset(cpu_possible_mask, cpumask)) {
182 int server = cpumask_first_and(cpu_online_mask, cpumask);
183
184 if (server < nr_cpu_ids)
185 return get_hard_smp_processor_id(server);
186
187 if (strict_check)
188 return -1;
189 }
190
191 /*
192 * Workaround issue with some versions of JS20 firmware that
193 * deliver interrupts to cpus which haven't been started. This
194 * happens when using the maxcpus= boot option.
195 */
196 if (cpumask_equal(cpu_online_mask, cpu_present_mask))
197 return default_distrib_server;
198
199 return default_server;
200}
201#else
202#define get_irq_server(virq, cpumask, strict_check) (default_server)
203#endif
204
205static void xics_unmask_irq(struct irq_data *d)
206{
207 unsigned int hwirq;
208 int call_status;
209 int server;
210
211 pr_devel("xics: unmask virq %d\n", d->irq);
212
213 hwirq = (unsigned int)irq_map[d->irq].hwirq;
214 pr_devel(" -> map to hwirq 0x%x\n", hwirq);
215 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
216 return;
217
218 server = get_irq_server(d->irq, d->affinity, 0);
219
220 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server,
221 DEFAULT_PRIORITY);
222 if (call_status != 0) {
223 printk(KERN_ERR
224 "%s: ibm_set_xive irq %u server %x returned %d\n",
225 __func__, hwirq, server, call_status);
226 return;
227 }
228
229 /* Now unmask the interrupt (often a no-op) */
230 call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq);
231 if (call_status != 0) {
232 printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
233 __func__, hwirq, call_status);
234 return;
235 }
236}
237
238static unsigned int xics_startup(struct irq_data *d)
239{
240 /*
241 * The generic MSI code returns with the interrupt disabled on the
242 * card, using the MSI mask bits. Firmware doesn't appear to unmask
243 * at that level, so we do it here by hand.
244 */
245 if (d->msi_desc)
246 unmask_msi_irq(d);
247
248 /* unmask it */
249 xics_unmask_irq(d);
250 return 0;
251}
252
253static void xics_mask_real_irq(unsigned int hwirq)
254{
255 int call_status;
256
257 if (hwirq == XICS_IPI)
258 return;
259
260 call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq);
261 if (call_status != 0) {
262 printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
263 __func__, hwirq, call_status);
264 return;
265 }
266
267 /* Have to set XIVE to 0xff to be able to remove a slot */
268 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq,
269 default_server, 0xff);
270 if (call_status != 0) {
271 printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
272 __func__, hwirq, call_status);
273 return;
274 }
275}
276
277static void xics_mask_irq(struct irq_data *d)
278{
279 unsigned int hwirq;
280
281 pr_devel("xics: mask virq %d\n", d->irq);
282
283 hwirq = (unsigned int)irq_map[d->irq].hwirq;
284 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
285 return;
286 xics_mask_real_irq(hwirq);
287}
288
289static void xics_mask_unknown_vec(unsigned int vec)
290{
291 printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
292 xics_mask_real_irq(vec);
293}
294
295static inline unsigned int xics_xirr_vector(unsigned int xirr)
296{
297 /*
298 * The top byte is the old cppr, to be restored on EOI.
299 * The remaining 24 bits are the vector.
300 */
301 return xirr & 0x00ffffff;
302}
303
304static void push_cppr(unsigned int vec)
305{
306 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
307
308 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
309 return;
310
311 if (vec == XICS_IPI)
312 os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
313 else
314 os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
315}
316
317static unsigned int xics_get_irq_direct(void)
318{
319 unsigned int xirr = direct_xirr_info_get();
320 unsigned int vec = xics_xirr_vector(xirr);
321 unsigned int irq;
322
323 if (vec == XICS_IRQ_SPURIOUS)
324 return NO_IRQ;
325
326 irq = irq_radix_revmap_lookup(xics_host, vec);
327 if (likely(irq != NO_IRQ)) {
328 push_cppr(vec);
329 return irq;
330 }
331
332 /* We don't have a linux mapping, so have rtas mask it. */
333 xics_mask_unknown_vec(vec);
334
335 /* We might learn about it later, so EOI it */
336 direct_xirr_info_set(xirr);
337 return NO_IRQ;
338}
339
340static unsigned int xics_get_irq_lpar(void)
341{
342 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
343 unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]);
344 unsigned int vec = xics_xirr_vector(xirr);
345 unsigned int irq;
346
347 if (vec == XICS_IRQ_SPURIOUS)
348 return NO_IRQ;
349
350 irq = irq_radix_revmap_lookup(xics_host, vec);
351 if (likely(irq != NO_IRQ)) {
352 push_cppr(vec);
353 return irq;
354 }
355
356 /* We don't have a linux mapping, so have RTAS mask it. */
357 xics_mask_unknown_vec(vec);
358
359 /* We might learn about it later, so EOI it */
360 lpar_xirr_info_set(xirr);
361 return NO_IRQ;
362}
363
364static unsigned char pop_cppr(void)
365{
366 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
367
368 if (WARN_ON(os_cppr->index < 1))
369 return LOWEST_PRIORITY;
370
371 return os_cppr->stack[--os_cppr->index];
372}
373
374static void xics_eoi_direct(struct irq_data *d)
375{
376 unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
377
378 iosync();
379 direct_xirr_info_set((pop_cppr() << 24) | hwirq);
380}
381
382static void xics_eoi_lpar(struct irq_data *d)
383{
384 unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
385
386 iosync();
387 lpar_xirr_info_set((pop_cppr() << 24) | hwirq);
388}
389
390static int
391xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
392{
393 unsigned int hwirq;
394 int status;
395 int xics_status[2];
396 int irq_server;
397
398 hwirq = (unsigned int)irq_map[d->irq].hwirq;
399 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
400 return -1;
401
402 status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
403
404 if (status) {
405 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
406 __func__, hwirq, status);
407 return -1;
408 }
409
410 irq_server = get_irq_server(d->irq, cpumask, 1);
411 if (irq_server == -1) {
412 char cpulist[128];
413 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
414 printk(KERN_WARNING
415 "%s: No online cpus in the mask %s for irq %d\n",
416 __func__, cpulist, d->irq);
417 return -1;
418 }
419
420 status = rtas_call(ibm_set_xive, 3, 1, NULL,
421 hwirq, irq_server, xics_status[1]);
422
423 if (status) {
424 printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
425 __func__, hwirq, status);
426 return -1;
427 }
428
429 return 0;
430}
431
432static struct irq_chip xics_pic_direct = {
433 .name = "XICS",
434 .irq_startup = xics_startup,
435 .irq_mask = xics_mask_irq,
436 .irq_unmask = xics_unmask_irq,
437 .irq_eoi = xics_eoi_direct,
438 .irq_set_affinity = xics_set_affinity
439};
440
441static struct irq_chip xics_pic_lpar = {
442 .name = "XICS",
443 .irq_startup = xics_startup,
444 .irq_mask = xics_mask_irq,
445 .irq_unmask = xics_unmask_irq,
446 .irq_eoi = xics_eoi_lpar,
447 .irq_set_affinity = xics_set_affinity
448};
449
450
451/* Interface to arch irq controller subsystem layer */
452
453/* Points to the irq_chip we're actually using */
454static struct irq_chip *xics_irq_chip;
455
456static int xics_host_match(struct irq_host *h, struct device_node *node)
457{
458 /* IBM machines have interrupt parents of various funky types for things
459 * like vdevices, events, etc... The trick we use here is to match
460 * everything here except the legacy 8259 which is compatible "chrp,iic"
461 */
462 return !of_device_is_compatible(node, "chrp,iic");
463}
464
465static int xics_host_map(struct irq_host *h, unsigned int virq,
466 irq_hw_number_t hw)
467{
468 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
469
470 /* Insert the interrupt mapping into the radix tree for fast lookup */
471 irq_radix_revmap_insert(xics_host, virq, hw);
472
473 irq_set_status_flags(virq, IRQ_LEVEL);
474 irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
475 return 0;
476}
477
478static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
479 const u32 *intspec, unsigned int intsize,
480 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
481
482{
483 /* Current xics implementation translates everything
484 * to level. It is not technically right for MSIs but this
485 * is irrelevant at this point. We might get smarter in the future
486 */
487 *out_hwirq = intspec[0];
488 *out_flags = IRQ_TYPE_LEVEL_LOW;
489
490 return 0;
491}
492
493static struct irq_host_ops xics_host_ops = {
494 .match = xics_host_match,
495 .map = xics_host_map,
496 .xlate = xics_host_xlate,
497};
498
499static void __init xics_init_host(void)
500{
501 if (firmware_has_feature(FW_FEATURE_LPAR))
502 xics_irq_chip = &xics_pic_lpar;
503 else
504 xics_irq_chip = &xics_pic_direct;
505
506 xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
507 XICS_IRQ_SPURIOUS);
508 BUG_ON(xics_host == NULL);
509 irq_set_default_host(xics_host);
510}
511
512
513/* Inter-processor interrupt support */
514
515#ifdef CONFIG_SMP
516/*
517 * XICS only has a single IPI, so encode the messages per CPU
518 */
519static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
520
521static inline void smp_xics_do_message(int cpu, int msg)
522{
523 unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
524
525 set_bit(msg, tgt);
526 mb();
527 if (firmware_has_feature(FW_FEATURE_LPAR))
528 lpar_qirr_info(cpu, IPI_PRIORITY);
529 else
530 direct_qirr_info(cpu, IPI_PRIORITY);
531}
532
533void smp_xics_message_pass(int target, int msg)
534{
535 unsigned int i;
536
537 if (target < NR_CPUS) {
538 smp_xics_do_message(target, msg);
539 } else {
540 for_each_online_cpu(i) {
541 if (target == MSG_ALL_BUT_SELF
542 && i == smp_processor_id())
543 continue;
544 smp_xics_do_message(i, msg);
545 }
546 }
547}
548
549static irqreturn_t xics_ipi_dispatch(int cpu)
550{
551 unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
552
553 mb(); /* order mmio clearing qirr */
554 while (*tgt) {
555 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
556 smp_message_recv(PPC_MSG_CALL_FUNCTION);
557 }
558 if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
559 smp_message_recv(PPC_MSG_RESCHEDULE);
560 }
561 if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
562 smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
563 }
564#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
565 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
566 smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
567 }
568#endif
569 }
570 return IRQ_HANDLED;
571}
572
573static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
574{
575 int cpu = smp_processor_id();
576
577 direct_qirr_info(cpu, 0xff);
578
579 return xics_ipi_dispatch(cpu);
580}
581
582static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
583{
584 int cpu = smp_processor_id();
585
586 lpar_qirr_info(cpu, 0xff);
587
588 return xics_ipi_dispatch(cpu);
589}
590
591static void xics_request_ipi(void)
592{
593 unsigned int ipi;
594 int rc;
595
596 ipi = irq_create_mapping(xics_host, XICS_IPI);
597 BUG_ON(ipi == NO_IRQ);
598
599 /*
600 * IPIs are marked IRQF_DISABLED as they must run with irqs
601 * disabled
602 */
603 irq_set_handler(ipi, handle_percpu_irq);
604 if (firmware_has_feature(FW_FEATURE_LPAR))
605 rc = request_irq(ipi, xics_ipi_action_lpar,
606 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
607 else
608 rc = request_irq(ipi, xics_ipi_action_direct,
609 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
610 BUG_ON(rc);
611}
612
613int __init smp_xics_probe(void)
614{
615 xics_request_ipi();
616
617 return cpumask_weight(cpu_possible_mask);
618}
619
620#endif /* CONFIG_SMP */
621
622
623/* Initialization */
624
625static void xics_update_irq_servers(void)
626{
627 int i, j;
628 struct device_node *np;
629 u32 ilen;
630 const u32 *ireg;
631 u32 hcpuid;
632
633 /* Find the server numbers for the boot cpu. */
634 np = of_get_cpu_node(boot_cpuid, NULL);
635 BUG_ON(!np);
636
637 ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
638 if (!ireg) {
639 of_node_put(np);
640 return;
641 }
642
643 i = ilen / sizeof(int);
644 hcpuid = get_hard_smp_processor_id(boot_cpuid);
645
646 /* Global interrupt distribution server is specified in the last
647 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
648 * entry fom this property for current boot cpu id and use it as
649 * default distribution server
650 */
651 for (j = 0; j < i; j += 2) {
652 if (ireg[j] == hcpuid) {
653 default_server = hcpuid;
654 default_distrib_server = ireg[j+1];
655 }
656 }
657
658 of_node_put(np);
659}
660
661static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
662 unsigned long size)
663{
664 int i;
665
666 /* This may look gross but it's good enough for now, we don't quite
667 * have a hard -> linux processor id matching.
668 */
669 for_each_possible_cpu(i) {
670 if (!cpu_present(i))
671 continue;
672 if (hw_id == get_hard_smp_processor_id(i)) {
673 xics_per_cpu[i] = ioremap(addr, size);
674 return;
675 }
676 }
677}
678
679static void __init xics_init_one_node(struct device_node *np,
680 unsigned int *indx)
681{
682 unsigned int ilen;
683 const u32 *ireg;
684
685 /* This code does the theorically broken assumption that the interrupt
686 * server numbers are the same as the hard CPU numbers.
687 * This happens to be the case so far but we are playing with fire...
688 * should be fixed one of these days. -BenH.
689 */
690 ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);
691
692 /* Do that ever happen ? we'll know soon enough... but even good'old
693 * f80 does have that property ..
694 */
695 WARN_ON(ireg == NULL);
696 if (ireg) {
697 /*
698 * set node starting index for this node
699 */
700 *indx = *ireg;
701 }
702 ireg = of_get_property(np, "reg", &ilen);
703 if (!ireg)
704 panic("xics_init_IRQ: can't find interrupt reg property");
705
706 while (ilen >= (4 * sizeof(u32))) {
707 unsigned long addr, size;
708
709 /* XXX Use proper OF parsing code here !!! */
710 addr = (unsigned long)*ireg++ << 32;
711 ilen -= sizeof(u32);
712 addr |= *ireg++;
713 ilen -= sizeof(u32);
714 size = (unsigned long)*ireg++ << 32;
715 ilen -= sizeof(u32);
716 size |= *ireg++;
717 ilen -= sizeof(u32);
718 xics_map_one_cpu(*indx, addr, size);
719 (*indx)++;
720 }
721}
722
723void __init xics_init_IRQ(void)
724{
725 struct device_node *np;
726 u32 indx = 0;
727 int found = 0;
728 const u32 *isize;
729
730 ppc64_boot_msg(0x20, "XICS Init");
731
732 ibm_get_xive = rtas_token("ibm,get-xive");
733 ibm_set_xive = rtas_token("ibm,set-xive");
734 ibm_int_on = rtas_token("ibm,int-on");
735 ibm_int_off = rtas_token("ibm,int-off");
736
737 for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
738 found = 1;
739 if (firmware_has_feature(FW_FEATURE_LPAR)) {
740 of_node_put(np);
741 break;
742 }
743 xics_init_one_node(np, &indx);
744 }
745 if (found == 0)
746 return;
747
748 /* get the bit size of server numbers */
749 found = 0;
750
751 for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
752 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
753
754 if (!isize)
755 continue;
756
757 if (!found) {
758 interrupt_server_size = *isize;
759 found = 1;
760 } else if (*isize != interrupt_server_size) {
761 printk(KERN_WARNING "XICS: "
762 "mismatched ibm,interrupt-server#-size\n");
763 interrupt_server_size = max(*isize,
764 interrupt_server_size);
765 }
766 }
767
768 xics_update_irq_servers();
769 xics_init_host();
770
771 if (firmware_has_feature(FW_FEATURE_LPAR))
772 ppc_md.get_irq = xics_get_irq_lpar;
773 else
774 ppc_md.get_irq = xics_get_irq_direct;
775
776 xics_setup_cpu();
777
778 ppc64_boot_msg(0x21, "XICS Done");
779}
780
781/* Cpu startup, shutdown, and hotplug */
782
783static void xics_set_cpu_priority(unsigned char cppr)
784{
785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
786
787 /*
788 * we only really want to set the priority when there's
789 * just one cppr value on the stack
790 */
791 WARN_ON(os_cppr->index != 0);
792
793 os_cppr->stack[0] = cppr;
794
795 if (firmware_has_feature(FW_FEATURE_LPAR))
796 lpar_cppr_info(cppr);
797 else
798 direct_cppr_info(cppr);
799 iosync();
800}
801
802/* Have the calling processor join or leave the specified global queue */
803static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
804{
805 int index;
806 int status;
807
808 if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
809 return;
810
811 index = (1UL << interrupt_server_size) - 1 - gserver;
812
813 status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
814
815 WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
816 GLOBAL_INTERRUPT_QUEUE, index, join, status);
817}
818
819void xics_setup_cpu(void)
820{
821 xics_set_cpu_priority(LOWEST_PRIORITY);
822
823 xics_set_cpu_giq(default_distrib_server, 1);
824}
825
826void xics_teardown_cpu(void)
827{
828 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
829 int cpu = smp_processor_id();
830
831 /*
832 * we have to reset the cppr index to 0 because we're
833 * not going to return from the IPI
834 */
835 os_cppr->index = 0;
836 xics_set_cpu_priority(0);
837
838 /* Clear any pending IPI request */
839 if (firmware_has_feature(FW_FEATURE_LPAR))
840 lpar_qirr_info(cpu, 0xff);
841 else
842 direct_qirr_info(cpu, 0xff);
843}
844
845void xics_kexec_teardown_cpu(int secondary)
846{
847 xics_teardown_cpu();
848
849 /*
850 * we take the ipi irq but and never return so we
851 * need to EOI the IPI, but want to leave our priority 0
852 *
853 * should we check all the other interrupts too?
854 * should we be flagging idle loop instead?
855 * or creating some task to be scheduled?
856 */
857
858 if (firmware_has_feature(FW_FEATURE_LPAR))
859 lpar_xirr_info_set((0x00 << 24) | XICS_IPI);
860 else
861 direct_xirr_info_set((0x00 << 24) | XICS_IPI);
862
863 /*
864 * Some machines need to have at least one cpu in the GIQ,
865 * so leave the master cpu in the group.
866 */
867 if (secondary)
868 xics_set_cpu_giq(default_distrib_server, 0);
869}
870
871#ifdef CONFIG_HOTPLUG_CPU
872
873/* Interrupts are disabled. */
874void xics_migrate_irqs_away(void)
875{
876 int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
877 int virq;
878
879 /* If we used to be the default server, move to the new "boot_cpuid" */
880 if (hw_cpu == default_server)
881 xics_update_irq_servers();
882
883 /* Reject any interrupt that was queued to us... */
884 xics_set_cpu_priority(0);
885
886 /* Remove ourselves from the global interrupt queue */
887 xics_set_cpu_giq(default_distrib_server, 0);
888
889 /* Allow IPIs again... */
890 xics_set_cpu_priority(DEFAULT_PRIORITY);
891
892 for_each_irq(virq) {
893 struct irq_desc *desc;
894 struct irq_chip *chip;
895 unsigned int hwirq;
896 int xics_status[2];
897 int status;
898 unsigned long flags;
899
900 /* We can't set affinity on ISA interrupts */
901 if (virq < NUM_ISA_INTERRUPTS)
902 continue;
903 if (irq_map[virq].host != xics_host)
904 continue;
905 hwirq = (unsigned int)irq_map[virq].hwirq;
906 /* We need to get IPIs still. */
907 if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
908 continue;
909
910 desc = irq_to_desc(virq);
911
912 /* We only need to migrate enabled IRQS */
913 if (desc == NULL || desc->action == NULL)
914 continue;
915
916 chip = irq_desc_get_chip(desc);
917 if (chip == NULL || chip->irq_set_affinity == NULL)
918 continue;
919
920 raw_spin_lock_irqsave(&desc->lock, flags);
921
922 status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
923 if (status) {
924 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
925 __func__, hwirq, status);
926 goto unlock;
927 }
928
929 /*
930 * We only support delivery to all cpus or to one cpu.
931 * The irq has to be migrated only in the single cpu
932 * case.
933 */
934 if (xics_status[0] != hw_cpu)
935 goto unlock;
936
937 /* This is expected during cpu offline. */
938 if (cpu_online(cpu))
939 printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
940 virq, cpu);
941
942 /* Reset affinity to all cpus */
943 cpumask_setall(desc->irq_data.affinity);
944 chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true);
945unlock:
946 raw_spin_unlock_irqrestore(&desc->lock, flags);
947 }
948}
949#endif
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
deleted file mode 100644
index d1d5a83039ae..000000000000
--- a/arch/powerpc/platforms/pseries/xics.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * arch/powerpc/platforms/pseries/xics.h
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _POWERPC_KERNEL_XICS_H
13#define _POWERPC_KERNEL_XICS_H
14
15extern void xics_init_IRQ(void);
16extern void xics_setup_cpu(void);
17extern void xics_teardown_cpu(void);
18extern void xics_kexec_teardown_cpu(int secondary);
19extern void xics_migrate_irqs_away(void);
20extern int smp_xics_probe(void);
21extern void smp_xics_message_pass(int target, int msg);
22
23#endif /* _POWERPC_KERNEL_XICS_H */
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
new file mode 100644
index 000000000000..c3c48eb62cc1
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -0,0 +1,28 @@
1config PPC_WSP
2 bool
3 default n
4
5menu "WSP platform selection"
6 depends on PPC_BOOK3E_64
7
8config PPC_PSR2
9 bool "PSR-2 platform"
10 select PPC_A2
11 select GENERIC_TBSYNC
12 select PPC_SCOM
13 select EPAPR_BOOT
14 select PPC_WSP
15 select PPC_XICS
16 select PPC_ICP_NATIVE
17 default y
18
19endmenu
20
21config PPC_A2_DD2
22 bool "Support for DD2 based A2/WSP systems"
23 depends on PPC_A2
24
25config WORKAROUND_ERRATUM_463
26 depends on PPC_A2_DD2
27 bool "Workaround erratum 463"
28 default y
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
new file mode 100644
index 000000000000..095be73d6cd4
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -0,0 +1,6 @@
1ccflags-y += -mno-minimal-toc
2
3obj-y += setup.o ics.o
4obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o
5obj-$(CONFIG_PPC_WSP) += scom_wsp.o
6obj-$(CONFIG_SMP) += smp.o scom_smp.o
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
new file mode 100644
index 000000000000..e53bd9e7b125
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -0,0 +1,712 @@
1/*
2 * Copyright 2008-2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/cpu.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/kernel.h>
15#include <linux/msi.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18#include <linux/smp.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21
22#include <asm/io.h>
23#include <asm/irq.h>
24#include <asm/xics.h>
25
26#include "wsp.h"
27#include "ics.h"
28
29
30/* WSP ICS */
31
32struct wsp_ics {
33 struct ics ics;
34 struct device_node *dn;
35 void __iomem *regs;
36 spinlock_t lock;
37 unsigned long *bitmap;
38 u32 chip_id;
39 u32 lsi_base;
40 u32 lsi_count;
41 u64 hwirq_start;
42 u64 count;
43#ifdef CONFIG_SMP
44 int *hwirq_cpu_map;
45#endif
46};
47
48#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
49
50#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
51#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
52#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
53#define XIVE_UPDATE_REG(base) ((base) + 0x28)
54#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
55
56#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
57#define TBL_SELECT_XIST (1UL << 48)
58#define TBL_SELECT_XIVT (1UL << 49)
59
60#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
61
62#define XIST_REQUIRED 0x8
63#define XIST_REJECTED 0x4
64#define XIST_PRESENTED 0x2
65#define XIST_PENDING 0x1
66
67#define XIVE_SERVER_SHIFT 42
68#define XIVE_SERVER_MASK 0xFFFFULL
69#define XIVE_PRIORITY_MASK 0xFFULL
70#define XIVE_PRIORITY_SHIFT 32
71#define XIVE_WRITE_ENABLE (1ULL << 63)
72
73/*
74 * The docs refer to a 6 bit field called ChipID, which consists of a
75 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
76 * so we ignore it, and every where we use "chip id" in this code we
77 * mean the NodeID.
78 */
79#define WSP_ICS_CHIP_SHIFT 17
80
81
82static struct wsp_ics *ics_list;
83static int num_ics;
84
85/* ICS Source controller accessors */
86
87static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
88{
89 unsigned long flags;
90 u64 xive;
91
92 spin_lock_irqsave(&ics->lock, flags);
93 out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
94 xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
95 spin_unlock_irqrestore(&ics->lock, flags);
96
97 return xive;
98}
99
100static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
101{
102 xive &= ~XIVE_ADDR_MASK;
103 xive |= (irq & XIVE_ADDR_MASK);
104 xive |= XIVE_WRITE_ENABLE;
105
106 out_be64(XIVE_UPDATE_REG(ics->regs), xive);
107}
108
109static u64 xive_set_server(u64 xive, unsigned int server)
110{
111 u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
112
113 xive &= mask;
114 xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
115
116 return xive;
117}
118
119static u64 xive_set_priority(u64 xive, unsigned int priority)
120{
121 u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
122
123 xive &= mask;
124 xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
125
126 return xive;
127}
128
129
130#ifdef CONFIG_SMP
131/* Find logical CPUs within mask on a given chip and store result in ret */
132void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
133{
134 int cpu, chip;
135 struct device_node *cpu_dn, *dn;
136 const u32 *prop;
137
138 cpumask_clear(ret);
139 for_each_cpu(cpu, mask) {
140 cpu_dn = of_get_cpu_node(cpu, NULL);
141 if (!cpu_dn)
142 continue;
143
144 prop = of_get_property(cpu_dn, "at-node", NULL);
145 if (!prop) {
146 of_node_put(cpu_dn);
147 continue;
148 }
149
150 dn = of_find_node_by_phandle(*prop);
151 of_node_put(cpu_dn);
152
153 chip = wsp_get_chip_id(dn);
154 if (chip == chip_id)
155 cpumask_set_cpu(cpu, ret);
156
157 of_node_put(dn);
158 }
159}
160
161/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
162static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
163 const cpumask_t *affinity)
164{
165 cpumask_var_t avail, newmask;
166 int ret = -ENOMEM, cpu, cpu_rover = 0, target;
167 int index = hwirq - ics->hwirq_start;
168 unsigned int nodeid;
169
170 BUG_ON(index < 0 || index >= ics->count);
171
172 if (!ics->hwirq_cpu_map)
173 return -ENOMEM;
174
175 if (!distribute_irqs) {
176 ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
177 return 0;
178 }
179
180 /* Allocate needed CPU masks */
181 if (!alloc_cpumask_var(&avail, GFP_KERNEL))
182 goto ret;
183 if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
184 goto freeavail;
185
186 /* Find PBus attached to the source of this IRQ */
187 nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
188
189 /* Find CPUs that could handle this IRQ */
190 if (affinity)
191 cpumask_and(avail, cpu_online_mask, affinity);
192 else
193 cpumask_copy(avail, cpu_online_mask);
194
195 /* Narrow selection down to logical CPUs on the same chip */
196 cpus_on_chip(nodeid, avail, newmask);
197
198 /* Ensure we haven't narrowed it down to 0 */
199 if (unlikely(cpumask_empty(newmask))) {
200 if (unlikely(cpumask_empty(avail))) {
201 ret = -1;
202 goto out;
203 }
204 cpumask_copy(newmask, avail);
205 }
206
207 /* Choose a CPU out of those we narrowed it down to in round robin */
208 target = hwirq % cpumask_weight(newmask);
209 for_each_cpu(cpu, newmask) {
210 if (cpu_rover++ >= target) {
211 ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
212 ret = 0;
213 goto out;
214 }
215 }
216
217 /* Shouldn't happen */
218 WARN_ON(1);
219
220out:
221 free_cpumask_var(newmask);
222freeavail:
223 free_cpumask_var(avail);
224ret:
225 if (ret < 0) {
226 ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
227 pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
228 hwirq, ics->hwirq_cpu_map[index]);
229 }
230 return ret;
231}
232
233static void alloc_irq_map(struct wsp_ics *ics)
234{
235 int i;
236
237 ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
238 if (!ics->hwirq_cpu_map) {
239 pr_warning("Allocate hwirq_cpu_map failed, "
240 "IRQ balancing disabled\n");
241 return;
242 }
243
244 for (i=0; i < ics->count; i++)
245 ics->hwirq_cpu_map[i] = xics_default_server;
246}
247
248static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
249{
250 int index = hwirq - ics->hwirq_start;
251
252 BUG_ON(index < 0 || index >= ics->count);
253
254 if (!ics->hwirq_cpu_map)
255 return xics_default_server;
256
257 return ics->hwirq_cpu_map[index];
258}
259#else /* !CONFIG_SMP */
260static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
261 const cpumask_t *affinity)
262{
263 return 0;
264}
265
266static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
267{
268 return xics_default_server;
269}
270
271static void alloc_irq_map(struct wsp_ics *ics) { }
272#endif
273
274static void wsp_chip_unmask_irq(struct irq_data *d)
275{
276 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
277 struct wsp_ics *ics;
278 int server;
279 u64 xive;
280
281 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
282 return;
283
284 ics = d->chip_data;
285 if (WARN_ON(!ics))
286 return;
287
288 server = get_irq_server(ics, hw_irq);
289
290 xive = wsp_ics_get_xive(ics, hw_irq);
291 xive = xive_set_server(xive, server);
292 xive = xive_set_priority(xive, DEFAULT_PRIORITY);
293 wsp_ics_set_xive(ics, hw_irq, xive);
294}
295
296static unsigned int wsp_chip_startup(struct irq_data *d)
297{
298 /* unmask it */
299 wsp_chip_unmask_irq(d);
300 return 0;
301}
302
303static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
304{
305 u64 xive;
306
307 if (hw_irq == XICS_IPI)
308 return;
309
310 if (WARN_ON(!ics))
311 return;
312 xive = wsp_ics_get_xive(ics, hw_irq);
313 xive = xive_set_server(xive, xics_default_server);
314 xive = xive_set_priority(xive, LOWEST_PRIORITY);
315 wsp_ics_set_xive(ics, hw_irq, xive);
316}
317
318static void wsp_chip_mask_irq(struct irq_data *d)
319{
320 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
321 struct wsp_ics *ics = d->chip_data;
322
323 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
324 return;
325
326 wsp_mask_real_irq(hw_irq, ics);
327}
328
329static int wsp_chip_set_affinity(struct irq_data *d,
330 const struct cpumask *cpumask, bool force)
331{
332 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
333 struct wsp_ics *ics;
334 int ret;
335 u64 xive;
336
337 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
338 return -1;
339
340 ics = d->chip_data;
341 if (WARN_ON(!ics))
342 return -1;
343 xive = wsp_ics_get_xive(ics, hw_irq);
344
345 /*
346 * For the moment only implement delivery to all cpus or one cpu.
347 * Get current irq_server for the given irq
348 */
349 ret = cache_hwirq_map(ics, d->irq, cpumask);
350 if (ret == -1) {
351 char cpulist[128];
352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
353 pr_warning("%s: No online cpus in the mask %s for irq %d\n",
354 __func__, cpulist, d->irq);
355 return -1;
356 } else if (ret == -ENOMEM) {
357 pr_warning("%s: Out of memory\n", __func__);
358 return -1;
359 }
360
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive);
363
364 return 0;
365}
366
367static struct irq_chip wsp_irq_chip = {
368 .name = "WSP ICS",
369 .irq_startup = wsp_chip_startup,
370 .irq_mask = wsp_chip_mask_irq,
371 .irq_unmask = wsp_chip_unmask_irq,
372 .irq_set_affinity = wsp_chip_set_affinity
373};
374
375static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
376{
377 /* All ICSs in the system implement a global irq number space,
378 * so match against them all. */
379 return of_device_is_compatible(dn, "ibm,ppc-xics");
380}
381
382static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
383{
384 if (hwirq >= wsp_ics->hwirq_start &&
385 hwirq < wsp_ics->hwirq_start + wsp_ics->count)
386 return 1;
387
388 return 0;
389}
390
391static int wsp_ics_map(struct ics *ics, unsigned int virq)
392{
393 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
394 unsigned int hw_irq = virq_to_hw(virq);
395 unsigned long flags;
396
397 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
398 return -ENOENT;
399
400 irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
401
402 irq_set_chip_data(virq, wsp_ics);
403
404 spin_lock_irqsave(&wsp_ics->lock, flags);
405 bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
406 spin_unlock_irqrestore(&wsp_ics->lock, flags);
407
408 return 0;
409}
410
411static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
412{
413 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
414
415 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
416 return;
417
418 pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
419 wsp_mask_real_irq(hw_irq, wsp_ics);
420}
421
422static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
423{
424 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
425
426 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
427 return -ENOENT;
428
429 return get_irq_server(wsp_ics, hw_irq);
430}
431
432/* HW Number allocation API */
433
434static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
435{
436 struct device_node *iparent;
437 int i;
438
439 iparent = of_irq_find_parent(dn);
440 if (!iparent) {
441 pr_err("wsp_ics: Failed to find interrupt parent!\n");
442 return NULL;
443 }
444
445 for(i = 0; i < num_ics; i++) {
446 if(ics_list[i].dn == iparent)
447 break;
448 }
449
450 if (i >= num_ics) {
451 pr_err("wsp_ics: Unable to find parent bitmap!\n");
452 return NULL;
453 }
454
455 return &ics_list[i];
456}
457
458int wsp_ics_alloc_irq(struct device_node *dn, int num)
459{
460 struct wsp_ics *ics;
461 int order, offset;
462
463 ics = wsp_ics_find_dn_ics(dn);
464 if (!ics)
465 return -ENODEV;
466
467 /* Fast, but overly strict if num isn't a power of two */
468 order = get_count_order(num);
469
470 spin_lock_irq(&ics->lock);
471 offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
472 spin_unlock_irq(&ics->lock);
473
474 if (offset < 0)
475 return offset;
476
477 return offset + ics->hwirq_start;
478}
479
480void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
481{
482 struct wsp_ics *ics;
483
484 ics = wsp_ics_find_dn_ics(dn);
485 if (WARN_ON(!ics))
486 return;
487
488 spin_lock_irq(&ics->lock);
489 bitmap_release_region(ics->bitmap, irq, 0);
490 spin_unlock_irq(&ics->lock);
491}
492
493/* Initialisation */
494
495static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
496 struct device_node *dn)
497{
498 int len, i, j, size;
499 u32 start, count;
500 const u32 *p;
501
502 size = BITS_TO_LONGS(ics->count) * sizeof(long);
503 ics->bitmap = kzalloc(size, GFP_KERNEL);
504 if (!ics->bitmap) {
505 pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
506 return -ENOMEM;
507 }
508
509 spin_lock_init(&ics->lock);
510
511 p = of_get_property(dn, "available-ranges", &len);
512 if (!p || !len) {
513 /* FIXME this should be a WARN() once mambo is updated */
514 pr_err("wsp_ics: No available-ranges defined for %s\n",
515 dn->full_name);
516 return 0;
517 }
518
519 if (len % (2 * sizeof(u32)) != 0) {
520 /* FIXME this should be a WARN() once mambo is updated */
521 pr_err("wsp_ics: Invalid available-ranges for %s\n",
522 dn->full_name);
523 return 0;
524 }
525
526 bitmap_fill(ics->bitmap, ics->count);
527
528 for (i = 0; i < len / sizeof(u32); i += 2) {
529 start = of_read_number(p + i, 1);
530 count = of_read_number(p + i + 1, 1);
531
532 pr_devel("%s: start: %d count: %d\n", __func__, start, count);
533
534 if ((start + count) > (ics->hwirq_start + ics->count) ||
535 start < ics->hwirq_start) {
536 pr_err("wsp_ics: Invalid range! -> %d to %d\n",
537 start, start + count);
538 break;
539 }
540
541 for (j = 0; j < count; j++)
542 bitmap_release_region(ics->bitmap,
543 (start + j) - ics->hwirq_start, 0);
544 }
545
546 /* Ensure LSIs are not available for allocation */
547 bitmap_allocate_region(ics->bitmap, ics->lsi_base,
548 get_count_order(ics->lsi_count));
549
550 return 0;
551}
552
553static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
554{
555 u32 lsi_buid, msi_buid, msi_base, msi_count;
556 void __iomem *regs;
557 const u32 *p;
558 int rc, len, i;
559 u64 caps, buid;
560
561 p = of_get_property(dn, "interrupt-ranges", &len);
562 if (!p || len < (2 * sizeof(u32))) {
563 pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
564 dn->full_name);
565 return -ENOENT;
566 }
567
568 if (len > (2 * sizeof(u32))) {
569 pr_err("wsp_ics: Multiple ics ranges not supported.\n");
570 return -EINVAL;
571 }
572
573 regs = of_iomap(dn, 0);
574 if (!regs) {
575 pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
576 return -ENXIO;
577 }
578
579 ics->hwirq_start = of_read_number(p, 1);
580 ics->count = of_read_number(p + 1, 1);
581 ics->regs = regs;
582
583 ics->chip_id = wsp_get_chip_id(dn);
584 if (WARN_ON(ics->chip_id < 0))
585 ics->chip_id = 0;
586
587 /* Get some informations about the critter */
588 caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
589 buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
590 ics->lsi_count = caps >> 56;
591 msi_count = (caps >> 44) & 0x7ff;
592
593 /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
594 * rest is mixed in the interrupt number. We store the whole
595 * thing though
596 */
597 lsi_buid = (buid >> 48) & 0x1ff;
598 ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
599 msi_buid = (buid >> 37) & 0x7;
600 msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
601
602 pr_info("wsp_ics: Found %s\n", dn->full_name);
603 pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
604 ics->hwirq_start, ics->hwirq_start + ics->count - 1);
605 pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
606 ics->lsi_count, ics->lsi_base,
607 ics->lsi_base + ics->lsi_count - 1);
608 pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
609 msi_count, msi_base,
610 msi_base + msi_count - 1);
611
612 /* Let's check the HW config is sane */
613 if (ics->lsi_base < ics->hwirq_start ||
614 (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
615 pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
616 if (msi_base < ics->hwirq_start ||
617 (msi_base + msi_count) > (ics->hwirq_start + ics->count))
618 pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
619
620 /* We don't check for overlap between LSI and MSI, which will happen
621 * if we use the same BUID, I'm not sure yet how legit that is.
622 */
623
624 rc = wsp_ics_bitmap_setup(ics, dn);
625 if (rc) {
626 iounmap(regs);
627 return rc;
628 }
629
630 ics->dn = of_node_get(dn);
631 alloc_irq_map(ics);
632
633 for(i = 0; i < ics->count; i++)
634 wsp_mask_real_irq(ics->hwirq_start + i, ics);
635
636 ics->ics.map = wsp_ics_map;
637 ics->ics.mask_unknown = wsp_ics_mask_unknown;
638 ics->ics.get_server = wsp_ics_get_server;
639 ics->ics.host_match = wsp_ics_host_match;
640
641 xics_register_ics(&ics->ics);
642
643 return 0;
644}
645
646static void __init wsp_ics_set_default_server(void)
647{
648 struct device_node *np;
649 u32 hwid;
650
651 /* Find the server number for the boot cpu. */
652 np = of_get_cpu_node(boot_cpuid, NULL);
653 BUG_ON(!np);
654
655 hwid = get_hard_smp_processor_id(boot_cpuid);
656
657 pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
658 xics_default_server = hwid;
659
660 of_node_put(np);
661}
662
663static int __init wsp_ics_init(void)
664{
665 struct device_node *dn;
666 struct wsp_ics *ics;
667 int rc, found;
668
669 wsp_ics_set_default_server();
670
671 found = 0;
672 for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
673 found++;
674
675 if (found == 0) {
676 pr_err("wsp_ics: No ICS's found!\n");
677 return -ENODEV;
678 }
679
680 ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
681 if (!ics_list) {
682 pr_err("wsp_ics: No memory for structs.\n");
683 return -ENOMEM;
684 }
685
686 num_ics = 0;
687 ics = ics_list;
688 for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
689 rc = wsp_ics_setup(ics, dn);
690 if (rc == 0) {
691 ics++;
692 num_ics++;
693 }
694 }
695
696 if (found != num_ics) {
697 pr_err("wsp_ics: Failed setting up %d ICS's\n",
698 found - num_ics);
699 return -1;
700 }
701
702 return 0;
703}
704
705void __init wsp_init_irq(void)
706{
707 wsp_ics_init();
708 xics_init();
709
710 /* We need to patch our irq chip's EOI to point to the right ICP */
711 wsp_irq_chip.irq_eoi = icp_ops->eoi;
712}
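
The XIVE update register takes the whole routing decision in a single 64-bit store: the target server in bits 42..57, the priority in bits 32..39, the interrupt number in the low bits, and a write-enable in bit 63. A standalone arithmetic check of that layout, using the shift and mask values from the #defines above (the sample hwirq, server and priority values are arbitrary):

/* Standalone illustration of the word built by xive_set_server(),
 * xive_set_priority() and wsp_ics_set_xive() above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t xive = 0;
	unsigned int hwirq = 0x42, server = 0x30, priority = 5;

	xive |= (uint64_t)(server & 0xFFFF) << 42;	/* XIVE_SERVER_{MASK,SHIFT} */
	xive |= (uint64_t)(priority & 0xFF) << 32;	/* XIVE_PRIORITY_{MASK,SHIFT} */
	xive |= hwirq & 0x7FFULL;			/* XIVE_ADDR_MASK (see ics.h) */
	xive |= 1ULL << 63;				/* XIVE_WRITE_ENABLE */

	printf("XIVE update word: 0x%016llx\n", (unsigned long long)xive);
	return 0;
}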
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
new file mode 100644
index 000000000000..e34d53102640
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright 2009 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __ICS_H
11#define __ICS_H
12
13#define XIVE_ADDR_MASK 0x7FFULL
14
15extern void wsp_init_irq(void);
16
17extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
18extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
19
20#endif /* __ICS_H */
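
ics.h exports a small allocator over the ICS source bitmap: wsp_ics_alloc_irq() reserves a block of source numbers for a device and returns the first hardware interrupt number (or a negative errno), and wsp_ics_free_irq() gives one back. A hypothetical caller, purely for illustration (the device_node and the count of 4 are made up):

/* Hypothetical caller of the ics.h allocator; not part of the commit. */
#include <linux/of.h>
#include "ics.h"

static int example_reserve_sources(struct device_node *dn)
{
	int hwirq = wsp_ics_alloc_irq(dn, 4);	/* reserve 4 source numbers */

	if (hwirq < 0)
		return hwirq;

	/* ... map hwirq..hwirq+3 to Linux virqs and request them here ... */

	wsp_ics_free_irq(dn, hwirq);		/* release on teardown */
	return 0;
}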
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
new file mode 100644
index 000000000000..be05631a3c1c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -0,0 +1,332 @@
1/*
2 * IBM Onboard Peripheral Bus Interrupt Controller
3 *
4 * Copyright 2010 Jack Miller, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/of.h>
16#include <linux/slab.h>
17#include <linux/time.h>
18
19#include <asm/reg_a2.h>
20#include <asm/irq.h>
21
22#define OPB_NR_IRQS 32
23
24#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
25#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
26#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
27#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
28#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
29
30static int opb_index = 0;
31
32struct opb_pic {
33 struct irq_host *host;
34 void *regs;
35 int index;
36 spinlock_t lock;
37};
38
39static u32 opb_in(struct opb_pic *opb, int offset)
40{
41 return in_be32(opb->regs + offset);
42}
43
44static void opb_out(struct opb_pic *opb, int offset, u32 val)
45{
46 out_be32(opb->regs + offset, val);
47}
48
49static void opb_unmask_irq(struct irq_data *d)
50{
51 struct opb_pic *opb;
52 unsigned long flags;
53 u32 ier, bitset;
54
55 opb = d->chip_data;
56 bitset = (1 << (31 - irqd_to_hwirq(d)));
57
58 spin_lock_irqsave(&opb->lock, flags);
59
60 ier = opb_in(opb, OPB_MLSIER);
61 opb_out(opb, OPB_MLSIER, ier | bitset);
62 ier = opb_in(opb, OPB_MLSIER);
63
64 spin_unlock_irqrestore(&opb->lock, flags);
65}
66
67static void opb_mask_irq(struct irq_data *d)
68{
69 struct opb_pic *opb;
70 unsigned long flags;
71 u32 ier, mask;
72
73 opb = d->chip_data;
74 mask = ~(1 << (31 - irqd_to_hwirq(d)));
75
76 spin_lock_irqsave(&opb->lock, flags);
77
78 ier = opb_in(opb, OPB_MLSIER);
79 opb_out(opb, OPB_MLSIER, ier & mask);
80 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
81
82 spin_unlock_irqrestore(&opb->lock, flags);
83}
84
85static void opb_ack_irq(struct irq_data *d)
86{
87 struct opb_pic *opb;
88 unsigned long flags;
89 u32 bitset;
90
91 opb = d->chip_data;
92 bitset = (1 << (31 - irqd_to_hwirq(d)));
93
94 spin_lock_irqsave(&opb->lock, flags);
95
96 opb_out(opb, OPB_MLSIR, bitset);
97 opb_in(opb, OPB_MLSIR); // Flush posted writes
98
99 spin_unlock_irqrestore(&opb->lock, flags);
100}
101
102static void opb_mask_ack_irq(struct irq_data *d)
103{
104 struct opb_pic *opb;
105 unsigned long flags;
106 u32 bitset;
107 u32 ier, ir;
108
109 opb = d->chip_data;
110 bitset = (1 << (31 - irqd_to_hwirq(d)));
111
112 spin_lock_irqsave(&opb->lock, flags);
113
114 ier = opb_in(opb, OPB_MLSIER);
115 opb_out(opb, OPB_MLSIER, ier & ~bitset);
116 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
117
118 opb_out(opb, OPB_MLSIR, bitset);
119 ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
120
121 spin_unlock_irqrestore(&opb->lock, flags);
122}
123
124static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
125{
126 struct opb_pic *opb;
127 unsigned long flags;
128 int invert, ipr, mask, bit;
129
130 opb = d->chip_data;
131
132 /* The only information in the type that we care about is whether it's
133 * a high or low trigger. For high-triggered interrupts the polarity
134 * set in the MLS Interrupt Polarity Register is 0; for low-triggered
135 * interrupts it's 1, so that the proper input in the MLS Interrupt Input
136 * Register is interpreted as asserting the interrupt. */
137
138 switch (flow) {
139 case IRQ_TYPE_NONE:
140 opb_mask_irq(d);
141 return 0;
142
143 case IRQ_TYPE_LEVEL_HIGH:
144 invert = 0;
145 break;
146
147 case IRQ_TYPE_LEVEL_LOW:
148 invert = 1;
149 break;
150
151 default:
152 return -EINVAL;
153 }
154
155 bit = (1 << (31 - irqd_to_hwirq(d)));
156 mask = ~bit;
157
158 spin_lock_irqsave(&opb->lock, flags);
159
160 ipr = opb_in(opb, OPB_MLSIPR);
161 ipr = (ipr & mask) | (invert ? bit : 0);
162 opb_out(opb, OPB_MLSIPR, ipr);
163 ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
164
165 spin_unlock_irqrestore(&opb->lock, flags);
166
167 /* Record the type in the interrupt descriptor */
168 irqd_set_trigger_type(d, flow);
169
170 return 0;
171}
172
173static struct irq_chip opb_irq_chip = {
174 .name = "OPB",
175 .irq_mask = opb_mask_irq,
176 .irq_unmask = opb_unmask_irq,
177 .irq_mask_ack = opb_mask_ack_irq,
178 .irq_ack = opb_ack_irq,
179 .irq_set_type = opb_set_irq_type
180};
181
182static int opb_host_map(struct irq_host *host, unsigned int virq,
183 irq_hw_number_t hwirq)
184{
185 struct opb_pic *opb;
186
187 opb = host->host_data;
188
189 /* Most of the important stuff is handled by the generic host code, like
190 * the lookup, so just attach some info to the virtual irq */
191
192 irq_set_chip_data(virq, opb);
193 irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
194 irq_set_irq_type(virq, IRQ_TYPE_NONE);
195
196 return 0;
197}
198
199static int opb_host_xlate(struct irq_host *host, struct device_node *dn,
200 const u32 *intspec, unsigned int intsize,
201 irq_hw_number_t *out_hwirq, unsigned int *out_type)
202{
203 /* Interrupt size must == 2 */
204 BUG_ON(intsize != 2);
205 *out_hwirq = intspec[0];
206 *out_type = intspec[1];
207 return 0;
208}
209
210static struct irq_host_ops opb_host_ops = {
211 .map = opb_host_map,
212 .xlate = opb_host_xlate,
213};
214
215irqreturn_t opb_irq_handler(int irq, void *private)
216{
217 struct opb_pic *opb;
218 u32 ir, src, subvirq;
219
220 opb = (struct opb_pic *) private;
221
222 /* Read the OPB MLS Interrupt Register for
223 * asserted interrupts */
224 ir = opb_in(opb, OPB_MLSIR);
225 if (!ir)
226 return IRQ_NONE;
227
228 do {
229 /* Convert the ffs() bit position (1-32) into the MSB-first source number (0-31) */
230 src = 32 - ffs(ir);
231
232 /* Translate from the OPB's conception of interrupt number to
233 * Linux's virtual IRQ */
234
235 subvirq = irq_linear_revmap(opb->host, src);
236
237 generic_handle_irq(subvirq);
238 } while ((ir = opb_in(opb, OPB_MLSIR)));
239
240 return IRQ_HANDLED;
241}
242
243struct opb_pic *opb_pic_init_one(struct device_node *dn)
244{
245 struct opb_pic *opb;
246 struct resource res;
247
248 if (of_address_to_resource(dn, 0, &res)) {
249 printk(KERN_ERR "opb: Couldn't translate resource\n");
250 return NULL;
251 }
252
253 opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
254 if (!opb) {
255 printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
256 return NULL;
257 }
258
259 /* Get access to the OPB MMIO registers */
260 opb->regs = ioremap(res.start + 0x10000, 0x1000);
261 if (!opb->regs) {
262 printk(KERN_ERR "opb: Failed to allocate register space!\n");
263 goto free_opb;
264 }
265
266 /* Allocate an irq host so that Linux knows that despite only
267 * having one interrupt to issue, we're the controller for multiple
268 * hardware IRQs, so later we can look up their virtual IRQs. */
269
270 opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR,
271 OPB_NR_IRQS, &opb_host_ops, -1);
272
273 if (!opb->host) {
274 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
275 goto free_regs;
276 }
277
278 opb->index = opb_index++;
279 spin_lock_init(&opb->lock);
280 opb->host->host_data = opb;
281
282 /* Disable all interrupts by default */
283 opb_out(opb, OPB_MLSASIER, 0);
284 opb_out(opb, OPB_MLSIER, 0);
285
286 /* ACK any interrupts left by FW */
287 opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
288
289 return opb;
290
291free_regs:
292 iounmap(opb->regs);
293free_opb:
294 kfree(opb);
295 return NULL;
296}
297
298void __init opb_pic_init(void)
299{
300 struct device_node *dn;
301 struct opb_pic *opb;
302 int virq;
303 int rc;
304
305 /* Call init_one for each OPB device */
306 for_each_compatible_node(dn, NULL, "ibm,opb") {
307
308 /* Fill in an OPB struct */
309 opb = opb_pic_init_one(dn);
310 if (!opb) {
311 printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
312 continue;
313 }
314
315 /* Map / get opb's hardware virtual irq */
316 virq = irq_of_parse_and_map(dn, 0);
317 if (virq <= 0) {
318 printk("opb: irq_of_parse_and_map failed!\n");
319 continue;
320 }
321
322 /* Attach opb interrupt handler to new virtual IRQ */
323 rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
324 if (rc) {
325 printk("opb: request_irq failed: %d\n", rc);
326 continue;
327 }
328
329 printk("OPB%d init with %d IRQs at %p\n", opb->index,
330 OPB_NR_IRQS, opb->regs);
331 }
332}
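(Editor's note, not part of the patch: opb_pic.c numbers its 32 sources MSB-first, so every mask is built as 1 << (31 - hwirq) and the cascade handler turns a single MLSIR bit back into a source with 32 - ffs(ir). A standalone userspace sketch of that round trip, with hypothetical helper names:)

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* MSB-first bit for an OPB hwirq, as used throughout opb_pic.c */
static unsigned int opb_hwirq_to_bit(unsigned int hwirq)
{
	return 1u << (31 - hwirq);
}

/* Recover the hwirq from a single-bit MLSIR value, as in opb_irq_handler() */
static unsigned int opb_bit_to_hwirq(unsigned int ir)
{
	return 32 - ffs(ir);
}

int main(void)
{
	unsigned int hwirq = 5;
	unsigned int ir = opb_hwirq_to_bit(hwirq);	/* 0x04000000 */

	printf("hwirq %u -> bit 0x%08x -> hwirq %u\n",
	       hwirq, ir, opb_bit_to_hwirq(ir));
	return 0;
}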
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
new file mode 100644
index 000000000000..40f28916ff6c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17
18#include <asm/machdep.h>
19#include <asm/system.h>
20#include <asm/time.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26
27static void psr2_spin(void)
28{
29 hard_irq_disable();
30 for (;;) ;
31}
32
33static void psr2_restart(char *cmd)
34{
35 psr2_spin();
36}
37
38static int psr2_probe_devices(void)
39{
40 struct device_node *np;
41
42 /* Our RTC is a ds1500. It seems to be programmatically compatible
43 * with the ds1511, for which we have a driver, so let's use that.
44 */
45 np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
46 if (np != NULL) {
47 struct resource res;
48 if (of_address_to_resource(np, 0, &res) == 0)
49 platform_device_register_simple("ds1511", 0, &res, 1);
50 }
51 return 0;
52}
53machine_arch_initcall(psr2_md, psr2_probe_devices);
54
55static void __init psr2_setup_arch(void)
56{
57 /* init to some ~sane value until calibrate_delay() runs */
58 loops_per_jiffy = 50000000;
59
60 scom_init_wsp();
61
62 /* Setup SMP callback */
63#ifdef CONFIG_SMP
64 a2_setup_smp();
65#endif
66}
67
68static int __init psr2_probe(void)
69{
70 unsigned long root = of_get_flat_dt_root();
71
72 if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
73 return 0;
74
75 return 1;
76}
77
78static void __init psr2_init_irq(void)
79{
80 wsp_init_irq();
81 opb_pic_init();
82}
83
84define_machine(psr2_md) {
85 .name = "PSR2 A2",
86 .probe = psr2_probe,
87 .setup_arch = psr2_setup_arch,
88 .restart = psr2_restart,
89 .power_off = psr2_spin,
90 .halt = psr2_spin,
91 .calibrate_decr = generic_calibrate_decr,
92 .init_IRQ = psr2_init_irq,
93 .progress = udbg_progress,
94 .power_save = book3e_idle,
95};
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
new file mode 100644
index 000000000000..141e78032097
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_smp.c
@@ -0,0 +1,427 @@
1/*
2 * SCOM support for A2 platforms
3 *
4 * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
5 * Michael Ellerman, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/cpumask.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23
24#include "wsp.h"
25
26#define SCOM_RAMC 0x2a /* Ram Command */
27#define SCOM_RAMC_TGT1_EXT 0x80000000
28#define SCOM_RAMC_SRC1_EXT 0x40000000
29#define SCOM_RAMC_SRC2_EXT 0x20000000
30#define SCOM_RAMC_SRC3_EXT 0x10000000
31#define SCOM_RAMC_ENABLE 0x00080000
32#define SCOM_RAMC_THREADSEL 0x00060000
33#define SCOM_RAMC_EXECUTE 0x00010000
34#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
35#define SCOM_RAMC_MSR_PR 0x00004000
36#define SCOM_RAMC_MSR_GS 0x00002000
37#define SCOM_RAMC_FORCE 0x00001000
38#define SCOM_RAMC_FLUSH 0x00000800
39#define SCOM_RAMC_INTERRUPT 0x00000004
40#define SCOM_RAMC_ERROR 0x00000002
41#define SCOM_RAMC_DONE 0x00000001
42#define SCOM_RAMI 0x29 /* Ram Instruction */
43#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
44#define SCOM_RAMIC_INSN 0xffffffff00000000
45#define SCOM_RAMD 0x2d /* Ram Data */
46#define SCOM_RAMDH 0x2e /* Ram Data High */
47#define SCOM_RAMDL 0x2f /* Ram Data Low */
48#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
49#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
50#define SCOM_PCCR0_ENABLE_RAM 0x40000000
51#define SCOM_THRCTL 0x30 /* Thread Control and Status */
52#define SCOM_THRCTL_T0_STOP 0x80000000
53#define SCOM_THRCTL_T1_STOP 0x40000000
54#define SCOM_THRCTL_T2_STOP 0x20000000
55#define SCOM_THRCTL_T3_STOP 0x10000000
56#define SCOM_THRCTL_T0_STEP 0x08000000
57#define SCOM_THRCTL_T1_STEP 0x04000000
58#define SCOM_THRCTL_T2_STEP 0x02000000
59#define SCOM_THRCTL_T3_STEP 0x01000000
60#define SCOM_THRCTL_T0_RUN 0x00800000
61#define SCOM_THRCTL_T1_RUN 0x00400000
62#define SCOM_THRCTL_T2_RUN 0x00200000
63#define SCOM_THRCTL_T3_RUN 0x00100000
64#define SCOM_THRCTL_T0_PM 0x00080000
65#define SCOM_THRCTL_T1_PM 0x00040000
66#define SCOM_THRCTL_T2_PM 0x00020000
67#define SCOM_THRCTL_T3_PM 0x00010000
68#define SCOM_THRCTL_T0_UDE 0x00008000
69#define SCOM_THRCTL_T1_UDE 0x00004000
70#define SCOM_THRCTL_T2_UDE 0x00002000
71#define SCOM_THRCTL_T3_UDE 0x00001000
72#define SCOM_THRCTL_ASYNC_DIS 0x00000800
73#define SCOM_THRCTL_TB_DIS 0x00000400
74#define SCOM_THRCTL_DEC_DIS 0x00000200
75#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */
76#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */
77
78
79static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
80
81static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
82{
83 scom_map_t scom = per_cpu(scom_ptrs, cpu);
84 int tcpu;
85
86 if (scom_map_ok(scom)) {
87 *first_thread = 0;
88 return scom;
89 }
90
91 *first_thread = 1;
92
93 scom = scom_map_device(np, 0);
94
95 for (tcpu = cpu_first_thread_sibling(cpu);
96 tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
97 per_cpu(scom_ptrs, tcpu) = scom;
98
99 /* Hack: for the boot core, this will actually get called on
100 * the second thread up, not the first, so our test above will
101 * set first_thread incorrectly. */
102 if (cpu_first_thread_sibling(cpu) == 0)
103 *first_thread = 0;
104
105 return scom;
106}
107
108static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
109{
110 u64 cmd, mask, val;
111 int n = 0;
112
113 cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
114 | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
115 mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;
116
117 scom_write(scom, SCOM_RAMIC, cmd);
118
119 while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
120 pr_devel("Waiting on RAMC = 0x%llx\n", val);
121 if (++n == 3) {
122 pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
123 insn, thread);
124 return -1;
125 }
126 }
127
128 if (val & SCOM_RAMC_INTERRUPT) {
129 pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
130 insn, thread);
131 return -SCOM_RAMC_INTERRUPT;
132 }
133
134 if (val & SCOM_RAMC_ERROR) {
135 pr_err("RAMC error on instruction 0x%08x, thread %d\n",
136 insn, thread);
137 return -SCOM_RAMC_ERROR;
138 }
139
140 return 0;
141}
142
143static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
144 u64 *out_gpr)
145{
146 int rc;
147
148 /* or rN, rN, rN */
149 u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
150 rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
151 if (rc)
152 return rc;
153
154 *out_gpr = scom_read(scom, SCOM_RAMD);
155
156 return 0;
157}
158
159static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
160{
161 int rc, sprhi, sprlo;
162 u32 insn;
163
164 sprhi = spr >> 5;
165 sprlo = spr & 0x1f;
166 insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */
167
168 if (spr == 0x0ff0)
169 insn = 0x7c2000a6; /* mfmsr r1 */
170
171 rc = a2_scom_ram(scom, thread, insn, 0xf);
172 if (rc)
173 return rc;
174 return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
175}
176
177static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
178 int alt, u64 val)
179{
180 u32 lis = 0x3c000000 | (gpr << 21);
181 u32 li = 0x38000000 | (gpr << 21);
182 u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
183 u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
184 u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
185 u32 highest = val >> 48;
186 u32 higher = (val >> 32) & 0xffff;
187 u32 high = (val >> 16) & 0xffff;
188 u32 low = val & 0xffff;
189 int lext = alt ? 0x8 : 0x0;
190 int oext = alt ? 0xf : 0x0;
191 int rc = 0;
192
193 if (highest)
194 rc |= a2_scom_ram(scom, thread, lis | highest, lext);
195
196 if (higher) {
197 if (highest)
198 rc |= a2_scom_ram(scom, thread, oris | higher, oext);
199 else
200 rc |= a2_scom_ram(scom, thread, li | higher, lext);
201 }
202
203 if (highest || higher)
204 rc |= a2_scom_ram(scom, thread, rldicr32, oext);
205
206 if (high) {
207 if (highest || higher)
208 rc |= a2_scom_ram(scom, thread, oris | high, oext);
209 else
210 rc |= a2_scom_ram(scom, thread, lis | high, lext);
211 }
212
213 if (highest || higher || high)
214 rc |= a2_scom_ram(scom, thread, ori | low, oext);
215 else
216 rc |= a2_scom_ram(scom, thread, li | low, lext);
217
218 return rc;
219}
220
221static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
222{
223 int sprhi = spr >> 5;
224 int sprlo = spr & 0x1f;
225 /* mtspr spr, r1 */
226 u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
227
228 if (spr == 0x0ff0)
229 insn = 0x7c200124; /* mtmsr r1 */
230
231 if (a2_scom_setgpr(scom, thread, 1, 1, val))
232 return -1;
233
234 return a2_scom_ram(scom, thread, insn, 0xf);
235}
236
237static int a2_scom_initial_tlb(scom_map_t scom, int thread)
238{
239 extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
240 extern u32 a2_tlbinit_after_iprot_flush[];
241 extern u32 a2_tlbinit_after_linear_map[];
242 u32 assoc, entries, i;
243 u64 epn, tlbcfg;
244 u32 *p;
245 int rc;
246
247 /* Invalidate all entries (including iprot) */
248
249 rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
250 if (rc)
251 goto scom_fail;
252 entries = tlbcfg & TLBnCFG_N_ENTRY;
253 assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
254 epn = 0;
255
256 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
257 a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
258 /* Set MMUCR3 so that all thread ID (ThdID) bits are written to the TLB */
259 a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
260
261 /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
262 a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
263 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
264 for (i = 0; i < entries; i++) {
265
266 a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
267
268 /* tlbwe */
269 rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
270 if (rc)
271 goto scom_fail;
272
273 /* Move on to the next EPN once every way of this set has been written */
274 if((i + 1) % assoc == 0) {
275 epn += (1 << 30);
276 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
277 }
278 }
279
280 /* Setup args for linear mapping */
281 rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
282 if (rc)
283 goto scom_fail;
284
285 /* Linear mapping */
286 for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
287 rc = a2_scom_ram(scom, thread, *p, 0);
288 if (rc)
289 goto scom_fail;
290 }
291
292 /*
293 * For the boot thread, between the linear mapping and the debug
294 * mappings there is a loop to flush iprot mappings. Ramming doesn't do
295 * branches, but the secondary threads don't need to be nearly as smart
296 * (i.e. we don't need to worry about invalidating the mapping we're
297 * standing on).
298 */
299
300 /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
301 for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
302 rc = a2_scom_ram(scom, thread, *p, 0);
303 if (rc)
304 goto scom_fail;
305 }
306
307scom_fail:
308 if (rc)
309 pr_err("Setting up initial TLB failed, err %d\n", rc);
310
311 if (rc == -SCOM_RAMC_INTERRUPT) {
312 /* Interrupt, dump some status */
313 int rc[10];
314 u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
315 rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
316 rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
317 rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
318 rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
319 rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
320 rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
321 rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
322 rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
323 rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
324 rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
325 pr_err(" -> retrieved IAR =0x%llx (err %d)\n", iar, rc[0]);
326 pr_err(" retrieved SRR0=0x%llx (err %d)\n", srr0, rc[1]);
327 pr_err(" retrieved SRR1=0x%llx (err %d)\n", srr1, rc[2]);
328 pr_err(" retrieved ESR =0x%llx (err %d)\n", esr, rc[3]);
329 pr_err(" retrieved MAS0=0x%llx (err %d)\n", mas0, rc[4]);
330 pr_err(" retrieved MAS1=0x%llx (err %d)\n", mas1, rc[5]);
331 pr_err(" retrieved MAS2=0x%llx (err %d)\n", mas2, rc[6]);
332 pr_err(" retrieved MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
333 pr_err(" retrieved MAS8=0x%llx (err %d)\n", mas8, rc[8]);
334 pr_err(" retrieved CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
335 }
336
337 return rc;
338}
339
340int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
341 struct device_node *np)
342{
343 u64 init_iar, init_msr, init_ccr2;
344 unsigned long start_here;
345 int rc, core_setup;
346 scom_map_t scom;
347 u64 pccr0;
348
349 scom = get_scom(lcpu, np, &core_setup);
350 if (!scom) {
351 printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
352 return -1;
353 }
354
355 pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
356
357 pccr0 = scom_read(scom, SCOM_PCCR0);
358 scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
359 SCOM_PCCR0_ENABLE_RAM);
360
361 /* Stop the thread with THRCTL. If we are setting up the TLB we stop all
362 * threads. We also disable asynchronous interrupts while RAMing.
363 */
364 if (core_setup)
365 scom_write(scom, SCOM_THRCTL_OR,
366 SCOM_THRCTL_T0_STOP |
367 SCOM_THRCTL_T1_STOP |
368 SCOM_THRCTL_T2_STOP |
369 SCOM_THRCTL_T3_STOP |
370 SCOM_THRCTL_ASYNC_DIS);
371 else
372 scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
373
374 /* Flush its pipeline just in case */
375 scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
376 SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
377
378 a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
379 a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
380 a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
381
382 /* Set MSR to MSR_CM (0x0ff0 is the magic SPR number we use for the MSR) */
383 rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
384 if (rc) {
385 pr_err("Failed to set MSR ! err %d\n", rc);
386 return rc;
387 }
388
389 /* RAM in a sync/isync for the sake of it */
390 a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
391 a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
392
393 if (core_setup) {
394 pr_devel("CPU%d is first thread in core, initializing TLB...\n",
395 lcpu);
396 rc = a2_scom_initial_tlb(scom, thr_idx);
397 if (rc)
398 goto fail;
399 }
400
401 start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init
402 : generic_secondary_thread_init);
403 pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
404
405 rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
406 rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
407 get_hard_smp_processor_id(lcpu));
408 /*
409 * Tell book3e_secondary_core_init not to set up the TLB, we've
410 * already done that.
411 */
412 rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);
413
414 rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);
415
416 scom_write(scom, SCOM_RAMC, 0);
417 scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
418 scom_write(scom, SCOM_PCCR0, pccr0);
419fail:
420 pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
421 if (rc) {
422 pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
423 init_iar, init_msr, init_ccr2);
424 }
425
426 return rc;
427}
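(Editor's note, not part of the patch: a2_scom_setgpr() above loads a 64-bit immediate into a GPR by RAMing a lis/oris/rldicr/oris/ori sequence into the stopped thread. The sketch below only reproduces that instruction-word encoding on the host, reusing the opcode templates from the function and assuming all four 16-bit chunks of the value are non-zero:)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Encode the sequence a2_scom_setgpr() would RAM to load a 64-bit
 * immediate into gpr (assumes every 16-bit chunk of val is non-zero). */
static int encode_li64(uint32_t *buf, int gpr, uint64_t val)
{
	int n = 0;

	buf[n++] = 0x3c000000 | (gpr << 21) | (val >> 48);		/* lis  */
	buf[n++] = 0x64000000 | (gpr << 21) | (gpr << 16)
				| ((val >> 32) & 0xffff);		/* oris */
	buf[n++] = 0x780007c6 | (gpr << 21) | (gpr << 16);		/* rldicr gpr,gpr,32,31 */
	buf[n++] = 0x64000000 | (gpr << 21) | (gpr << 16)
				| ((val >> 16) & 0xffff);		/* oris */
	buf[n++] = 0x60000000 | (gpr << 21) | (gpr << 16)
				| (val & 0xffff);			/* ori  */
	return n;
}

int main(void)
{
	uint32_t insns[5];
	int i, n = encode_li64(insns, 1, 0x123456789abcdef0ull);

	for (i = 0; i < n; i++)
		printf("0x%08" PRIx32 "\n", insns[i]);
	return 0;
}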
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
new file mode 100644
index 000000000000..4052e2259f30
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -0,0 +1,77 @@
1/*
2 * SCOM backend for WSP
3 *
4 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/cpumask.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17
18#include <asm/cputhreads.h>
19#include <asm/reg_a2.h>
20#include <asm/scom.h>
21#include <asm/udbg.h>
22
23#include "wsp.h"
24
25
26static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
27{
28 struct resource r;
29 u64 xscom_addr;
30
31 if (!of_get_property(dev, "scom-controller", NULL)) {
32 pr_err("%s: device %s is not a SCOM controller\n",
33 __func__, dev->full_name);
34 return SCOM_MAP_INVALID;
35 }
36
37 if (of_address_to_resource(dev, 0, &r)) {
38 pr_debug("Failed to find SCOM controller address\n");
39 return 0;
40 }
41
42 /* Transform the SCOM address into an XSCOM offset */
43 xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
44
45 return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
46}
47
48static void wsp_scom_unmap(scom_map_t map)
49{
50 iounmap((void *)map);
51}
52
53static u64 wsp_scom_read(scom_map_t map, u32 reg)
54{
55 u64 __iomem *addr = (u64 __iomem *)map;
56
57 return in_be64(addr + reg);
58}
59
60static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
61{
62 u64 __iomem *addr = (u64 __iomem *)map;
63
64 return out_be64(addr + reg, value);
65}
66
67static const struct scom_controller wsp_scom_controller = {
68 .map = wsp_scom_map,
69 .unmap = wsp_scom_unmap,
70 .read = wsp_scom_read,
71 .write = wsp_scom_write
72};
73
74void scom_init_wsp(void)
75{
76 scom_init(&wsp_scom_controller);
77}
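(Editor's note, not part of the patch: wsp_scom_map() turns a SCOM register number into an XSCOM MMIO byte offset by shifting the high bits (reg & 0x7f000000) right by one and multiplying the low 20-bit register index by 8, since each SCOM register is 8 bytes wide. A standalone sketch with an illustrative, made-up register value:)

#include <stdint.h>
#include <stdio.h>

/* Same transform as wsp_scom_map(): SCOM address -> XSCOM byte offset */
static uint64_t scom_to_xscom(uint64_t reg)
{
	return ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
}

int main(void)
{
	/* illustrative value only: register 0x2a (RAMC) in unit 0x01000000 */
	uint64_t reg = 0x0100002a;

	printf("SCOM 0x%08llx -> XSCOM offset 0x%llx\n",
	       (unsigned long long)reg,
	       (unsigned long long)scom_to_xscom(reg));
	return 0;
}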
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c
new file mode 100644
index 000000000000..11ac2f05e01c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/setup.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2010 Michael Ellerman, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of_platform.h>
12
13#include "wsp.h"
14
15/*
16 * Find the chip-id by walking up the device tree looking for the ibm,wsp-chip-id property.
17 * Won't work for nodes that are not a descendant of a wsp node.
18 */
19int wsp_get_chip_id(struct device_node *dn)
20{
21 const u32 *p;
22 int rc;
23
24 /* Start looking at the specified node, not its parent */
25 dn = of_node_get(dn);
26 while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
27 dn = of_get_next_parent(dn);
28
29 if (!dn)
30 return -1;
31
32 rc = *p;
33 of_node_put(dn);
34
35 return rc;
36}
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
new file mode 100644
index 000000000000..9d20fa9d3710
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -0,0 +1,88 @@
1/*
2 * SMP Support for A2 platforms
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/cpumask.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/smp.h>
18
19#include <asm/dbell.h>
20#include <asm/machdep.h>
21#include <asm/xics.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26static void __devinit smp_a2_setup_cpu(int cpu)
27{
28 doorbell_setup_this_cpu();
29
30 if (cpu != boot_cpuid)
31 xics_setup_cpu();
32}
33
34int __devinit smp_a2_kick_cpu(int nr)
35{
36 const char *enable_method;
37 struct device_node *np;
38 int thr_idx;
39
40 if (nr < 0 || nr >= NR_CPUS)
41 return -ENOENT;
42
43 np = of_get_cpu_node(nr, &thr_idx);
44 if (!np)
45 return -ENODEV;
46
47 enable_method = of_get_property(np, "enable-method", NULL);
48 pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
49
50 if (!enable_method) {
51 printk(KERN_ERR "CPU%d has no enable-method\n", nr);
52 return -ENOENT;
53 } else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
54 if (a2_scom_startup_cpu(nr, thr_idx, np))
55 return -1;
56 } else {
57 printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
58 nr, enable_method);
59 return -EINVAL;
60 }
61
62 /*
63 * The processor is currently spinning, waiting for the
64 * cpu_start field to become non-zero. After we set cpu_start,
65 * the processor will continue on to secondary_start.
66 */
67 paca[nr].cpu_start = 1;
68
69 return 0;
70}
71
72static int __init smp_a2_probe(void)
73{
74 return cpus_weight(cpu_possible_map);
75}
76
77static struct smp_ops_t a2_smp_ops = {
78 .message_pass = smp_muxed_ipi_message_pass,
79 .cause_ipi = doorbell_cause_ipi,
80 .probe = smp_a2_probe,
81 .kick_cpu = smp_a2_kick_cpu,
82 .setup_cpu = smp_a2_setup_cpu,
83};
84
85void __init a2_setup_smp(void)
86{
87 smp_ops = &a2_smp_ops;
88}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
new file mode 100644
index 000000000000..7c3e087fd2f2
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -0,0 +1,17 @@
1#ifndef __WSP_H
2#define __WSP_H
3
4#include <asm/wsp.h>
5
6extern void wsp_setup_pci(void);
7extern void scom_init_wsp(void);
8
9extern void a2_setup_smp(void);
10extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
11 struct device_node *np);
12int smp_a2_cpu_bootable(unsigned int nr);
13int __devinit smp_a2_kick_cpu(int nr);
14
15void opb_pic_init(void);
16
17#endif /* __WSP_H */
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 396582835cb5..d775fd148d13 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -12,3 +12,13 @@ config PPC_MSI_BITMAP
12 depends on PCI_MSI 12 depends on PCI_MSI
13 default y if MPIC 13 default y if MPIC
14 default y if FSL_PCI 14 default y if FSL_PCI
15
16source "arch/powerpc/sysdev/xics/Kconfig"
17
18config PPC_SCOM
19 bool
20
21config SCOM_DEBUGFS
22 bool "Expose SCOM controllers via debugfs"
23 depends on PPC_SCOM
24 default n
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 1e0c933ef772..6076e0074a87 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -57,3 +57,9 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
57ifeq ($(CONFIG_SUSPEND),y) 57ifeq ($(CONFIG_SUSPEND),y)
58obj-$(CONFIG_6xx) += 6xx-suspend.o 58obj-$(CONFIG_6xx) += 6xx-suspend.o
59endif 59endif
60
61obj-$(CONFIG_PPC_SCOM) += scom.o
62
63subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
64
65obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1636dd896707..bd0d54060b94 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -216,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device)
216 AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); 216 AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);
217 217
218 bank->ph_addr = resource.start; 218 bank->ph_addr = resource.start;
219 bank->io_addr = (unsigned long) ioremap_flags( 219 bank->io_addr = (unsigned long) ioremap_prot(
220 bank->ph_addr, bank->size, _PAGE_NO_CACHE); 220 bank->ph_addr, bank->size, _PAGE_NO_CACHE);
221 if (bank->io_addr == 0) { 221 if (bank->io_addr == 0) {
222 dev_err(&device->dev, "ioremap() failed\n"); 222 dev_err(&device->dev, "ioremap() failed\n");
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index e0bc944eb23f..350787c83e22 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -58,21 +58,21 @@ static struct irq_host *cpm_pic_host;
58 58
59static void cpm_mask_irq(struct irq_data *d) 59static void cpm_mask_irq(struct irq_data *d)
60{ 60{
61 unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; 61 unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
62 62
63 clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); 63 clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
64} 64}
65 65
66static void cpm_unmask_irq(struct irq_data *d) 66static void cpm_unmask_irq(struct irq_data *d)
67{ 67{
68 unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; 68 unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
69 69
70 setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); 70 setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
71} 71}
72 72
73static void cpm_end_irq(struct irq_data *d) 73static void cpm_end_irq(struct irq_data *d)
74{ 74{
75 unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq; 75 unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
76 76
77 out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); 77 out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
78} 78}
@@ -157,7 +157,7 @@ unsigned int cpm_pic_init(void)
157 goto end; 157 goto end;
158 158
159 /* Initialize the CPM interrupt controller. */ 159 /* Initialize the CPM interrupt controller. */
160 hwirq = (unsigned int)irq_map[sirq].hwirq; 160 hwirq = (unsigned int)virq_to_hw(sirq);
161 out_be32(&cpic_reg->cpic_cicr, 161 out_be32(&cpic_reg->cpic_cicr,
162 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | 162 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
163 ((hwirq/2) << 13) | CICR_HP_MASK); 163 ((hwirq/2) << 13) | CICR_HP_MASK);
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 5495c1be472b..bcab50e2a9eb 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -81,7 +81,7 @@ static const u_char irq_to_siubit[] = {
81static void cpm2_mask_irq(struct irq_data *d) 81static void cpm2_mask_irq(struct irq_data *d)
82{ 82{
83 int bit, word; 83 int bit, word;
84 unsigned int irq_nr = virq_to_hw(d->irq); 84 unsigned int irq_nr = irqd_to_hwirq(d);
85 85
86 bit = irq_to_siubit[irq_nr]; 86 bit = irq_to_siubit[irq_nr];
87 word = irq_to_siureg[irq_nr]; 87 word = irq_to_siureg[irq_nr];
@@ -93,7 +93,7 @@ static void cpm2_mask_irq(struct irq_data *d)
93static void cpm2_unmask_irq(struct irq_data *d) 93static void cpm2_unmask_irq(struct irq_data *d)
94{ 94{
95 int bit, word; 95 int bit, word;
96 unsigned int irq_nr = virq_to_hw(d->irq); 96 unsigned int irq_nr = irqd_to_hwirq(d);
97 97
98 bit = irq_to_siubit[irq_nr]; 98 bit = irq_to_siubit[irq_nr];
99 word = irq_to_siureg[irq_nr]; 99 word = irq_to_siureg[irq_nr];
@@ -105,7 +105,7 @@ static void cpm2_unmask_irq(struct irq_data *d)
105static void cpm2_ack(struct irq_data *d) 105static void cpm2_ack(struct irq_data *d)
106{ 106{
107 int bit, word; 107 int bit, word;
108 unsigned int irq_nr = virq_to_hw(d->irq); 108 unsigned int irq_nr = irqd_to_hwirq(d);
109 109
110 bit = irq_to_siubit[irq_nr]; 110 bit = irq_to_siubit[irq_nr];
111 word = irq_to_siureg[irq_nr]; 111 word = irq_to_siureg[irq_nr];
@@ -116,7 +116,7 @@ static void cpm2_ack(struct irq_data *d)
116static void cpm2_end_irq(struct irq_data *d) 116static void cpm2_end_irq(struct irq_data *d)
117{ 117{
118 int bit, word; 118 int bit, word;
119 unsigned int irq_nr = virq_to_hw(d->irq); 119 unsigned int irq_nr = irqd_to_hwirq(d);
120 120
121 bit = irq_to_siubit[irq_nr]; 121 bit = irq_to_siubit[irq_nr];
122 word = irq_to_siureg[irq_nr]; 122 word = irq_to_siureg[irq_nr];
@@ -133,7 +133,7 @@ static void cpm2_end_irq(struct irq_data *d)
133 133
134static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) 134static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
135{ 135{
136 unsigned int src = virq_to_hw(d->irq); 136 unsigned int src = irqd_to_hwirq(d);
137 unsigned int vold, vnew, edibit; 137 unsigned int vold, vnew, edibit;
138 138
139 /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or 139 /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 54fb1922fe30..116415899176 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -106,10 +106,10 @@ int __init instantiate_cache_sram(struct platform_device *dev,
106 goto out_free; 106 goto out_free;
107 } 107 }
108 108
109 cache_sram->base_virt = ioremap_flags(cache_sram->base_phys, 109 cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
110 cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); 110 cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
111 if (!cache_sram->base_virt) { 111 if (!cache_sram->base_virt) {
112 dev_err(&dev->dev, "%s: ioremap_flags failed\n", 112 dev_err(&dev->dev, "%s: ioremap_prot failed\n",
113 dev->dev.of_node->full_name); 113 dev->dev.of_node->full_name);
114 ret = -ENOMEM; 114 ret = -ENOMEM;
115 goto out_release; 115 goto out_release;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 01cd2f089512..92e78333c47c 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -110,7 +110,7 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
110 list_for_each_entry(entry, &pdev->msi_list, list) { 110 list_for_each_entry(entry, &pdev->msi_list, list) {
111 if (entry->irq == NO_IRQ) 111 if (entry->irq == NO_IRQ)
112 continue; 112 continue;
113 msi_data = irq_get_handler_data(entry->irq); 113 msi_data = irq_get_chip_data(entry->irq);
114 irq_set_msi_desc(entry->irq, NULL); 114 irq_set_msi_desc(entry->irq, NULL);
115 msi_bitmap_free_hwirqs(&msi_data->bitmap, 115 msi_bitmap_free_hwirqs(&msi_data->bitmap,
116 virq_to_hw(entry->irq), 1); 116 virq_to_hw(entry->irq), 1);
@@ -168,7 +168,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
168 rc = -ENOSPC; 168 rc = -ENOSPC;
169 goto out_free; 169 goto out_free;
170 } 170 }
171 irq_set_handler_data(virq, msi_data); 171 /* chip_data is msi_data via host->hostdata in host->map() */
172 irq_set_msi_desc(virq, entry); 172 irq_set_msi_desc(virq, entry);
173 173
174 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); 174 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
@@ -193,7 +193,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
193 u32 have_shift = 0; 193 u32 have_shift = 0;
194 struct fsl_msi_cascade_data *cascade_data; 194 struct fsl_msi_cascade_data *cascade_data;
195 195
196 cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq); 196 cascade_data = irq_get_handler_data(irq);
197 msi_data = cascade_data->msi_data; 197 msi_data = cascade_data->msi_data;
198 198
199 raw_spin_lock(&desc->lock); 199 raw_spin_lock(&desc->lock);
@@ -253,7 +253,7 @@ unlock:
253 253
254static int fsl_of_msi_remove(struct platform_device *ofdev) 254static int fsl_of_msi_remove(struct platform_device *ofdev)
255{ 255{
256 struct fsl_msi *msi = ofdev->dev.platform_data; 256 struct fsl_msi *msi = platform_get_drvdata(ofdev);
257 int virq, i; 257 int virq, i;
258 struct fsl_msi_cascade_data *cascade_data; 258 struct fsl_msi_cascade_data *cascade_data;
259 259
@@ -330,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
330 dev_err(&dev->dev, "No memory for MSI structure\n"); 330 dev_err(&dev->dev, "No memory for MSI structure\n");
331 return -ENOMEM; 331 return -ENOMEM;
332 } 332 }
333 dev->dev.platform_data = msi; 333 platform_set_drvdata(dev, msi);
334 334
335 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, 335 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
336 NR_MSI_IRQS, &fsl_msi_host_ops, 0); 336 NR_MSI_IRQS, &fsl_msi_host_ops, 0);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 142770cb84b6..d18bb27e4df9 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -185,18 +185,6 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
185 return 0; 185 return 0;
186} 186}
187 187
188static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
189{
190 /* Make sure irq is masked in hardware */
191 i8259_mask_irq(irq_get_irq_data(virq));
192
193 /* remove chip and handler */
194 irq_set_chip_and_handler(virq, NULL, NULL);
195
196 /* Make sure it's completed */
197 synchronize_irq(virq);
198}
199
200static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 188static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
201 const u32 *intspec, unsigned int intsize, 189 const u32 *intspec, unsigned int intsize,
202 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 190 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
220static struct irq_host_ops i8259_host_ops = { 208static struct irq_host_ops i8259_host_ops = {
221 .match = i8259_host_match, 209 .match = i8259_host_match,
222 .map = i8259_host_map, 210 .map = i8259_host_map,
223 .unmap = i8259_host_unmap,
224 .xlate = i8259_host_xlate, 211 .xlate = i8259_host_xlate,
225}; 212};
226 213
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 596554a8725e..7367d17364cb 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -521,12 +521,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq)
521 return primary_ipic; 521 return primary_ipic;
522} 522}
523 523
524#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
525
526static void ipic_unmask_irq(struct irq_data *d) 524static void ipic_unmask_irq(struct irq_data *d)
527{ 525{
528 struct ipic *ipic = ipic_from_irq(d->irq); 526 struct ipic *ipic = ipic_from_irq(d->irq);
529 unsigned int src = ipic_irq_to_hw(d->irq); 527 unsigned int src = irqd_to_hwirq(d);
530 unsigned long flags; 528 unsigned long flags;
531 u32 temp; 529 u32 temp;
532 530
@@ -542,7 +540,7 @@ static void ipic_unmask_irq(struct irq_data *d)
542static void ipic_mask_irq(struct irq_data *d) 540static void ipic_mask_irq(struct irq_data *d)
543{ 541{
544 struct ipic *ipic = ipic_from_irq(d->irq); 542 struct ipic *ipic = ipic_from_irq(d->irq);
545 unsigned int src = ipic_irq_to_hw(d->irq); 543 unsigned int src = irqd_to_hwirq(d);
546 unsigned long flags; 544 unsigned long flags;
547 u32 temp; 545 u32 temp;
548 546
@@ -562,7 +560,7 @@ static void ipic_mask_irq(struct irq_data *d)
562static void ipic_ack_irq(struct irq_data *d) 560static void ipic_ack_irq(struct irq_data *d)
563{ 561{
564 struct ipic *ipic = ipic_from_irq(d->irq); 562 struct ipic *ipic = ipic_from_irq(d->irq);
565 unsigned int src = ipic_irq_to_hw(d->irq); 563 unsigned int src = irqd_to_hwirq(d);
566 unsigned long flags; 564 unsigned long flags;
567 u32 temp; 565 u32 temp;
568 566
@@ -581,7 +579,7 @@ static void ipic_ack_irq(struct irq_data *d)
581static void ipic_mask_irq_and_ack(struct irq_data *d) 579static void ipic_mask_irq_and_ack(struct irq_data *d)
582{ 580{
583 struct ipic *ipic = ipic_from_irq(d->irq); 581 struct ipic *ipic = ipic_from_irq(d->irq);
584 unsigned int src = ipic_irq_to_hw(d->irq); 582 unsigned int src = irqd_to_hwirq(d);
585 unsigned long flags; 583 unsigned long flags;
586 u32 temp; 584 u32 temp;
587 585
@@ -604,7 +602,7 @@ static void ipic_mask_irq_and_ack(struct irq_data *d)
604static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) 602static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
605{ 603{
606 struct ipic *ipic = ipic_from_irq(d->irq); 604 struct ipic *ipic = ipic_from_irq(d->irq);
607 unsigned int src = ipic_irq_to_hw(d->irq); 605 unsigned int src = irqd_to_hwirq(d);
608 unsigned int vold, vnew, edibit; 606 unsigned int vold, vnew, edibit;
609 607
610 if (flow_type == IRQ_TYPE_NONE) 608 if (flow_type == IRQ_TYPE_NONE)
@@ -793,7 +791,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
793int ipic_set_priority(unsigned int virq, unsigned int priority) 791int ipic_set_priority(unsigned int virq, unsigned int priority)
794{ 792{
795 struct ipic *ipic = ipic_from_irq(virq); 793 struct ipic *ipic = ipic_from_irq(virq);
796 unsigned int src = ipic_irq_to_hw(virq); 794 unsigned int src = virq_to_hw(virq);
797 u32 temp; 795 u32 temp;
798 796
799 if (priority > 7) 797 if (priority > 7)
@@ -821,7 +819,7 @@ int ipic_set_priority(unsigned int virq, unsigned int priority)
821void ipic_set_highest_priority(unsigned int virq) 819void ipic_set_highest_priority(unsigned int virq)
822{ 820{
823 struct ipic *ipic = ipic_from_irq(virq); 821 struct ipic *ipic = ipic_from_irq(virq);
824 unsigned int src = ipic_irq_to_hw(virq); 822 unsigned int src = virq_to_hw(virq);
825 u32 temp; 823 u32 temp;
826 824
827 temp = ipic_read(ipic->regs, IPIC_SICFR); 825 temp = ipic_read(ipic->regs, IPIC_SICFR);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 207324209065..ddc877a3a23a 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -115,6 +115,8 @@ int __init mmio_nvram_init(void)
115 int ret; 115 int ret;
116 116
117 nvram_node = of_find_node_by_type(NULL, "nvram"); 117 nvram_node = of_find_node_by_type(NULL, "nvram");
118 if (!nvram_node)
119 nvram_node = of_find_compatible_node(NULL, NULL, "nvram");
118 if (!nvram_node) { 120 if (!nvram_node) {
119 printk(KERN_WARNING "nvram: no node found in device-tree\n"); 121 printk(KERN_WARNING "nvram: no node found in device-tree\n");
120 return -ENODEV; 122 return -ENODEV;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index a88800ff4d01..20924f2246f0 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -28,7 +28,7 @@ int cpm_get_irq(struct pt_regs *regs);
28static void mpc8xx_unmask_irq(struct irq_data *d) 28static void mpc8xx_unmask_irq(struct irq_data *d)
29{ 29{
30 int bit, word; 30 int bit, word;
31 unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; 31 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
32 32
33 bit = irq_nr & 0x1f; 33 bit = irq_nr & 0x1f;
34 word = irq_nr >> 5; 34 word = irq_nr >> 5;
@@ -40,7 +40,7 @@ static void mpc8xx_unmask_irq(struct irq_data *d)
40static void mpc8xx_mask_irq(struct irq_data *d) 40static void mpc8xx_mask_irq(struct irq_data *d)
41{ 41{
42 int bit, word; 42 int bit, word;
43 unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; 43 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
44 44
45 bit = irq_nr & 0x1f; 45 bit = irq_nr & 0x1f;
46 word = irq_nr >> 5; 46 word = irq_nr >> 5;
@@ -52,7 +52,7 @@ static void mpc8xx_mask_irq(struct irq_data *d)
52static void mpc8xx_ack(struct irq_data *d) 52static void mpc8xx_ack(struct irq_data *d)
53{ 53{
54 int bit; 54 int bit;
55 unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; 55 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
56 56
57 bit = irq_nr & 0x1f; 57 bit = irq_nr & 0x1f;
58 out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); 58 out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
@@ -61,7 +61,7 @@ static void mpc8xx_ack(struct irq_data *d)
61static void mpc8xx_end_irq(struct irq_data *d) 61static void mpc8xx_end_irq(struct irq_data *d)
62{ 62{
63 int bit, word; 63 int bit, word;
64 unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq; 64 unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
65 65
66 bit = irq_nr & 0x1f; 66 bit = irq_nr & 0x1f;
67 word = irq_nr >> 5; 67 word = irq_nr >> 5;
@@ -73,7 +73,7 @@ static void mpc8xx_end_irq(struct irq_data *d)
73static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) 73static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
74{ 74{
75 if (flow_type & IRQ_TYPE_EDGE_FALLING) { 75 if (flow_type & IRQ_TYPE_EDGE_FALLING) {
76 irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq; 76 irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
77 unsigned int siel = in_be32(&siu_reg->sc_siel); 77 unsigned int siel = in_be32(&siu_reg->sc_siel);
78 78
79 /* only external IRQ senses are programmable */ 79 /* only external IRQ senses are programmable */
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index 0892a2841c2b..fb4963abdf55 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -163,7 +163,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
163 163
164 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 164 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
165 165
166 setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); 166 setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
167 167
168 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 168 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
169} 169}
@@ -176,7 +176,7 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
176 176
177 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 177 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
178 178
179 clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); 179 clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
180 180
181 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 181 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
182} 182}
@@ -186,7 +186,7 @@ static void mpc8xxx_irq_ack(struct irq_data *d)
186 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); 186 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
187 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 187 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
188 188
189 out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(d->irq))); 189 out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
190} 190}
191 191
192static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) 192static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -199,14 +199,14 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
199 case IRQ_TYPE_EDGE_FALLING: 199 case IRQ_TYPE_EDGE_FALLING:
200 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 200 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
201 setbits32(mm->regs + GPIO_ICR, 201 setbits32(mm->regs + GPIO_ICR,
202 mpc8xxx_gpio2mask(virq_to_hw(d->irq))); 202 mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
203 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 203 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
204 break; 204 break;
205 205
206 case IRQ_TYPE_EDGE_BOTH: 206 case IRQ_TYPE_EDGE_BOTH:
207 spin_lock_irqsave(&mpc8xxx_gc->lock, flags); 207 spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
208 clrbits32(mm->regs + GPIO_ICR, 208 clrbits32(mm->regs + GPIO_ICR,
209 mpc8xxx_gpio2mask(virq_to_hw(d->irq))); 209 mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
210 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); 210 spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
211 break; 211 break;
212 212
@@ -221,7 +221,7 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
221{ 221{
222 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); 222 struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
223 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; 223 struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
224 unsigned long gpio = virq_to_hw(d->irq); 224 unsigned long gpio = irqd_to_hwirq(d);
225 void __iomem *reg; 225 void __iomem *reg;
226 unsigned int shift; 226 unsigned int shift;
227 unsigned long flags; 227 unsigned long flags;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7e5dc8f4984a..3a8de5bb628a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -6,6 +6,7 @@
6 * with various broken implementations of this HW. 6 * with various broken implementations of this HW.
7 * 7 *
8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9 * Copyright 2010-2011 Freescale Semiconductor, Inc.
9 * 10 *
10 * This file is subject to the terms and conditions of the GNU General Public 11 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive 12 * License. See the file COPYING in the main directory of this archive
@@ -219,6 +220,28 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu
219 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); 220 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
220} 221}
221 222
223static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
224{
225 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
226 ((tm & 3) * MPIC_INFO(TIMER_STRIDE));
227
228 if (tm >= 4)
229 offset += 0x1000 / 4;
230
231 return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
232}
233
234static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
235{
236 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
237 ((tm & 3) * MPIC_INFO(TIMER_STRIDE));
238
239 if (tm >= 4)
240 offset += 0x1000 / 4;
241
242 _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
243}
244
222static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) 245static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
223{ 246{
224 unsigned int cpu = mpic_processor_id(mpic); 247 unsigned int cpu = mpic_processor_id(mpic);
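(Editor's note, not part of the patch: the new _mpic_tm_read()/_mpic_tm_write() helpers address up to eight global timers; tm & 3 picks a timer within a group of four, and timers 4-7 sit 0x1000/4 register units above the first group. A minimal sketch of just that offset arithmetic, with placeholder parameters standing in for MPIC_INFO(TIMER_VECTOR_PRI) and MPIC_INFO(TIMER_STRIDE):)

/* Sketch only: mirrors the arithmetic in _mpic_tm_read()/_mpic_tm_write().
 * timer_vector_pri and timer_stride are placeholders for the MPIC_INFO()
 * values used in mpic.c. */
static unsigned int mpic_tm_offset(unsigned int tm,
				   unsigned int timer_vector_pri,
				   unsigned int timer_stride)
{
	/* tm & 3 selects one of the four timers within a group... */
	unsigned int offset = timer_vector_pri + (tm & 3) * timer_stride;

	/* ...and timers 4-7 live in a second group 0x1000/4 units higher */
	if (tm >= 4)
		offset += 0x1000 / 4;

	return offset;
}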
@@ -269,6 +292,8 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
269#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) 292#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v))
270#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) 293#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
271#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) 294#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
295#define mpic_tm_read(i) _mpic_tm_read(mpic,(i))
296#define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v))
272#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) 297#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
273#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) 298#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
274#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) 299#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
@@ -608,8 +633,6 @@ static int irq_choose_cpu(const struct cpumask *mask)
608} 633}
609#endif 634#endif
610 635
611#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
612
613/* Find an mpic associated with a given linux interrupt */ 636/* Find an mpic associated with a given linux interrupt */
614static struct mpic *mpic_find(unsigned int irq) 637static struct mpic *mpic_find(unsigned int irq)
615{ 638{
@@ -622,11 +645,18 @@ static struct mpic *mpic_find(unsigned int irq)
622/* Determine if the linux irq is an IPI */ 645/* Determine if the linux irq is an IPI */
623static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) 646static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
624{ 647{
625 unsigned int src = mpic_irq_to_hw(irq); 648 unsigned int src = virq_to_hw(irq);
626 649
627 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); 650 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
628} 651}
629 652
653/* Determine if the linux irq is a timer */
654static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
655{
656 unsigned int src = virq_to_hw(irq);
657
658 return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
659}
630 660
631/* Convert a cpu mask from logical to physical cpu numbers. */ 661/* Convert a cpu mask from logical to physical cpu numbers. */
632static inline u32 mpic_physmask(u32 cpumask) 662static inline u32 mpic_physmask(u32 cpumask)
@@ -634,7 +664,7 @@ static inline u32 mpic_physmask(u32 cpumask)
634 int i; 664 int i;
635 u32 mask = 0; 665 u32 mask = 0;
636 666
637 for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) 667 for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
638 mask |= (cpumask & 1) << get_hard_smp_processor_id(i); 668 mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
639 return mask; 669 return mask;
640} 670}
@@ -675,7 +705,7 @@ void mpic_unmask_irq(struct irq_data *d)
675{ 705{
676 unsigned int loops = 100000; 706 unsigned int loops = 100000;
677 struct mpic *mpic = mpic_from_irq_data(d); 707 struct mpic *mpic = mpic_from_irq_data(d);
678 unsigned int src = mpic_irq_to_hw(d->irq); 708 unsigned int src = irqd_to_hwirq(d);
679 709
680 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); 710 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
681 711
@@ -696,7 +726,7 @@ void mpic_mask_irq(struct irq_data *d)
696{ 726{
697 unsigned int loops = 100000; 727 unsigned int loops = 100000;
698 struct mpic *mpic = mpic_from_irq_data(d); 728 struct mpic *mpic = mpic_from_irq_data(d);
699 unsigned int src = mpic_irq_to_hw(d->irq); 729 unsigned int src = irqd_to_hwirq(d);
700 730
701 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); 731 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
702 732
@@ -734,7 +764,7 @@ void mpic_end_irq(struct irq_data *d)
734static void mpic_unmask_ht_irq(struct irq_data *d) 764static void mpic_unmask_ht_irq(struct irq_data *d)
735{ 765{
736 struct mpic *mpic = mpic_from_irq_data(d); 766 struct mpic *mpic = mpic_from_irq_data(d);
737 unsigned int src = mpic_irq_to_hw(d->irq); 767 unsigned int src = irqd_to_hwirq(d);
738 768
739 mpic_unmask_irq(d); 769 mpic_unmask_irq(d);
740 770
@@ -745,7 +775,7 @@ static void mpic_unmask_ht_irq(struct irq_data *d)
745static unsigned int mpic_startup_ht_irq(struct irq_data *d) 775static unsigned int mpic_startup_ht_irq(struct irq_data *d)
746{ 776{
747 struct mpic *mpic = mpic_from_irq_data(d); 777 struct mpic *mpic = mpic_from_irq_data(d);
748 unsigned int src = mpic_irq_to_hw(d->irq); 778 unsigned int src = irqd_to_hwirq(d);
749 779
750 mpic_unmask_irq(d); 780 mpic_unmask_irq(d);
751 mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); 781 mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
@@ -756,7 +786,7 @@ static unsigned int mpic_startup_ht_irq(struct irq_data *d)
756static void mpic_shutdown_ht_irq(struct irq_data *d) 786static void mpic_shutdown_ht_irq(struct irq_data *d)
757{ 787{
758 struct mpic *mpic = mpic_from_irq_data(d); 788 struct mpic *mpic = mpic_from_irq_data(d);
759 unsigned int src = mpic_irq_to_hw(d->irq); 789 unsigned int src = irqd_to_hwirq(d);
760 790
761 mpic_shutdown_ht_interrupt(mpic, src); 791 mpic_shutdown_ht_interrupt(mpic, src);
762 mpic_mask_irq(d); 792 mpic_mask_irq(d);
@@ -765,7 +795,7 @@ static void mpic_shutdown_ht_irq(struct irq_data *d)
765static void mpic_end_ht_irq(struct irq_data *d) 795static void mpic_end_ht_irq(struct irq_data *d)
766{ 796{
767 struct mpic *mpic = mpic_from_irq_data(d); 797 struct mpic *mpic = mpic_from_irq_data(d);
768 unsigned int src = mpic_irq_to_hw(d->irq); 798 unsigned int src = irqd_to_hwirq(d);
769 799
770#ifdef DEBUG_IRQ 800#ifdef DEBUG_IRQ
771 DBG("%s: end_irq: %d\n", mpic->name, d->irq); 801 DBG("%s: end_irq: %d\n", mpic->name, d->irq);
@@ -786,7 +816,7 @@ static void mpic_end_ht_irq(struct irq_data *d)
786static void mpic_unmask_ipi(struct irq_data *d) 816static void mpic_unmask_ipi(struct irq_data *d)
787{ 817{
788 struct mpic *mpic = mpic_from_ipi(d); 818 struct mpic *mpic = mpic_from_ipi(d);
789 unsigned int src = mpic_irq_to_hw(d->irq) - mpic->ipi_vecs[0]; 819 unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];
790 820
791 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src); 821 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
792 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); 822 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
@@ -813,27 +843,42 @@ static void mpic_end_ipi(struct irq_data *d)
813 843
814#endif /* CONFIG_SMP */ 844#endif /* CONFIG_SMP */
815 845
846static void mpic_unmask_tm(struct irq_data *d)
847{
848 struct mpic *mpic = mpic_from_irq_data(d);
849 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
850
 851 DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
852 mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
853 mpic_tm_read(src);
854}
855
856static void mpic_mask_tm(struct irq_data *d)
857{
858 struct mpic *mpic = mpic_from_irq_data(d);
859 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
860
861 mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
862 mpic_tm_read(src);
863}
864
816int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, 865int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
817 bool force) 866 bool force)
818{ 867{
819 struct mpic *mpic = mpic_from_irq_data(d); 868 struct mpic *mpic = mpic_from_irq_data(d);
820 unsigned int src = mpic_irq_to_hw(d->irq); 869 unsigned int src = irqd_to_hwirq(d);
821 870
822 if (mpic->flags & MPIC_SINGLE_DEST_CPU) { 871 if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
823 int cpuid = irq_choose_cpu(cpumask); 872 int cpuid = irq_choose_cpu(cpumask);
824 873
825 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 874 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
826 } else { 875 } else {
827 cpumask_var_t tmp; 876 u32 mask = cpumask_bits(cpumask)[0];
828
829 alloc_cpumask_var(&tmp, GFP_KERNEL);
830 877
831 cpumask_and(tmp, cpumask, cpu_online_mask); 878 mask &= cpumask_bits(cpu_online_mask)[0];
832 879
833 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 880 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
834 mpic_physmask(cpumask_bits(tmp)[0])); 881 mpic_physmask(mask));
835
836 free_cpumask_var(tmp);
837 } 882 }
838 883
839 return 0; 884 return 0;
@@ -863,7 +908,7 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
863int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) 908int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
864{ 909{
865 struct mpic *mpic = mpic_from_irq_data(d); 910 struct mpic *mpic = mpic_from_irq_data(d);
866 unsigned int src = mpic_irq_to_hw(d->irq); 911 unsigned int src = irqd_to_hwirq(d);
867 unsigned int vecpri, vold, vnew; 912 unsigned int vecpri, vold, vnew;
868 913
869 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", 914 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
@@ -899,7 +944,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
899void mpic_set_vector(unsigned int virq, unsigned int vector) 944void mpic_set_vector(unsigned int virq, unsigned int vector)
900{ 945{
901 struct mpic *mpic = mpic_from_irq(virq); 946 struct mpic *mpic = mpic_from_irq(virq);
902 unsigned int src = mpic_irq_to_hw(virq); 947 unsigned int src = virq_to_hw(virq);
903 unsigned int vecpri; 948 unsigned int vecpri;
904 949
905 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", 950 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
@@ -917,7 +962,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
917void mpic_set_destination(unsigned int virq, unsigned int cpuid) 962void mpic_set_destination(unsigned int virq, unsigned int cpuid)
918{ 963{
919 struct mpic *mpic = mpic_from_irq(virq); 964 struct mpic *mpic = mpic_from_irq(virq);
920 unsigned int src = mpic_irq_to_hw(virq); 965 unsigned int src = virq_to_hw(virq);
921 966
922 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", 967 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
923 mpic, virq, src, cpuid); 968 mpic, virq, src, cpuid);
@@ -943,6 +988,12 @@ static struct irq_chip mpic_ipi_chip = {
943}; 988};
944#endif /* CONFIG_SMP */ 989#endif /* CONFIG_SMP */
945 990
991static struct irq_chip mpic_tm_chip = {
992 .irq_mask = mpic_mask_tm,
993 .irq_unmask = mpic_unmask_tm,
994 .irq_eoi = mpic_end_irq,
995};
996
946#ifdef CONFIG_MPIC_U3_HT_IRQS 997#ifdef CONFIG_MPIC_U3_HT_IRQS
947static struct irq_chip mpic_irq_ht_chip = { 998static struct irq_chip mpic_irq_ht_chip = {
948 .irq_startup = mpic_startup_ht_irq, 999 .irq_startup = mpic_startup_ht_irq,
@@ -986,6 +1037,16 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
986 } 1037 }
987#endif /* CONFIG_SMP */ 1038#endif /* CONFIG_SMP */
988 1039
1040 if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
1041 WARN_ON(!(mpic->flags & MPIC_PRIMARY));
1042
1043 DBG("mpic: mapping as timer\n");
1044 irq_set_chip_data(virq, mpic);
1045 irq_set_chip_and_handler(virq, &mpic->hc_tm,
1046 handle_fasteoi_irq);
1047 return 0;
1048 }
1049
989 if (hw >= mpic->irq_count) 1050 if (hw >= mpic->irq_count)
990 return -EINVAL; 1051 return -EINVAL;
991 1052
@@ -1026,6 +1087,7 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
1026 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1087 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1027 1088
1028{ 1089{
1090 struct mpic *mpic = h->host_data;
1029 static unsigned char map_mpic_senses[4] = { 1091 static unsigned char map_mpic_senses[4] = {
1030 IRQ_TYPE_EDGE_RISING, 1092 IRQ_TYPE_EDGE_RISING,
1031 IRQ_TYPE_LEVEL_LOW, 1093 IRQ_TYPE_LEVEL_LOW,
@@ -1034,7 +1096,38 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
1034 }; 1096 };
1035 1097
1036 *out_hwirq = intspec[0]; 1098 *out_hwirq = intspec[0];
1037 if (intsize > 1) { 1099 if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
1100 /*
1101 * Freescale MPIC with extended intspec:
1102 * First two cells are as usual. Third specifies
1103 * an "interrupt type". Fourth is type-specific data.
1104 *
1105 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
1106 */
1107 switch (intspec[2]) {
1108 case 0:
1109 case 1: /* no EISR/EIMR support for now, treat as shared IRQ */
1110 break;
1111 case 2:
1112 if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
1113 return -EINVAL;
1114
1115 *out_hwirq = mpic->ipi_vecs[intspec[0]];
1116 break;
1117 case 3:
1118 if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
1119 return -EINVAL;
1120
1121 *out_hwirq = mpic->timer_vecs[intspec[0]];
1122 break;
1123 default:
1124 pr_debug("%s: unknown irq type %u\n",
1125 __func__, intspec[2]);
1126 return -EINVAL;
1127 }
1128
1129 *out_flags = map_mpic_senses[intspec[1] & 3];
1130 } else if (intsize > 1) {
1038 u32 mask = 0x3; 1131 u32 mask = 0x3;
1039 1132
1040 /* Apple invented a new race of encoding on machines with 1133 /* Apple invented a new race of encoding on machines with
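The four-cell Freescale specifier handled above maps types 2 and 3 onto the reserved IPI and timer vectors. A minimal sketch of that decode, with placeholder ipi_vecs[]/timer_vecs[] arrays standing in for the corresponding fields of struct mpic:

/* Sketch of the FSL 4-cell decode: cell[0] = number, cell[1] = sense,
 * cell[2] = type (0/1 external, 2 IPI, 3 MPIC timer), cell[3] = data.
 */
static int fsl_xlate(const unsigned int spec[4],
		     const unsigned int ipi_vecs[4],
		     const unsigned int timer_vecs[8],
		     unsigned long *hwirq)
{
	switch (spec[2]) {
	case 0:
	case 1:				/* plain (shared) interrupt */
		*hwirq = spec[0];
		return 0;
	case 2:				/* IPI */
		if (spec[0] >= 4)
			return -1;
		*hwirq = ipi_vecs[spec[0]];
		return 0;
	case 3:				/* MPIC timer */
		if (spec[0] >= 8)
			return -1;
		*hwirq = timer_vecs[spec[0]];
		return 0;
	default:
		return -1;
	}
}
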
@@ -1110,6 +1203,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1110 mpic->hc_ipi.name = name; 1203 mpic->hc_ipi.name = name;
1111#endif /* CONFIG_SMP */ 1204#endif /* CONFIG_SMP */
1112 1205
1206 mpic->hc_tm = mpic_tm_chip;
1207 mpic->hc_tm.name = name;
1208
1113 mpic->flags = flags; 1209 mpic->flags = flags;
1114 mpic->isu_size = isu_size; 1210 mpic->isu_size = isu_size;
1115 mpic->irq_count = irq_count; 1211 mpic->irq_count = irq_count;
@@ -1120,10 +1216,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1120 else 1216 else
1121 intvec_top = 255; 1217 intvec_top = 255;
1122 1218
1123 mpic->timer_vecs[0] = intvec_top - 8; 1219 mpic->timer_vecs[0] = intvec_top - 12;
1124 mpic->timer_vecs[1] = intvec_top - 7; 1220 mpic->timer_vecs[1] = intvec_top - 11;
1125 mpic->timer_vecs[2] = intvec_top - 6; 1221 mpic->timer_vecs[2] = intvec_top - 10;
1126 mpic->timer_vecs[3] = intvec_top - 5; 1222 mpic->timer_vecs[3] = intvec_top - 9;
1223 mpic->timer_vecs[4] = intvec_top - 8;
1224 mpic->timer_vecs[5] = intvec_top - 7;
1225 mpic->timer_vecs[6] = intvec_top - 6;
1226 mpic->timer_vecs[7] = intvec_top - 5;
1127 mpic->ipi_vecs[0] = intvec_top - 4; 1227 mpic->ipi_vecs[0] = intvec_top - 4;
1128 mpic->ipi_vecs[1] = intvec_top - 3; 1228 mpic->ipi_vecs[1] = intvec_top - 3;
1129 mpic->ipi_vecs[2] = intvec_top - 2; 1229 mpic->ipi_vecs[2] = intvec_top - 2;
@@ -1133,6 +1233,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1133 /* Check for "big-endian" in device-tree */ 1233 /* Check for "big-endian" in device-tree */
1134 if (node && of_get_property(node, "big-endian", NULL) != NULL) 1234 if (node && of_get_property(node, "big-endian", NULL) != NULL)
1135 mpic->flags |= MPIC_BIG_ENDIAN; 1235 mpic->flags |= MPIC_BIG_ENDIAN;
1236 if (node && of_device_is_compatible(node, "fsl,mpic"))
1237 mpic->flags |= MPIC_FSL;
1136 1238
1137 /* Look for protected sources */ 1239 /* Look for protected sources */
1138 if (node) { 1240 if (node) {
@@ -1324,15 +1426,17 @@ void __init mpic_init(struct mpic *mpic)
1324 /* Set current processor priority to max */ 1426 /* Set current processor priority to max */
1325 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); 1427 mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
1326 1428
1327 /* Initialize timers: just disable them all */ 1429 /* Initialize timers to our reserved vectors and mask them for now */
1328 for (i = 0; i < 4; i++) { 1430 for (i = 0; i < 4; i++) {
1329 mpic_write(mpic->tmregs, 1431 mpic_write(mpic->tmregs,
1330 i * MPIC_INFO(TIMER_STRIDE) + 1432 i * MPIC_INFO(TIMER_STRIDE) +
1331 MPIC_INFO(TIMER_DESTINATION), 0); 1433 MPIC_INFO(TIMER_DESTINATION),
1434 1 << hard_smp_processor_id());
1332 mpic_write(mpic->tmregs, 1435 mpic_write(mpic->tmregs,
1333 i * MPIC_INFO(TIMER_STRIDE) + 1436 i * MPIC_INFO(TIMER_STRIDE) +
1334 MPIC_INFO(TIMER_VECTOR_PRI), 1437 MPIC_INFO(TIMER_VECTOR_PRI),
1335 MPIC_VECPRI_MASK | 1438 MPIC_VECPRI_MASK |
1439 (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
1336 (mpic->timer_vecs[0] + i)); 1440 (mpic->timer_vecs[0] + i));
1337 } 1441 }
1338 1442
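Each timer is now initialised with the mask bit set, a priority of 9 and its reserved vector. An illustrative sketch of the VECPRI value being written; the shift and mask constants below are assumptions standing in for the real MPIC_VECPRI_* definitions in asm/mpic.h:

#include <stdint.h>
#include <stdio.h>

#define VECPRI_MASK            0x80000000u	/* assumed mask bit */
#define VECPRI_PRIORITY_SHIFT  16		/* assumed priority field */

static uint32_t timer_vecpri(unsigned int prio, unsigned int vector)
{
	return VECPRI_MASK | (prio << VECPRI_PRIORITY_SHIFT) | vector;
}

int main(void)
{
	/* timer 0 with intvec_top = 255, so timer_vecs[0] = 243 */
	printf("0x%08x\n", timer_vecpri(9, 243));
	return 0;
}
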
@@ -1428,7 +1532,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable)
1428void mpic_irq_set_priority(unsigned int irq, unsigned int pri) 1532void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1429{ 1533{
1430 struct mpic *mpic = mpic_find(irq); 1534 struct mpic *mpic = mpic_find(irq);
1431 unsigned int src = mpic_irq_to_hw(irq); 1535 unsigned int src = virq_to_hw(irq);
1432 unsigned long flags; 1536 unsigned long flags;
1433 u32 reg; 1537 u32 reg;
1434 1538
@@ -1441,6 +1545,11 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
1441 ~MPIC_VECPRI_PRIORITY_MASK; 1545 ~MPIC_VECPRI_PRIORITY_MASK;
1442 mpic_ipi_write(src - mpic->ipi_vecs[0], 1546 mpic_ipi_write(src - mpic->ipi_vecs[0],
1443 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 1547 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1548 } else if (mpic_is_tm(mpic, irq)) {
1549 reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
1550 ~MPIC_VECPRI_PRIORITY_MASK;
1551 mpic_tm_write(src - mpic->timer_vecs[0],
1552 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
1444 } else { 1553 } else {
1445 reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) 1554 reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
1446 & ~MPIC_VECPRI_PRIORITY_MASK; 1555 & ~MPIC_VECPRI_PRIORITY_MASK;
@@ -1620,46 +1729,28 @@ void mpic_request_ipis(void)
1620 } 1729 }
1621} 1730}
1622 1731
1623static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask) 1732void smp_mpic_message_pass(int cpu, int msg)
1624{ 1733{
1625 struct mpic *mpic = mpic_primary; 1734 struct mpic *mpic = mpic_primary;
1735 u32 physmask;
1626 1736
1627 BUG_ON(mpic == NULL); 1737 BUG_ON(mpic == NULL);
1628 1738
1629#ifdef DEBUG_IPI
1630 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
1631#endif
1632
1633 mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1634 ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
1635 mpic_physmask(cpumask_bits(cpu_mask)[0]));
1636}
1637
1638void smp_mpic_message_pass(int target, int msg)
1639{
1640 cpumask_var_t tmp;
1641
1642 /* make sure we're sending something that translates to an IPI */ 1739 /* make sure we're sending something that translates to an IPI */
1643 if ((unsigned int)msg > 3) { 1740 if ((unsigned int)msg > 3) {
1644 printk("SMP %d: smp_message_pass: unknown msg %d\n", 1741 printk("SMP %d: smp_message_pass: unknown msg %d\n",
1645 smp_processor_id(), msg); 1742 smp_processor_id(), msg);
1646 return; 1743 return;
1647 } 1744 }
1648 switch (target) { 1745
1649 case MSG_ALL: 1746#ifdef DEBUG_IPI
1650 mpic_send_ipi(msg, cpu_online_mask); 1747 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
1651 break; 1748#endif
1652 case MSG_ALL_BUT_SELF: 1749
1653 alloc_cpumask_var(&tmp, GFP_NOWAIT); 1750 physmask = 1 << get_hard_smp_processor_id(cpu);
1654 cpumask_andnot(tmp, cpu_online_mask, 1751
1655 cpumask_of(smp_processor_id())); 1752 mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
1656 mpic_send_ipi(msg, tmp); 1753 msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
1657 free_cpumask_var(tmp);
1658 break;
1659 default:
1660 mpic_send_ipi(msg, cpumask_of(target));
1661 break;
1662 }
1663} 1754}
1664 1755
1665int __init smp_mpic_probe(void) 1756int __init smp_mpic_probe(void)
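The rewritten smp_mpic_message_pass() dispatches to a single CPU: the message number selects one of four IPI dispatch registers and the value written is a one-hot physical CPU mask. A small sketch of that computation; the register offsets are illustrative placeholders for MPIC_INFO(CPU_IPI_DISPATCH_0) and MPIC_INFO(CPU_IPI_DISPATCH_STRIDE):

#include <stdio.h>

int main(void)
{
	unsigned int dispatch0 = 0x40, stride = 0x10;	/* assumed offsets */
	int msg = 2;			/* message number, 0..3 */
	unsigned int hard_cpu = 1;	/* get_hard_smp_processor_id(cpu) */

	/* one dispatch register per message, one destination bit per CPU */
	printf("write 0x%x to per-cpu register 0x%x\n",
	       1u << hard_cpu, dispatch0 + msg * stride);
	return 0;
}
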
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index e9c633c7c083..14d130268e7a 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -78,7 +78,7 @@ static struct irq_host *mv64x60_irq_host;
78 78
79static void mv64x60_mask_low(struct irq_data *d) 79static void mv64x60_mask_low(struct irq_data *d)
80{ 80{
81 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 81 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
82 unsigned long flags; 82 unsigned long flags;
83 83
84 spin_lock_irqsave(&mv64x60_lock, flags); 84 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -91,7 +91,7 @@ static void mv64x60_mask_low(struct irq_data *d)
91 91
92static void mv64x60_unmask_low(struct irq_data *d) 92static void mv64x60_unmask_low(struct irq_data *d)
93{ 93{
94 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 94 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
95 unsigned long flags; 95 unsigned long flags;
96 96
97 spin_lock_irqsave(&mv64x60_lock, flags); 97 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -115,7 +115,7 @@ static struct irq_chip mv64x60_chip_low = {
115 115
116static void mv64x60_mask_high(struct irq_data *d) 116static void mv64x60_mask_high(struct irq_data *d)
117{ 117{
118 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 118 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
119 unsigned long flags; 119 unsigned long flags;
120 120
121 spin_lock_irqsave(&mv64x60_lock, flags); 121 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -128,7 +128,7 @@ static void mv64x60_mask_high(struct irq_data *d)
128 128
129static void mv64x60_unmask_high(struct irq_data *d) 129static void mv64x60_unmask_high(struct irq_data *d)
130{ 130{
131 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 131 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
132 unsigned long flags; 132 unsigned long flags;
133 133
134 spin_lock_irqsave(&mv64x60_lock, flags); 134 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -152,7 +152,7 @@ static struct irq_chip mv64x60_chip_high = {
152 152
153static void mv64x60_mask_gpp(struct irq_data *d) 153static void mv64x60_mask_gpp(struct irq_data *d)
154{ 154{
155 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 155 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
156 unsigned long flags; 156 unsigned long flags;
157 157
158 spin_lock_irqsave(&mv64x60_lock, flags); 158 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -165,7 +165,7 @@ static void mv64x60_mask_gpp(struct irq_data *d)
165 165
166static void mv64x60_mask_ack_gpp(struct irq_data *d) 166static void mv64x60_mask_ack_gpp(struct irq_data *d)
167{ 167{
168 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 168 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
169 unsigned long flags; 169 unsigned long flags;
170 170
171 spin_lock_irqsave(&mv64x60_lock, flags); 171 spin_lock_irqsave(&mv64x60_lock, flags);
@@ -180,7 +180,7 @@ static void mv64x60_mask_ack_gpp(struct irq_data *d)
180 180
181static void mv64x60_unmask_gpp(struct irq_data *d) 181static void mv64x60_unmask_gpp(struct irq_data *d)
182{ 182{
183 int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK; 183 int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
184 unsigned long flags; 184 unsigned long flags;
185 185
186 spin_lock_irqsave(&mv64x60_lock, flags); 186 spin_lock_irqsave(&mv64x60_lock, flags);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 832d6924ad1c..b2acda07220d 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -197,12 +197,10 @@ static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
197 return irq_data_get_irq_chip_data(d); 197 return irq_data_get_irq_chip_data(d);
198} 198}
199 199
200#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
201
202static void qe_ic_unmask_irq(struct irq_data *d) 200static void qe_ic_unmask_irq(struct irq_data *d)
203{ 201{
204 struct qe_ic *qe_ic = qe_ic_from_irq_data(d); 202 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
205 unsigned int src = virq_to_hw(d->irq); 203 unsigned int src = irqd_to_hwirq(d);
206 unsigned long flags; 204 unsigned long flags;
207 u32 temp; 205 u32 temp;
208 206
@@ -218,7 +216,7 @@ static void qe_ic_unmask_irq(struct irq_data *d)
218static void qe_ic_mask_irq(struct irq_data *d) 216static void qe_ic_mask_irq(struct irq_data *d)
219{ 217{
220 struct qe_ic *qe_ic = qe_ic_from_irq_data(d); 218 struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
221 unsigned int src = virq_to_hw(d->irq); 219 unsigned int src = irqd_to_hwirq(d);
222 unsigned long flags; 220 unsigned long flags;
223 u32 temp; 221 u32 temp;
224 222
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
new file mode 100644
index 000000000000..b2593ce30c9b
--- /dev/null
+++ b/arch/powerpc/sysdev/scom.c
@@ -0,0 +1,192 @@
1/*
2 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
3 * <benh@kernel.crashing.org>
4 * and David Gibson, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
14 * the GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/debugfs.h>
23#include <linux/slab.h>
24#include <asm/prom.h>
25#include <asm/scom.h>
26
27const struct scom_controller *scom_controller;
28EXPORT_SYMBOL_GPL(scom_controller);
29
30struct device_node *scom_find_parent(struct device_node *node)
31{
32 struct device_node *par, *tmp;
33 const u32 *p;
34
35 for (par = of_node_get(node); par;) {
36 if (of_get_property(par, "scom-controller", NULL))
37 break;
38 p = of_get_property(par, "scom-parent", NULL);
39 tmp = par;
40 if (p == NULL)
41 par = of_get_parent(par);
42 else
43 par = of_find_node_by_phandle(*p);
44 of_node_put(tmp);
45 }
46 return par;
47}
48EXPORT_SYMBOL_GPL(scom_find_parent);
49
50scom_map_t scom_map_device(struct device_node *dev, int index)
51{
52 struct device_node *parent;
53 unsigned int cells, size;
54 const u32 *prop;
55 u64 reg, cnt;
56 scom_map_t ret;
57
58 parent = scom_find_parent(dev);
59
60 if (parent == NULL)
61 return 0;
62
63 prop = of_get_property(parent, "#scom-cells", NULL);
64 cells = prop ? *prop : 1;
65
66 prop = of_get_property(dev, "scom-reg", &size);
67 if (!prop)
68 return 0;
69 size >>= 2;
70
71 if (index >= (size / (2*cells)))
72 return 0;
73
74 reg = of_read_number(&prop[index * cells * 2], cells);
75 cnt = of_read_number(&prop[index * cells * 2 + cells], cells);
76
77 ret = scom_map(parent, reg, cnt);
78 of_node_put(parent);
79
80 return ret;
81}
82EXPORT_SYMBOL_GPL(scom_map_device);
83
84#ifdef CONFIG_SCOM_DEBUGFS
85struct scom_debug_entry {
86 struct device_node *dn;
87 unsigned long addr;
88 scom_map_t map;
89 spinlock_t lock;
90 char name[8];
91 struct debugfs_blob_wrapper blob;
92};
93
94static int scom_addr_set(void *data, u64 val)
95{
96 struct scom_debug_entry *ent = data;
97
98 ent->addr = 0;
99 scom_unmap(ent->map);
100
101 ent->map = scom_map(ent->dn, val, 1);
102 if (scom_map_ok(ent->map))
103 ent->addr = val;
104 else
105 return -EFAULT;
106
107 return 0;
108}
109
110static int scom_addr_get(void *data, u64 *val)
111{
112 struct scom_debug_entry *ent = data;
113 *val = ent->addr;
114 return 0;
115}
116DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set,
117 "0x%llx\n");
118
119static int scom_val_set(void *data, u64 val)
120{
121 struct scom_debug_entry *ent = data;
122
123 if (!scom_map_ok(ent->map))
124 return -EFAULT;
125
126 scom_write(ent->map, 0, val);
127
128 return 0;
129}
130
131static int scom_val_get(void *data, u64 *val)
132{
133 struct scom_debug_entry *ent = data;
134
135 if (!scom_map_ok(ent->map))
136 return -EFAULT;
137
138 *val = scom_read(ent->map, 0);
139 return 0;
140}
141DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set,
142 "0x%llx\n");
143
144static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
145 int i)
146{
147 struct scom_debug_entry *ent;
148 struct dentry *dir;
149
150 ent = kzalloc(sizeof(*ent), GFP_KERNEL);
151 if (!ent)
152 return -ENOMEM;
153
154 ent->dn = of_node_get(dn);
155 ent->map = SCOM_MAP_INVALID;
156 spin_lock_init(&ent->lock);
157 snprintf(ent->name, 8, "scom%d", i);
158 ent->blob.data = dn->full_name;
159 ent->blob.size = strlen(dn->full_name);
160
161 dir = debugfs_create_dir(ent->name, root);
162 if (!dir) {
163 of_node_put(dn);
164 kfree(ent);
165 return -1;
166 }
167
168 debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops);
169 debugfs_create_file("value", 0600, dir, ent, &scom_val_fops);
170 debugfs_create_blob("path", 0400, dir, &ent->blob);
171
172 return 0;
173}
174
175static int scom_debug_init(void)
176{
177 struct device_node *dn;
178 struct dentry *root;
179 int i, rc;
180
181 root = debugfs_create_dir("scom", powerpc_debugfs_root);
182 if (!root)
183 return -1;
184
185 i = rc = 0;
186 for_each_node_with_property(dn, "scom-controller")
187 rc |= scom_debug_init_one(root, dn, i++);
188
189 return rc;
190}
191device_initcall(scom_debug_init);
192#endif /* CONFIG_SCOM_DEBUGFS */
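scom_map_device() reads (reg, count) pairs of #scom-cells-wide numbers from the flattened "scom-reg" property. A sketch of the index arithmetic, with a simplified read_cells() standing in for of_read_number() (endianness conversion omitted):

#include <stdint.h>

static uint64_t read_cells(const uint32_t *p, int cells)
{
	uint64_t v = 0;

	while (cells--)
		v = (v << 32) | *p++;
	return v;
}

/* nr_cells is the total number of 32-bit cells in "scom-reg" */
static int scom_reg_entry(const uint32_t *prop, int nr_cells, int cells,
			  int index, uint64_t *reg, uint64_t *cnt)
{
	if (index >= nr_cells / (2 * cells))
		return -1;
	*reg = read_cells(&prop[index * cells * 2], cells);
	*cnt = read_cells(&prop[index * cells * 2 + cells], cells);
	return 0;
}
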
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 5d9138516628..984cd2029158 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -41,8 +41,6 @@
41#define UIC_VR 0x7 41#define UIC_VR 0x7
42#define UIC_VCR 0x8 42#define UIC_VCR 0x8
43 43
44#define uic_irq_to_hw(virq) (irq_map[virq].hwirq)
45
46struct uic *primary_uic; 44struct uic *primary_uic;
47 45
48struct uic { 46struct uic {
@@ -58,7 +56,7 @@ struct uic {
58static void uic_unmask_irq(struct irq_data *d) 56static void uic_unmask_irq(struct irq_data *d)
59{ 57{
60 struct uic *uic = irq_data_get_irq_chip_data(d); 58 struct uic *uic = irq_data_get_irq_chip_data(d);
61 unsigned int src = uic_irq_to_hw(d->irq); 59 unsigned int src = irqd_to_hwirq(d);
62 unsigned long flags; 60 unsigned long flags;
63 u32 er, sr; 61 u32 er, sr;
64 62
@@ -76,7 +74,7 @@ static void uic_unmask_irq(struct irq_data *d)
76static void uic_mask_irq(struct irq_data *d) 74static void uic_mask_irq(struct irq_data *d)
77{ 75{
78 struct uic *uic = irq_data_get_irq_chip_data(d); 76 struct uic *uic = irq_data_get_irq_chip_data(d);
79 unsigned int src = uic_irq_to_hw(d->irq); 77 unsigned int src = irqd_to_hwirq(d);
80 unsigned long flags; 78 unsigned long flags;
81 u32 er; 79 u32 er;
82 80
@@ -90,7 +88,7 @@ static void uic_mask_irq(struct irq_data *d)
90static void uic_ack_irq(struct irq_data *d) 88static void uic_ack_irq(struct irq_data *d)
91{ 89{
92 struct uic *uic = irq_data_get_irq_chip_data(d); 90 struct uic *uic = irq_data_get_irq_chip_data(d);
93 unsigned int src = uic_irq_to_hw(d->irq); 91 unsigned int src = irqd_to_hwirq(d);
94 unsigned long flags; 92 unsigned long flags;
95 93
96 spin_lock_irqsave(&uic->lock, flags); 94 spin_lock_irqsave(&uic->lock, flags);
@@ -101,7 +99,7 @@ static void uic_ack_irq(struct irq_data *d)
101static void uic_mask_ack_irq(struct irq_data *d) 99static void uic_mask_ack_irq(struct irq_data *d)
102{ 100{
103 struct uic *uic = irq_data_get_irq_chip_data(d); 101 struct uic *uic = irq_data_get_irq_chip_data(d);
104 unsigned int src = uic_irq_to_hw(d->irq); 102 unsigned int src = irqd_to_hwirq(d);
105 unsigned long flags; 103 unsigned long flags;
106 u32 er, sr; 104 u32 er, sr;
107 105
@@ -126,7 +124,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
126static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) 124static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
127{ 125{
128 struct uic *uic = irq_data_get_irq_chip_data(d); 126 struct uic *uic = irq_data_get_irq_chip_data(d);
129 unsigned int src = uic_irq_to_hw(d->irq); 127 unsigned int src = irqd_to_hwirq(d);
130 unsigned long flags; 128 unsigned long flags;
131 int trigger, polarity; 129 int trigger, polarity;
132 u32 tr, pr, mask; 130 u32 tr, pr, mask;
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
new file mode 100644
index 000000000000..0031eda320c3
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -0,0 +1,13 @@
1config PPC_XICS
2 def_bool n
3 select PPC_SMP_MUXED_IPI
4
5config PPC_ICP_NATIVE
6 def_bool n
7
8config PPC_ICP_HV
9 def_bool n
10
11config PPC_ICS_RTAS
12 def_bool n
13
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
new file mode 100644
index 000000000000..b75a6059337f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -0,0 +1,6 @@
1subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
2
3obj-y += xics-common.o
4obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
5obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
6obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
new file mode 100644
index 000000000000..9518d367a64f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -0,0 +1,164 @@
1/*
2 * Copyright 2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 */
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/irq.h>
13#include <linux/smp.h>
14#include <linux/interrupt.h>
15#include <linux/init.h>
16#include <linux/cpu.h>
17#include <linux/of.h>
18
19#include <asm/smp.h>
20#include <asm/irq.h>
21#include <asm/errno.h>
22#include <asm/xics.h>
23#include <asm/io.h>
24#include <asm/hvcall.h>
25
26static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
27{
28 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
29 long rc;
30
31 rc = plpar_hcall(H_XIRR, retbuf, cppr);
32 if (rc != H_SUCCESS)
33 panic(" bad return code xirr - rc = %lx\n", rc);
34 return (unsigned int)retbuf[0];
35}
36
37static inline void icp_hv_set_xirr(unsigned int value)
38{
39 long rc = plpar_hcall_norets(H_EOI, value);
40 if (rc != H_SUCCESS)
41 panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
42}
43
44static inline void icp_hv_set_cppr(u8 value)
45{
46 long rc = plpar_hcall_norets(H_CPPR, value);
47 if (rc != H_SUCCESS)
48 panic("bad return code cppr - rc = %lx\n", rc);
49}
50
51static inline void icp_hv_set_qirr(int n_cpu , u8 value)
52{
53 long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
54 value);
55 if (rc != H_SUCCESS)
56 panic("bad return code qirr - rc = %lx\n", rc);
57}
58
59static void icp_hv_eoi(struct irq_data *d)
60{
61 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
62
63 iosync();
64 icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
65}
66
67static void icp_hv_teardown_cpu(void)
68{
69 int cpu = smp_processor_id();
70
71 /* Clear any pending IPI */
72 icp_hv_set_qirr(cpu, 0xff);
73}
74
75static void icp_hv_flush_ipi(void)
76{
 77 /* We take the IPI irq but never return, so we need to EOI
 78 * the IPI while leaving our priority at 0.
79 *
80 * should we check all the other interrupts too?
81 * should we be flagging idle loop instead?
82 * or creating some task to be scheduled?
83 */
84
85 icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
86}
87
88static unsigned int icp_hv_get_irq(void)
89{
90 unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
91 unsigned int vec = xirr & 0x00ffffff;
92 unsigned int irq;
93
94 if (vec == XICS_IRQ_SPURIOUS)
95 return NO_IRQ;
96
97 irq = irq_radix_revmap_lookup(xics_host, vec);
98 if (likely(irq != NO_IRQ)) {
99 xics_push_cppr(vec);
100 return irq;
101 }
102
103 /* We don't have a linux mapping, so have rtas mask it. */
104 xics_mask_unknown_vec(vec);
105
106 /* We might learn about it later, so EOI it */
107 icp_hv_set_xirr(xirr);
108
109 return NO_IRQ;
110}
111
112static void icp_hv_set_cpu_priority(unsigned char cppr)
113{
114 xics_set_base_cppr(cppr);
115 icp_hv_set_cppr(cppr);
116 iosync();
117}
118
119#ifdef CONFIG_SMP
120
121static void icp_hv_cause_ipi(int cpu, unsigned long data)
122{
123 icp_hv_set_qirr(cpu, IPI_PRIORITY);
124}
125
126static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
127{
128 int cpu = smp_processor_id();
129
130 icp_hv_set_qirr(cpu, 0xff);
131
132 return smp_ipi_demux();
133}
134
135#endif /* CONFIG_SMP */
136
137static const struct icp_ops icp_hv_ops = {
138 .get_irq = icp_hv_get_irq,
139 .eoi = icp_hv_eoi,
140 .set_priority = icp_hv_set_cpu_priority,
141 .teardown_cpu = icp_hv_teardown_cpu,
142 .flush_ipi = icp_hv_flush_ipi,
143#ifdef CONFIG_SMP
144 .ipi_action = icp_hv_ipi_action,
145 .cause_ipi = icp_hv_cause_ipi,
146#endif
147};
148
149int icp_hv_init(void)
150{
151 struct device_node *np;
152
153 np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
154 if (!np)
155 np = of_find_node_by_type(NULL,
156 "PowerPC-External-Interrupt-Presentation");
157 if (!np)
158 return -ENODEV;
159
160 icp_ops = &icp_hv_ops;
161
162 return 0;
163}
164
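Both ICP backends build the XIRR value the same way on EOI: the CPPR being restored in the top byte and the hardware vector in the low 24 bits. A standalone sketch of that encoding; the example vector value is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* XIRR layout used by icp_hv_eoi()/icp_native_eoi():
 * bits 31..24 hold the CPPR, bits 23..0 hold the vector.
 */
static uint32_t xirr_compose(uint8_t cppr, uint32_t vec)
{
	return ((uint32_t)cppr << 24) | (vec & 0x00ffffff);
}

static uint32_t xirr_vector(uint32_t xirr)
{
	return xirr & 0x00ffffff;
}

int main(void)
{
	uint32_t xirr = xirr_compose(0x05, 0x1234);

	printf("xirr=0x%08x vec=0x%06x\n", xirr, xirr_vector(xirr));
	return 0;
}
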
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
new file mode 100644
index 000000000000..1f15ad436140
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -0,0 +1,293 @@
1/*
2 * Copyright 2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 */
10
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/irq.h>
14#include <linux/smp.h>
15#include <linux/interrupt.h>
16#include <linux/init.h>
17#include <linux/cpu.h>
18#include <linux/of.h>
19#include <linux/spinlock.h>
20
21#include <asm/prom.h>
22#include <asm/io.h>
23#include <asm/smp.h>
24#include <asm/irq.h>
25#include <asm/errno.h>
26#include <asm/xics.h>
27
28struct icp_ipl {
29 union {
30 u32 word;
31 u8 bytes[4];
32 } xirr_poll;
33 union {
34 u32 word;
35 u8 bytes[4];
36 } xirr;
37 u32 dummy;
38 union {
39 u32 word;
40 u8 bytes[4];
41 } qirr;
42 u32 link_a;
43 u32 link_b;
44 u32 link_c;
45};
46
47static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
48
49static inline unsigned int icp_native_get_xirr(void)
50{
51 int cpu = smp_processor_id();
52
53 return in_be32(&icp_native_regs[cpu]->xirr.word);
54}
55
56static inline void icp_native_set_xirr(unsigned int value)
57{
58 int cpu = smp_processor_id();
59
60 out_be32(&icp_native_regs[cpu]->xirr.word, value);
61}
62
63static inline void icp_native_set_cppr(u8 value)
64{
65 int cpu = smp_processor_id();
66
67 out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
68}
69
70static inline void icp_native_set_qirr(int n_cpu, u8 value)
71{
72 out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
73}
74
75static void icp_native_set_cpu_priority(unsigned char cppr)
76{
77 xics_set_base_cppr(cppr);
78 icp_native_set_cppr(cppr);
79 iosync();
80}
81
82static void icp_native_eoi(struct irq_data *d)
83{
84 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
85
86 iosync();
87 icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
88}
89
90static void icp_native_teardown_cpu(void)
91{
92 int cpu = smp_processor_id();
93
94 /* Clear any pending IPI */
95 icp_native_set_qirr(cpu, 0xff);
96}
97
98static void icp_native_flush_ipi(void)
99{
100 /* We take the IPI irq but never return, so we need to EOI
101 * the IPI while leaving our priority at 0.
102 *
103 * should we check all the other interrupts too?
104 * should we be flagging idle loop instead?
105 * or creating some task to be scheduled?
106 */
107
108 icp_native_set_xirr((0x00 << 24) | XICS_IPI);
109}
110
111static unsigned int icp_native_get_irq(void)
112{
113 unsigned int xirr = icp_native_get_xirr();
114 unsigned int vec = xirr & 0x00ffffff;
115 unsigned int irq;
116
117 if (vec == XICS_IRQ_SPURIOUS)
118 return NO_IRQ;
119
120 irq = irq_radix_revmap_lookup(xics_host, vec);
121 if (likely(irq != NO_IRQ)) {
122 xics_push_cppr(vec);
123 return irq;
124 }
125
126 /* We don't have a linux mapping, so have rtas mask it. */
127 xics_mask_unknown_vec(vec);
128
129 /* We might learn about it later, so EOI it */
130 icp_native_set_xirr(xirr);
131
132 return NO_IRQ;
133}
134
135#ifdef CONFIG_SMP
136
137static void icp_native_cause_ipi(int cpu, unsigned long data)
138{
139 icp_native_set_qirr(cpu, IPI_PRIORITY);
140}
141
142static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
143{
144 int cpu = smp_processor_id();
145
146 icp_native_set_qirr(cpu, 0xff);
147
148 return smp_ipi_demux();
149}
150
151#endif /* CONFIG_SMP */
152
153static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
154 unsigned long size)
155{
156 char *rname;
157 int i, cpu = -1;
158
159 /* This may look gross but it's good enough for now; we don't quite
160 * have a hard -> linux processor id mapping.
161 */
162 for_each_possible_cpu(i) {
163 if (!cpu_present(i))
164 continue;
165 if (hw_id == get_hard_smp_processor_id(i)) {
166 cpu = i;
167 break;
168 }
169 }
170
171 /* Fail, skip that CPU. Don't print; it's normal, some XICS come up
172 * with way more entries in there than you have CPUs.
173 */
174 if (cpu == -1)
175 return 0;
176
177 rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
178 cpu, hw_id);
179
180 if (!request_mem_region(addr, size, rname)) {
181 pr_warning("icp_native: Could not reserve ICP MMIO"
182 " for CPU %d, interrupt server #0x%x\n",
183 cpu, hw_id);
184 return -EBUSY;
185 }
186
187 icp_native_regs[cpu] = ioremap(addr, size);
188 if (!icp_native_regs[cpu]) {
189 pr_warning("icp_native: Failed ioremap for CPU %d, "
190 "interrupt server #0x%x, addr %#lx\n",
191 cpu, hw_id, addr);
192 release_mem_region(addr, size);
193 return -ENOMEM;
194 }
195 return 0;
196}
197
198static int __init icp_native_init_one_node(struct device_node *np,
199 unsigned int *indx)
200{
201 unsigned int ilen;
202 const u32 *ireg;
203 int i;
204 int reg_tuple_size;
205 int num_servers = 0;
206
207 /* This code makes the theoretically broken assumption that the
208 * interrupt server numbers are the same as the hard CPU numbers.
209 * That happens to be the case so far, but we are playing with fire...
210 * it should be fixed one of these days. -BenH.
211 */
212 ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);
213
214 /* Does that ever happen? We'll know soon enough... but even good old
215 * f80 does have that property.
216 */
217 WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));
218
219 if (ireg) {
220 *indx = of_read_number(ireg, 1);
221 if (ilen >= 2*sizeof(u32))
222 num_servers = of_read_number(ireg + 1, 1);
223 }
224
225 ireg = of_get_property(np, "reg", &ilen);
226 if (!ireg) {
227 pr_err("icp_native: Can't find interrupt reg property");
228 return -1;
229 }
230
231 reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
232 if (((ilen % reg_tuple_size) != 0)
233 || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
234 pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
235 ilen / reg_tuple_size, num_servers);
236 return -1;
237 }
238
239 for (i = 0; i < (ilen / reg_tuple_size); i++) {
240 struct resource r;
241 int err;
242
243 err = of_address_to_resource(np, i, &r);
244 if (err) {
245 pr_err("icp_native: Could not translate ICP MMIO"
246 " for interrupt server 0x%x (%d)\n", *indx, err);
247 return -1;
248 }
249
250 if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start))
251 return -1;
252
253 (*indx)++;
254 }
255 return 0;
256}
257
258static const struct icp_ops icp_native_ops = {
259 .get_irq = icp_native_get_irq,
260 .eoi = icp_native_eoi,
261 .set_priority = icp_native_set_cpu_priority,
262 .teardown_cpu = icp_native_teardown_cpu,
263 .flush_ipi = icp_native_flush_ipi,
264#ifdef CONFIG_SMP
265 .ipi_action = icp_native_ipi_action,
266 .cause_ipi = icp_native_cause_ipi,
267#endif
268};
269
270int icp_native_init(void)
271{
272 struct device_node *np;
273 u32 indx = 0;
274 int found = 0;
275
276 for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
277 if (icp_native_init_one_node(np, &indx) == 0)
278 found = 1;
279 if (!found) {
280 for_each_node_by_type(np,
281 "PowerPC-External-Interrupt-Presentation") {
282 if (icp_native_init_one_node(np, &indx) == 0)
283 found = 1;
284 }
285 }
286
287 if (found == 0)
288 return -ENODEV;
289
290 icp_ops = &icp_native_ops;
291
292 return 0;
293}
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
new file mode 100644
index 000000000000..c782f85cf7e4
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -0,0 +1,240 @@
1#include <linux/types.h>
2#include <linux/kernel.h>
3#include <linux/irq.h>
4#include <linux/smp.h>
5#include <linux/interrupt.h>
6#include <linux/init.h>
7#include <linux/cpu.h>
8#include <linux/of.h>
9#include <linux/spinlock.h>
10#include <linux/msi.h>
11
12#include <asm/prom.h>
13#include <asm/smp.h>
14#include <asm/machdep.h>
15#include <asm/irq.h>
16#include <asm/errno.h>
17#include <asm/xics.h>
18#include <asm/rtas.h>
19
20/* RTAS service tokens */
21static int ibm_get_xive;
22static int ibm_set_xive;
23static int ibm_int_on;
24static int ibm_int_off;
25
26static int ics_rtas_map(struct ics *ics, unsigned int virq);
27static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec);
28static long ics_rtas_get_server(struct ics *ics, unsigned long vec);
29static int ics_rtas_host_match(struct ics *ics, struct device_node *node);
30
31/* Only one global & state struct ics */
32static struct ics ics_rtas = {
33 .map = ics_rtas_map,
34 .mask_unknown = ics_rtas_mask_unknown,
35 .get_server = ics_rtas_get_server,
36 .host_match = ics_rtas_host_match,
37};
38
39static void ics_rtas_unmask_irq(struct irq_data *d)
40{
41 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
42 int call_status;
43 int server;
44
45 pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
46
47 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
48 return;
49
50 server = xics_get_irq_server(d->irq, d->affinity, 0);
51
52 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
53 DEFAULT_PRIORITY);
54 if (call_status != 0) {
55 printk(KERN_ERR
56 "%s: ibm_set_xive irq %u server %x returned %d\n",
57 __func__, hw_irq, server, call_status);
58 return;
59 }
60
61 /* Now unmask the interrupt (often a no-op) */
62 call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
63 if (call_status != 0) {
64 printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
65 __func__, hw_irq, call_status);
66 return;
67 }
68}
69
70static unsigned int ics_rtas_startup(struct irq_data *d)
71{
72#ifdef CONFIG_PCI_MSI
73 /*
74 * The generic MSI code returns with the interrupt disabled on the
75 * card, using the MSI mask bits. Firmware doesn't appear to unmask
76 * at that level, so we do it here by hand.
77 */
78 if (d->msi_desc)
79 unmask_msi_irq(d);
80#endif
81 /* unmask it */
82 ics_rtas_unmask_irq(d);
83 return 0;
84}
85
86static void ics_rtas_mask_real_irq(unsigned int hw_irq)
87{
88 int call_status;
89
90 if (hw_irq == XICS_IPI)
91 return;
92
93 call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
94 if (call_status != 0) {
95 printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
96 __func__, hw_irq, call_status);
97 return;
98 }
99
100 /* Have to set XIVE to 0xff to be able to remove a slot */
101 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
102 xics_default_server, 0xff);
103 if (call_status != 0) {
104 printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
105 __func__, hw_irq, call_status);
106 return;
107 }
108}
109
110static void ics_rtas_mask_irq(struct irq_data *d)
111{
112 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
113
114 pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
115
116 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
117 return;
118 ics_rtas_mask_real_irq(hw_irq);
119}
120
121static int ics_rtas_set_affinity(struct irq_data *d,
122 const struct cpumask *cpumask,
123 bool force)
124{
125 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
126 int status;
127 int xics_status[2];
128 int irq_server;
129
130 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
131 return -1;
132
133 status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);
134
135 if (status) {
136 printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
137 __func__, hw_irq, status);
138 return -1;
139 }
140
141 irq_server = xics_get_irq_server(d->irq, cpumask, 1);
142 if (irq_server == -1) {
143 char cpulist[128];
144 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
145 printk(KERN_WARNING
146 "%s: No online cpus in the mask %s for irq %d\n",
147 __func__, cpulist, d->irq);
148 return -1;
149 }
150
151 status = rtas_call(ibm_set_xive, 3, 1, NULL,
152 hw_irq, irq_server, xics_status[1]);
153
154 if (status) {
155 printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
156 __func__, hw_irq, status);
157 return -1;
158 }
159
160 return IRQ_SET_MASK_OK;
161}
162
163static struct irq_chip ics_rtas_irq_chip = {
164 .name = "XICS",
165 .irq_startup = ics_rtas_startup,
166 .irq_mask = ics_rtas_mask_irq,
167 .irq_unmask = ics_rtas_unmask_irq,
168 .irq_eoi = NULL, /* Patched at init time */
169 .irq_set_affinity = ics_rtas_set_affinity
170};
171
172static int ics_rtas_map(struct ics *ics, unsigned int virq)
173{
174 unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
175 int status[2];
176 int rc;
177
178 if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
179 return -EINVAL;
180
181 /* Check if RTAS knows about this interrupt */
182 rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
183 if (rc)
184 return -ENXIO;
185
186 irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
187 irq_set_chip_data(virq, &ics_rtas);
188
189 return 0;
190}
191
192static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
193{
194 ics_rtas_mask_real_irq(vec);
195}
196
197static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
198{
199 int rc, status[2];
200
201 rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
202 if (rc)
203 return -1;
204 return status[0];
205}
206
207static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
208{
209 /* IBM machines have interrupt parents of various funky types for things
210 * like vdevices, events, etc... The trick we use here is to match
211 * everything except the legacy 8259, which is compatible with "chrp,iic".
212 */
213 return !of_device_is_compatible(node, "chrp,iic");
214}
215
216int ics_rtas_init(void)
217{
218 ibm_get_xive = rtas_token("ibm,get-xive");
219 ibm_set_xive = rtas_token("ibm,set-xive");
220 ibm_int_on = rtas_token("ibm,int-on");
221 ibm_int_off = rtas_token("ibm,int-off");
222
223 /* We enable the RTAS "ICS" if RTAS is present with the
224 * appropriate tokens
225 */
226 if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
227 ibm_set_xive == RTAS_UNKNOWN_SERVICE)
228 return -ENODEV;
229
230 /* We need to patch our irq chip's EOI to point to the
231 * right ICP
232 */
233 ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;
234
235 /* Register ourselves */
236 xics_register_ics(&ics_rtas);
237
238 return 0;
239}
240
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
new file mode 100644
index 000000000000..445c5a01b766
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright 2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 */
10#include <linux/types.h>
11#include <linux/threads.h>
12#include <linux/kernel.h>
13#include <linux/irq.h>
14#include <linux/debugfs.h>
15#include <linux/smp.h>
16#include <linux/interrupt.h>
17#include <linux/seq_file.h>
18#include <linux/init.h>
19#include <linux/cpu.h>
20#include <linux/of.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23
24#include <asm/prom.h>
25#include <asm/io.h>
26#include <asm/smp.h>
27#include <asm/machdep.h>
28#include <asm/irq.h>
29#include <asm/errno.h>
30#include <asm/rtas.h>
31#include <asm/xics.h>
32#include <asm/firmware.h>
33
34/* Globals common to all ICP/ICS implementations */
35const struct icp_ops *icp_ops;
36
37unsigned int xics_default_server = 0xff;
38unsigned int xics_default_distrib_server = 0;
39unsigned int xics_interrupt_server_size = 8;
40
41DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
42
43struct irq_host *xics_host;
44
45static LIST_HEAD(ics_list);
46
47void xics_update_irq_servers(void)
48{
49 int i, j;
50 struct device_node *np;
51 u32 ilen;
52 const u32 *ireg;
53 u32 hcpuid;
54
55 /* Find the server numbers for the boot cpu. */
56 np = of_get_cpu_node(boot_cpuid, NULL);
57 BUG_ON(!np);
58
59 hcpuid = get_hard_smp_processor_id(boot_cpuid);
60 xics_default_server = xics_default_distrib_server = hcpuid;
61
62 pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);
63
64 ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
65 if (!ireg) {
66 of_node_put(np);
67 return;
68 }
69
70 i = ilen / sizeof(int);
71
72 /* The global interrupt distribution server is specified in the last
73 * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last
74 * entry from this property for the current boot cpu id and use it
75 * as the default distribution server.
76 */
77 for (j = 0; j < i; j += 2) {
78 if (ireg[j] == hcpuid) {
79 xics_default_distrib_server = ireg[j+1];
80 break;
81 }
82 }
83 pr_devel("xics: xics_default_distrib_server = 0x%x\n",
84 xics_default_distrib_server);
85 of_node_put(np);
86}
87
88/* GIQ stuff, currently only supported on RTAS setups, will have
89 * to be sorted properly for bare metal
90 */
91void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
92{
93#ifdef CONFIG_PPC_RTAS
94 int index;
95 int status;
96
97 if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
98 return;
99
100 index = (1UL << xics_interrupt_server_size) - 1 - gserver;
101
102 status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
103
104 WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
105 GLOBAL_INTERRUPT_QUEUE, index, join, status);
106#endif
107}
108
109void xics_setup_cpu(void)
110{
111 icp_ops->set_priority(LOWEST_PRIORITY);
112
113 xics_set_cpu_giq(xics_default_distrib_server, 1);
114}
115
116void xics_mask_unknown_vec(unsigned int vec)
117{
118 struct ics *ics;
119
120 pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
121
122 list_for_each_entry(ics, &ics_list, link)
123 ics->mask_unknown(ics, vec);
124}
125
126
127#ifdef CONFIG_SMP
128
129static void xics_request_ipi(void)
130{
131 unsigned int ipi;
132
133 ipi = irq_create_mapping(xics_host, XICS_IPI);
134 BUG_ON(ipi == NO_IRQ);
135
136 /*
137 * IPIs are marked IRQF_DISABLED as they must run with irqs
138 * disabled, and PERCPU. The handler was set in map.
139 */
140 BUG_ON(request_irq(ipi, icp_ops->ipi_action,
141 IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
142}
143
144int __init xics_smp_probe(void)
145{
146 /* Setup cause_ipi callback based on which ICP is used */
147 smp_ops->cause_ipi = icp_ops->cause_ipi;
148
149 /* Register all the IPIs */
150 xics_request_ipi();
151
152 return cpumask_weight(cpu_possible_mask);
153}
154
155#endif /* CONFIG_SMP */
156
157void xics_teardown_cpu(void)
158{
159 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
160
161 /*
162 * we have to reset the cppr index to 0 because we're
163 * not going to return from the IPI
164 */
165 os_cppr->index = 0;
166 icp_ops->set_priority(0);
167 icp_ops->teardown_cpu();
168}
169
170void xics_kexec_teardown_cpu(int secondary)
171{
172 xics_teardown_cpu();
173
174 icp_ops->flush_ipi();
175
176 /*
177 * Some machines need to have at least one cpu in the GIQ,
178 * so leave the master cpu in the group.
179 */
180 if (secondary)
181 xics_set_cpu_giq(xics_default_distrib_server, 0);
182}
183
184
185#ifdef CONFIG_HOTPLUG_CPU
186
187/* Interrupts are disabled. */
188void xics_migrate_irqs_away(void)
189{
190 int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
191 unsigned int irq, virq;
192
193 /* If we used to be the default server, move to the new "boot_cpuid" */
194 if (hw_cpu == xics_default_server)
195 xics_update_irq_servers();
196
197 /* Reject any interrupt that was queued to us... */
198 icp_ops->set_priority(0);
199
200 /* Remove ourselves from the global interrupt queue */
201 xics_set_cpu_giq(xics_default_distrib_server, 0);
202
203 /* Allow IPIs again... */
204 icp_ops->set_priority(DEFAULT_PRIORITY);
205
206 for_each_irq(virq) {
207 struct irq_desc *desc;
208 struct irq_chip *chip;
209 long server;
210 unsigned long flags;
211 struct ics *ics;
212
213 /* We can't set affinity on ISA interrupts */
214 if (virq < NUM_ISA_INTERRUPTS)
215 continue;
216 if (!virq_is_host(virq, xics_host))
217 continue;
218 irq = (unsigned int)virq_to_hw(virq);
219 /* We need to get IPIs still. */
220 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
221 continue;
222 desc = irq_to_desc(virq);
223 /* We only need to migrate enabled IRQS */
224 if (!desc || !desc->action)
225 continue;
226 chip = irq_desc_get_chip(desc);
227 if (!chip || !chip->irq_set_affinity)
228 continue;
229
230 raw_spin_lock_irqsave(&desc->lock, flags);
231
232 /* Locate interrupt server */
233 server = -1;
234 ics = irq_get_chip_data(virq);
235 if (ics)
236 server = ics->get_server(ics, irq);
237 if (server < 0) {
238 printk(KERN_ERR "%s: Can't find server for irq %d\n",
239 __func__, irq);
240 goto unlock;
241 }
242
243 /* We only support delivery to all cpus or to one cpu.
244 * The irq has to be migrated only in the single cpu
245 * case.
246 */
247 if (server != hw_cpu)
248 goto unlock;
249
250 /* This is expected during cpu offline. */
251 if (cpu_online(cpu))
252 pr_warning("IRQ %u affinity broken off cpu %u\n",
253 virq, cpu);
254
255 /* Reset affinity to all cpus */
256 raw_spin_unlock_irqrestore(&desc->lock, flags);
257 irq_set_affinity(virq, cpu_all_mask);
258 continue;
259unlock:
260 raw_spin_unlock_irqrestore(&desc->lock, flags);
261 }
262}
263#endif /* CONFIG_HOTPLUG_CPU */
264
265#ifdef CONFIG_SMP
266/*
267 * For the moment we only implement delivery to all cpus or one cpu.
268 *
269 * If the requested affinity is cpu_all_mask, we set global affinity.
270 * If not we set it to the first cpu in the mask, even if multiple cpus
271 * are set. This is so things like irqbalance (which set core and package
272 * wide affinities) do the right thing.
273 *
274 * We need to fix this to implement support for the links
275 */
276int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
277 unsigned int strict_check)
278{
279
280 if (!distribute_irqs)
281 return xics_default_server;
282
283 if (!cpumask_subset(cpu_possible_mask, cpumask)) {
284 int server = cpumask_first_and(cpu_online_mask, cpumask);
285
286 if (server < nr_cpu_ids)
287 return get_hard_smp_processor_id(server);
288
289 if (strict_check)
290 return -1;
291 }
292
293 /*
294 * Workaround issue with some versions of JS20 firmware that
295 * deliver interrupts to cpus which haven't been started. This
296 * happens when using the maxcpus= boot option.
297 */
298 if (cpumask_equal(cpu_online_mask, cpu_present_mask))
299 return xics_default_distrib_server;
300
301 return xics_default_server;
302}
303#endif /* CONFIG_SMP */
304
305static int xics_host_match(struct irq_host *h, struct device_node *node)
306{
307 struct ics *ics;
308
309 list_for_each_entry(ics, &ics_list, link)
310 if (ics->host_match(ics, node))
311 return 1;
312
313 return 0;
314}
315
316/* Dummies */
317static void xics_ipi_unmask(struct irq_data *d) { }
318static void xics_ipi_mask(struct irq_data *d) { }
319
320static struct irq_chip xics_ipi_chip = {
321 .name = "XICS",
322 .irq_eoi = NULL, /* Patched at init time */
323 .irq_mask = xics_ipi_mask,
324 .irq_unmask = xics_ipi_unmask,
325};
326
327static int xics_host_map(struct irq_host *h, unsigned int virq,
328 irq_hw_number_t hw)
329{
330 struct ics *ics;
331
332 pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
333
334 /* Insert the interrupt mapping into the radix tree for fast lookup */
335 irq_radix_revmap_insert(xics_host, virq, hw);
336
337 /* They aren't all level sensitive but we just don't really know */
338 irq_set_status_flags(virq, IRQ_LEVEL);
339
340 /* Don't call into ICS for IPIs */
341 if (hw == XICS_IPI) {
342 irq_set_chip_and_handler(virq, &xics_ipi_chip,
343 handle_percpu_irq);
344 return 0;
345 }
346
347 /* Let the ICS setup the chip data */
348 list_for_each_entry(ics, &ics_list, link)
349 if (ics->map(ics, virq) == 0)
350 return 0;
351
352 return -EINVAL;
353}
354
355static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
356 const u32 *intspec, unsigned int intsize,
357 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
358
359{
360 /* Current xics implementation translates everything
361 * to level. It is not technically right for MSIs but this
362 * is irrelevant at this point. We might get smarter in the future
363 */
364 *out_hwirq = intspec[0];
365 *out_flags = IRQ_TYPE_LEVEL_LOW;
366
367 return 0;
368}
369
370static struct irq_host_ops xics_host_ops = {
371 .match = xics_host_match,
372 .map = xics_host_map,
373 .xlate = xics_host_xlate,
374};
375
376static void __init xics_init_host(void)
377{
378 xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
379 XICS_IRQ_SPURIOUS);
380 BUG_ON(xics_host == NULL);
381 irq_set_default_host(xics_host);
382}
383
384void __init xics_register_ics(struct ics *ics)
385{
386 list_add(&ics->link, &ics_list);
387}
388
389static void __init xics_get_server_size(void)
390{
391 struct device_node *np;
392 const u32 *isize;
393
394 /* We fetch the interrupt server size from the first ICS node
395	 * we find, if any
396 */
397 np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
398 if (!np)
399 return;
400 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
401 if (!isize)
402 return;
403 xics_interrupt_server_size = *isize;
404 of_node_put(np);
405}
406
407void __init xics_init(void)
408{
409 int rc = -1;
410
411	/* First locate ICP */
412#ifdef CONFIG_PPC_ICP_HV
413 if (firmware_has_feature(FW_FEATURE_LPAR))
414 rc = icp_hv_init();
415#endif
416#ifdef CONFIG_PPC_ICP_NATIVE
417 if (rc < 0)
418 rc = icp_native_init();
419#endif
420 if (rc < 0) {
421 pr_warning("XICS: Cannot find a Presentation Controller !\n");
422 return;
423 }
424
425 /* Copy get_irq callback over to ppc_md */
426 ppc_md.get_irq = icp_ops->get_irq;
427
428 /* Patch up IPI chip EOI */
429 xics_ipi_chip.irq_eoi = icp_ops->eoi;
430
431 /* Now locate ICS */
432#ifdef CONFIG_PPC_ICS_RTAS
433 rc = ics_rtas_init();
434#endif
435 if (rc < 0)
436 pr_warning("XICS: Cannot find a Source Controller !\n");
437
438 /* Initialize common bits */
439 xics_get_server_size();
440 xics_update_irq_servers();
441 xics_init_host();
442 xics_setup_cpu();
443}
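
For reference, a minimal sketch of how a source-controller backend might hook into the common code above. xics_register_ics() only links the backend into ics_list; the core then calls back through the ops used in xics_host_match(), xics_host_map() and the migration path. The struct ics layout here is inferred from those call sites (and the compatible string is made up), not copied from the real header.

static int my_ics_map(struct ics *ics, unsigned int virq)
{
	/* set the chip, handler and chip data for this virq; 0 on success */
	return 0;
}

static long my_ics_get_server(struct ics *ics, unsigned int hwirq)
{
	/* return the hardware server this source currently targets */
	return 0;
}

static int my_ics_host_match(struct ics *ics, struct device_node *node)
{
	return of_device_is_compatible(node, "my,interrupt-source");
}

static struct ics my_ics = {
	.map        = my_ics_map,
	.get_server = my_ics_get_server,
	.host_match = my_ics_host_match,
};

static int __init my_ics_init(void)
{
	xics_register_ics(&my_ics);	/* adds my_ics.link to ics_list */
	return 0;
}
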
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 0a13fc19e287..6183799754af 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -71,7 +71,7 @@ static unsigned char xilinx_intc_map_senses[] = {
  */
 static void xilinx_intc_mask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void * regs = irq_data_get_irq_chip_data(d);
 	pr_debug("mask: %d\n", irq);
 	out_be32(regs + XINTC_CIE, 1 << irq);
@@ -87,7 +87,7 @@ static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
  */
 static void xilinx_intc_level_unmask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void * regs = irq_data_get_irq_chip_data(d);
 	pr_debug("unmask: %d\n", irq);
 	out_be32(regs + XINTC_SIE, 1 << irq);
@@ -112,7 +112,7 @@ static struct irq_chip xilinx_intc_level_irqchip = {
  */
 static void xilinx_intc_edge_unmask(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void *regs = irq_data_get_irq_chip_data(d);
 	pr_debug("unmask: %d\n", irq);
 	out_be32(regs + XINTC_SIE, 1 << irq);
@@ -120,7 +120,7 @@ static void xilinx_intc_edge_unmask(struct irq_data *d)
 
 static void xilinx_intc_edge_ack(struct irq_data *d)
 {
-	int irq = virq_to_hw(d->irq);
+	int irq = irqd_to_hwirq(d);
 	void * regs = irq_data_get_irq_chip_data(d);
 	pr_debug("ack: %d\n", irq);
 	out_be32(regs + XINTC_IAR, 1 << irq);
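
The only change in this driver is the switch from looking the hardware number up by virq to reading it straight out of the irq_data the core already hands us; the helper is assumed to reduce to something like the sketch below (illustrative, not the real definition).

static inline irq_hw_number_t example_irqd_to_hwirq(struct irq_data *d)
{
	/* hwirq is cached in irq_data at map time, so no reverse lookup is needed */
	return d->hwirq;
}
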
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 33794c1d92c3..42541bbcc7fa 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -334,7 +334,7 @@ static void release_output_lock(void)
 
 int cpus_are_in_xmon(void)
 {
-	return !cpus_empty(cpus_in_xmon);
+	return !cpumask_empty(&cpus_in_xmon);
 }
 #endif
 
@@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 
 #ifdef CONFIG_SMP
 	cpu = smp_processor_id();
-	if (cpu_isset(cpu, cpus_in_xmon)) {
+	if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 		get_output_lock();
 		excprint(regs);
 		printf("cpu 0x%x: Exception %lx %s in xmon, "
@@ -396,10 +396,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	}
 
 	xmon_fault_jmp[cpu] = recurse_jmp;
-	cpu_set(cpu, cpus_in_xmon);
+	cpumask_set_cpu(cpu, &cpus_in_xmon);
 
 	bp = NULL;
-	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
+	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
 		bp = at_breakpoint(regs->nip);
 	if (bp || unrecoverable_excp(regs))
 		fromipi = 0;
@@ -437,10 +437,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 			xmon_owner = cpu;
 			mb();
 			if (ncpus > 1) {
-				smp_send_debugger_break(MSG_ALL_BUT_SELF);
+				smp_send_debugger_break();
 				/* wait for other cpus to come in */
 				for (timeout = 100000000; timeout != 0; --timeout) {
-					if (cpus_weight(cpus_in_xmon) >= ncpus)
+					if (cpumask_weight(&cpus_in_xmon) >= ncpus)
 						break;
 					barrier();
 				}
@@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		}
 	}
  leave:
-	cpu_clear(cpu, cpus_in_xmon);
+	cpumask_clear_cpu(cpu, &cpus_in_xmon);
 	xmon_fault_jmp[cpu] = NULL;
 #else
 	/* UP is simple... */
@@ -529,7 +529,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 			}
 		}
 #else
-	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) {
+	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
 		bp = at_breakpoint(regs->nip);
 		if (bp != NULL) {
 			int stepped = emulate_step(regs, bp->instr[0]);
@@ -578,7 +578,7 @@ static int xmon_bpt(struct pt_regs *regs)
 	struct bpt *bp;
 	unsigned long offset;
 
-	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
+	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
 		return 0;
 
 	/* Are we at the trap at bp->instr[1] for some bp? */
@@ -609,7 +609,7 @@ static int xmon_sstep(struct pt_regs *regs)
 
 static int xmon_dabr_match(struct pt_regs *regs)
 {
-	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
+	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
 		return 0;
 	if (dabr.enabled == 0)
 		return 0;
@@ -619,7 +619,7 @@ static int xmon_dabr_match(struct pt_regs *regs)
 
 static int xmon_iabr_match(struct pt_regs *regs)
 {
-	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
+	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
 		return 0;
 	if (iabr == NULL)
 		return 0;
@@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs)
 static int xmon_ipi(struct pt_regs *regs)
 {
 #ifdef CONFIG_SMP
-	if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
+	if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
 		xmon_core(regs, 1);
 #endif
 	return 0;
@@ -644,7 +644,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
 	if (in_xmon && catch_memory_errors)
 		handle_fault(regs);	/* doesn't return */
 
-	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) {
+	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
 		bp = in_breakpoint_table(regs->nip, &offset);
 		if (bp != NULL) {
 			regs->nip = bp->address + offset;
@@ -929,7 +929,7 @@ static int do_step(struct pt_regs *regs)
 	int stepped;
 
 	/* check we are in 64-bit kernel mode, translation enabled */
-	if ((regs->msr & (MSR_SF|MSR_PR|MSR_IR)) == (MSR_SF|MSR_IR)) {
+	if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) {
 		if (mread(regs->nip, &instr, 4) == 4) {
 			stepped = emulate_step(regs, instr);
 			if (stepped < 0) {
@@ -976,7 +976,7 @@ static int cpu_cmd(void)
 		printf("cpus stopped:");
 		count = 0;
 		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-			if (cpu_isset(cpu, cpus_in_xmon)) {
+			if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 				if (count == 0)
 					printf(" %x", cpu);
 				++count;
@@ -992,7 +992,7 @@ static int cpu_cmd(void)
 		return 0;
 	}
 	/* try to switch to cpu specified */
-	if (!cpu_isset(cpu, cpus_in_xmon)) {
+	if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
 		printf("cpu 0x%x isn't in xmon\n", cpu);
 		return 0;
 	}
@@ -1497,6 +1497,10 @@ static void prregs(struct pt_regs *fp)
 #endif
 	printf("pc = ");
 	xmon_print_symbol(fp->nip, " ", "\n");
+	if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) {
+		printf("cfar= ");
+		xmon_print_symbol(fp->orig_gpr3, " ", "\n");
+	}
 	printf("lr = ");
 	xmon_print_symbol(fp->link, " ", "\n");
 	printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr);
@@ -2663,7 +2667,7 @@ static void dump_stab(void)
 
 void dump_segments(void)
 {
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		dump_slb();
 	else
 		dump_stab();
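
Most of the xmon churn above is a mechanical move from the old by-value cpumask macros to the pointer-based helpers, plus MSR_SF spelled as MSR_64BIT (presumably so the same test also stays meaningful on 32-bit builds, where the 64-bit mode bit folds away). A throwaway sketch of the old-to-new mapping, using a hypothetical mask:

static struct cpumask example_mask;

static void example_cpumask_usage(int cpu)
{
	cpumask_set_cpu(cpu, &example_mask);		/* was cpu_set() */
	if (cpumask_test_cpu(cpu, &example_mask))	/* was cpu_isset() */
		pr_info("%u cpus tracked\n", cpumask_weight(&example_mask));
	cpumask_clear_cpu(cpu, &example_mask);		/* was cpu_clear() */
	if (cpumask_empty(&example_mask))		/* was cpus_empty() */
		pr_info("mask now empty\n");
}
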
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index a4a6c2f044b5..cf39bc08ce08 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -295,7 +295,7 @@ static int bsr_create_devs(struct device_node *bn)
 static int __init bsr_init(void)
 {
 	struct device_node *np;
-	dev_t bsr_dev = MKDEV(bsr_major, 0);
+	dev_t bsr_dev;
 	int ret = -ENODEV;
 	int result;
 
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 24af12fc8228..c0221eec8817 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -269,11 +269,8 @@ struct t4_swsqe {
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
 {
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
 	return pgprot_writecombine(prot);
-#elif defined(CONFIG_PPC64)
-	return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) &
-			~(pgprot_t)_PAGE_GUARDED);
 #else
 	return pgprot_noncached(prot);
 #endif
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 8b021eb0d48c..6cccd60c594e 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -40,7 +40,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/freezer.h>
 #include <linux/syscalls.h>
 #include <linux/suspend.h>
@@ -2527,12 +2527,9 @@ void pmu_blink(int n)
 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
 int pmu_sys_suspended;
 
-static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
+static int pmu_syscore_suspend(void)
 {
-	if (state.event != PM_EVENT_SUSPEND || pmu_sys_suspended)
-		return 0;
-
-	/* Suspend PMU event interrupts */\
+	/* Suspend PMU event interrupts */
 	pmu_suspend();
 	pmu_sys_suspended = 1;
 
@@ -2544,12 +2541,12 @@ static int pmu_sys_suspend(struct sys_device *sysdev, pm_message_t state)
 	return 0;
 }
 
-static int pmu_sys_resume(struct sys_device *sysdev)
+static void pmu_syscore_resume(void)
 {
 	struct adb_request req;
 
 	if (!pmu_sys_suspended)
-		return 0;
+		return;
 
 	/* Tell PMU we are ready */
 	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
@@ -2562,50 +2559,21 @@ static int pmu_sys_resume(struct sys_device *sysdev)
 	/* Resume PMU event interrupts */
 	pmu_resume();
 	pmu_sys_suspended = 0;
-
-	return 0;
 }
 
-#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
-
-static struct sysdev_class pmu_sysclass = {
-	.name = "pmu",
-};
-
-static struct sys_device device_pmu = {
-	.cls = &pmu_sysclass,
-};
-
-static struct sysdev_driver driver_pmu = {
-#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
-	.suspend = &pmu_sys_suspend,
-	.resume = &pmu_sys_resume,
-#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
+static struct syscore_ops pmu_syscore_ops = {
+	.suspend = pmu_syscore_suspend,
+	.resume = pmu_syscore_resume,
 };
 
-static int __init init_pmu_sysfs(void)
-{
-	int rc;
-
-	rc = sysdev_class_register(&pmu_sysclass);
-	if (rc) {
-		printk(KERN_ERR "Failed registering PMU sys class\n");
-		return -ENODEV;
-	}
-	rc = sysdev_register(&device_pmu);
-	if (rc) {
-		printk(KERN_ERR "Failed registering PMU sys device\n");
-		return -ENODEV;
-	}
-	rc = sysdev_driver_register(&pmu_sysclass, &driver_pmu);
-	if (rc) {
-		printk(KERN_ERR "Failed registering PMU sys driver\n");
-		return -ENODEV;
-	}
+static int pmu_syscore_register(void)
+{
+	register_syscore_ops(&pmu_syscore_ops);
+
 	return 0;
 }
-
-subsys_initcall(init_pmu_sysfs);
+subsys_initcall(pmu_syscore_register);
+#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
 
 EXPORT_SYMBOL(pmu_request);
 EXPORT_SYMBOL(pmu_queue_request);
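
The via-pmu change above is the standard sysdev-to-syscore conversion: the class/device/driver trio and its registration boilerplate collapse into a single static ops table. The general shape of the pattern, shown for a hypothetical driver:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_syscore_suspend(void)
{
	/* quiesce the hardware; runs late, on one CPU, with interrupts off */
	return 0;
}

static void example_syscore_resume(void)
{
	/* undo whatever suspend did */
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume  = example_syscore_resume,
};

static int __init example_syscore_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
subsys_initcall(example_syscore_init);
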
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4e007c6a4b44..d80dcdee88f3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -481,5 +481,6 @@ source "drivers/misc/cb710/Kconfig"
 source "drivers/misc/iwmc3200top/Kconfig"
 source "drivers/misc/ti-st/Kconfig"
 source "drivers/misc/lis3lv02d/Kconfig"
+source "drivers/misc/carma/Kconfig"
 
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f5468602961f..848e8464faab 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_PCH_PHUB) += pch_phub.o
 obj-y		+= ti-st/
 obj-$(CONFIG_AB8500_PWM)	+= ab8500-pwm.o
 obj-y		+= lis3lv02d/
+obj-y		+= carma/
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
new file mode 100644
index 000000000000..c90370ed712b
--- /dev/null
+++ b/drivers/misc/carma/Kconfig
@@ -0,0 +1,17 @@
1config CARMA_FPGA
2 tristate "CARMA DATA-FPGA Access Driver"
3 depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
4 select VIDEOBUF_DMA_SG
5 default n
6 help
7 Say Y here to include support for communicating with the data
8 processing FPGAs on the OVRO CARMA board.
9
10config CARMA_FPGA_PROGRAM
11 tristate "CARMA DATA-FPGA Programmer"
12 depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA
13 select VIDEOBUF_DMA_SG
14 default n
15 help
16 Say Y here to include support for programming the data processing
17 FPGAs on the OVRO CARMA board.
diff --git a/drivers/misc/carma/Makefile b/drivers/misc/carma/Makefile
new file mode 100644
index 000000000000..ff36ac2ce534
--- /dev/null
+++ b/drivers/misc/carma/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_CARMA_FPGA) += carma-fpga.o
2obj-$(CONFIG_CARMA_FPGA_PROGRAM) += carma-fpga-program.o
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
new file mode 100644
index 000000000000..7ce6065dc20e
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -0,0 +1,1141 @@
1/*
2 * CARMA Board DATA-FPGA Programmer
3 *
4 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/dma-mapping.h>
13#include <linux/of_platform.h>
14#include <linux/completion.h>
15#include <linux/miscdevice.h>
16#include <linux/dmaengine.h>
17#include <linux/interrupt.h>
18#include <linux/highmem.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/leds.h>
25#include <linux/slab.h>
26#include <linux/kref.h>
27#include <linux/fs.h>
28#include <linux/io.h>
29
30#include <media/videobuf-dma-sg.h>
31
32/* MPC8349EMDS specific get_immrbase() */
33#include <sysdev/fsl_soc.h>
34
35static const char drv_name[] = "carma-fpga-program";
36
37/*
38 * Firmware images are always this exact size
39 *
40 * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs)
41 * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs)
42 */
43#define FW_SIZE_EP2S90 12849552
44#define FW_SIZE_EP2S130 18662880
45
46struct fpga_dev {
47 struct miscdevice miscdev;
48
49 /* Reference count */
50 struct kref ref;
51
52 /* Device Registers */
53 struct device *dev;
54 void __iomem *regs;
55 void __iomem *immr;
56
57 /* Freescale DMA Device */
58 struct dma_chan *chan;
59
60 /* Interrupts */
61 int irq, status;
62 struct completion completion;
63
64 /* FPGA Bitfile */
65 struct mutex lock;
66
67 struct videobuf_dmabuf vb;
68 bool vb_allocated;
69
70 /* max size and written bytes */
71 size_t fw_size;
72 size_t bytes;
73};
74
75/*
76 * FPGA Bitfile Helpers
77 */
78
79/**
80 * fpga_drop_firmware_data() - drop the bitfile image from memory
81 * @priv: the driver's private data structure
82 *
83 * LOCKING: must hold priv->lock
84 */
85static void fpga_drop_firmware_data(struct fpga_dev *priv)
86{
87 videobuf_dma_free(&priv->vb);
88 priv->vb_allocated = false;
89 priv->bytes = 0;
90}
91
92/*
93 * Private Data Reference Count
94 */
95
96static void fpga_dev_remove(struct kref *ref)
97{
98 struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref);
99
100 /* free any firmware image that was not programmed */
101 fpga_drop_firmware_data(priv);
102
103 mutex_destroy(&priv->lock);
104 kfree(priv);
105}
106
107/*
108 * LED Trigger (could be a separate module)
109 */
110
111/*
112 * NOTE: this whole thing does have the problem that whenever the LEDs are
113 * NOTE: first set to use the FPGA trigger, they could be in the wrong state
114 */
115
116DEFINE_LED_TRIGGER(ledtrig_fpga);
117
118static void ledtrig_fpga_programmed(bool enabled)
119{
120 if (enabled)
121 led_trigger_event(ledtrig_fpga, LED_FULL);
122 else
123 led_trigger_event(ledtrig_fpga, LED_OFF);
124}
125
126/*
127 * FPGA Register Helpers
128 */
129
130/* Register Definitions */
131#define FPGA_CONFIG_CONTROL 0x40
132#define FPGA_CONFIG_STATUS 0x44
133#define FPGA_CONFIG_FIFO_SIZE 0x48
134#define FPGA_CONFIG_FIFO_USED 0x4C
135#define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50
136#define FPGA_CONFIG_CUR_BYTE_COUNT 0x54
137
138#define FPGA_FIFO_ADDRESS 0x3000
139
140static int fpga_fifo_size(void __iomem *regs)
141{
142 return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE);
143}
144
145#define CFG_STATUS_ERR_MASK 0xfffe
146
147static int fpga_config_error(void __iomem *regs)
148{
149 return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK;
150}
151
152static int fpga_fifo_empty(void __iomem *regs)
153{
154 return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0;
155}
156
157static void fpga_fifo_write(void __iomem *regs, u32 val)
158{
159 iowrite32be(val, regs + FPGA_FIFO_ADDRESS);
160}
161
162static void fpga_set_byte_count(void __iomem *regs, u32 count)
163{
164 iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
165}
166
167#define CFG_CTL_ENABLE (1 << 0)
168#define CFG_CTL_RESET (1 << 1)
169#define CFG_CTL_DMA (1 << 2)
170
171static void fpga_programmer_enable(struct fpga_dev *priv, bool dma)
172{
173 u32 val;
174
175 val = (dma) ? (CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE;
176 iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
177}
178
179static void fpga_programmer_disable(struct fpga_dev *priv)
180{
181 iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
182}
183
184static void fpga_dump_registers(struct fpga_dev *priv)
185{
186 u32 control, status, size, used, total, curr;
187
188 /* good status: do nothing */
189 if (priv->status == 0)
190 return;
191
192 /* Dump all status registers */
193 control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL);
194 status = ioread32be(priv->regs + FPGA_CONFIG_STATUS);
195 size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE);
196 used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED);
197 total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT);
198 curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT);
199
200 dev_err(priv->dev, "Configuration failed, dumping status registers\n");
201 dev_err(priv->dev, "Control: 0x%.8x\n", control);
202 dev_err(priv->dev, "Status: 0x%.8x\n", status);
203 dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size);
204 dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used);
205 dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total);
206 dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr);
207}
208
209/*
210 * FPGA Power Supply Code
211 */
212
213#define CTL_PWR_CONTROL 0x2006
214#define CTL_PWR_STATUS 0x200A
215#define CTL_PWR_FAIL 0x200B
216
217#define PWR_CONTROL_ENABLE 0x01
218
219#define PWR_STATUS_ERROR_MASK 0x10
220#define PWR_STATUS_GOOD 0x0f
221
222/*
223 * Determine if the FPGA power is good for all supplies
224 */
225static bool fpga_power_good(struct fpga_dev *priv)
226{
227 u8 val;
228
229 val = ioread8(priv->regs + CTL_PWR_STATUS);
230 if (val & PWR_STATUS_ERROR_MASK)
231 return false;
232
233 return val == PWR_STATUS_GOOD;
234}
235
236/*
237 * Disable the FPGA power supplies
238 */
239static void fpga_disable_power_supplies(struct fpga_dev *priv)
240{
241 unsigned long start;
242 u8 val;
243
244 iowrite8(0x0, priv->regs + CTL_PWR_CONTROL);
245
246 /*
247 * Wait 500ms for the power rails to discharge
248 *
249 * Without this delay, the CTL-CPLD state machine can get into a
250 * state where it is waiting for the power-goods to assert, but they
251 * never do. This only happens when enabling and disabling the
252 * power sequencer very rapidly.
253 *
254 * The loop below will also wait for the power goods to de-assert,
255 * but testing has shown that they are always disabled by the time
256 * the sleep completes. However, omitting the sleep and only waiting
257 * for the power-goods to de-assert was not sufficient to ensure
258 * that the power sequencer would not wedge itself.
259 */
260 msleep(500);
261
262 start = jiffies;
263 while (time_before(jiffies, start + HZ)) {
264 val = ioread8(priv->regs + CTL_PWR_STATUS);
265 if (!(val & PWR_STATUS_GOOD))
266 break;
267
268 usleep_range(5000, 10000);
269 }
270
271 val = ioread8(priv->regs + CTL_PWR_STATUS);
272 if (val & PWR_STATUS_GOOD) {
273 dev_err(priv->dev, "power disable failed: "
274 "power goods: status 0x%.2x\n", val);
275 }
276
277 if (val & PWR_STATUS_ERROR_MASK) {
278 dev_err(priv->dev, "power disable failed: "
279 "alarm bit set: status 0x%.2x\n", val);
280 }
281}
282
283/**
284 * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies
285 * @priv: the driver's private data structure
286 *
287 * Enable the DATA-FPGA power supplies, waiting up to 1 second for
288 * them to enable successfully.
289 *
290 * Returns 0 on success, -ERRNO otherwise
291 */
292static int fpga_enable_power_supplies(struct fpga_dev *priv)
293{
294 unsigned long start = jiffies;
295
296 if (fpga_power_good(priv)) {
297 dev_dbg(priv->dev, "power was already good\n");
298 return 0;
299 }
300
301 iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL);
302 while (time_before(jiffies, start + HZ)) {
303 if (fpga_power_good(priv))
304 return 0;
305
306 usleep_range(5000, 10000);
307 }
308
309 return fpga_power_good(priv) ? 0 : -ETIMEDOUT;
310}
311
312/*
313 * Determine if the FPGA power supplies are all enabled
314 */
315static bool fpga_power_enabled(struct fpga_dev *priv)
316{
317 u8 val;
318
319 val = ioread8(priv->regs + CTL_PWR_CONTROL);
320 if (val & PWR_CONTROL_ENABLE)
321 return true;
322
323 return false;
324}
325
326/*
327 * Determine if the FPGA's are programmed and running correctly
328 */
329static bool fpga_running(struct fpga_dev *priv)
330{
331 if (!fpga_power_good(priv))
332 return false;
333
334 /* Check the config done bit */
335 return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18);
336}
337
338/*
339 * FPGA Programming Code
340 */
341
342/**
343 * fpga_program_block() - put a block of data into the programmer's FIFO
344 * @priv: the driver's private data structure
345 * @buf: the data to program
346 * @count: the length of data to program (must be a multiple of 4 bytes)
347 *
348 * Returns 0 on success, -ERRNO otherwise
349 */
350static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count)
351{
352 u32 *data = buf;
353 int size = fpga_fifo_size(priv->regs);
354 int i, len;
355 unsigned long timeout;
356
357 /* enforce correct data length for the FIFO */
358 BUG_ON(count % 4 != 0);
359
360 while (count > 0) {
361
362 /* Get the size of the block to write (maximum is FIFO_SIZE) */
363 len = min_t(size_t, count, size);
364 timeout = jiffies + HZ / 4;
365
366 /* Write the block */
367 for (i = 0; i < len / 4; i++)
368 fpga_fifo_write(priv->regs, data[i]);
369
370 /* Update the amounts left */
371 count -= len;
372 data += len / 4;
373
374 /* Wait for the fifo to empty */
375 while (true) {
376
377 if (fpga_fifo_empty(priv->regs)) {
378 break;
379 } else {
380 dev_dbg(priv->dev, "Fifo not empty\n");
381 cpu_relax();
382 }
383
384 if (fpga_config_error(priv->regs)) {
385 dev_err(priv->dev, "Error detected\n");
386 return -EIO;
387 }
388
389 if (time_after(jiffies, timeout)) {
390 dev_err(priv->dev, "Fifo drain timeout\n");
391 return -ETIMEDOUT;
392 }
393
394 usleep_range(5000, 10000);
395 }
396 }
397
398 return 0;
399}
400
401/**
402 * fpga_program_cpu() - program the DATA-FPGA's using the CPU
403 * @priv: the driver's private data structure
404 *
405 * This is useful when the DMA programming method fails. It is possible to
406 * wedge the Freescale DMA controller such that the DMA programming method
407 * always fails. This method has always succeeded.
408 *
409 * Returns 0 on success, -ERRNO otherwise
410 */
411static noinline int fpga_program_cpu(struct fpga_dev *priv)
412{
413 int ret;
414
415 /* Disable the programmer */
416 fpga_programmer_disable(priv);
417
418 /* Set the total byte count */
419 fpga_set_byte_count(priv->regs, priv->bytes);
420 dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
421
422 /* Enable the controller for programming */
423 fpga_programmer_enable(priv, false);
424 dev_dbg(priv->dev, "enabled the controller\n");
425
426 /* Write each chunk of the FPGA bitfile to FPGA programmer */
427 ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes);
428 if (ret)
429 goto out_disable_controller;
430
431 /* Wait for the interrupt handler to signal that programming finished */
432 ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
433 if (!ret) {
434 dev_err(priv->dev, "Timed out waiting for completion\n");
435 ret = -ETIMEDOUT;
436 goto out_disable_controller;
437 }
438
439 /* Retrieve the status from the interrupt handler */
440 ret = priv->status;
441
442out_disable_controller:
443 fpga_programmer_disable(priv);
444 return ret;
445}
446
447#define FIFO_DMA_ADDRESS 0xf0003000
448#define FIFO_MAX_LEN 4096
449
450/**
451 * fpga_program_dma() - program the DATA-FPGA's using the DMA engine
452 * @priv: the driver's private data structure
453 *
454 * Program the DATA-FPGA's using the Freescale DMA engine. This requires that
455 * the engine is programmed such that the hardware DMA request lines can
456 * control the entire DMA transaction. The system controller FPGA then
457 * completely offloads the programming from the CPU.
458 *
459 * Returns 0 on success, -ERRNO otherwise
460 */
461static noinline int fpga_program_dma(struct fpga_dev *priv)
462{
463 struct videobuf_dmabuf *vb = &priv->vb;
464 struct dma_chan *chan = priv->chan;
465 struct dma_async_tx_descriptor *tx;
466 size_t num_pages, len, avail = 0;
467 struct dma_slave_config config;
468 struct scatterlist *sg;
469 struct sg_table table;
470 dma_cookie_t cookie;
471 int ret, i;
472
473 /* Disable the programmer */
474 fpga_programmer_disable(priv);
475
476 /* Allocate a scatterlist for the DMA destination */
477 num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN);
478 ret = sg_alloc_table(&table, num_pages, GFP_KERNEL);
479 if (ret) {
480 dev_err(priv->dev, "Unable to allocate dst scatterlist\n");
481 ret = -ENOMEM;
482 goto out_return;
483 }
484
485 /*
486 * This is an ugly hack
487 *
488 * We fill in a scatterlist as if it were mapped for DMA. This is
489 * necessary because there exists no better structure for this
490 * inside the kernel code.
491 *
492 * As an added bonus, we can use the DMAEngine API for all of this,
493 * rather than inventing another extremely similar API.
494 */
495 avail = priv->bytes;
496 for_each_sg(table.sgl, sg, num_pages, i) {
497 len = min_t(size_t, avail, FIFO_MAX_LEN);
498 sg_dma_address(sg) = FIFO_DMA_ADDRESS;
499 sg_dma_len(sg) = len;
500
501 avail -= len;
502 }
503
504 /* Map the buffer for DMA */
505 ret = videobuf_dma_map(priv->dev, &priv->vb);
506 if (ret) {
507 dev_err(priv->dev, "Unable to map buffer for DMA\n");
508 goto out_free_table;
509 }
510
511 /*
512 * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per
513 * transaction, and then put it under external control
514 */
515 memset(&config, 0, sizeof(config));
516 config.direction = DMA_TO_DEVICE;
517 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
518 config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
519 ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
520 (unsigned long)&config);
521 if (ret) {
522 dev_err(priv->dev, "DMA slave configuration failed\n");
523 goto out_dma_unmap;
524 }
525
526 ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1);
527 if (ret) {
528 dev_err(priv->dev, "DMA external control setup failed\n");
529 goto out_dma_unmap;
530 }
531
532 /* setup and submit the DMA transaction */
533 tx = chan->device->device_prep_dma_sg(chan,
534 table.sgl, num_pages,
535 vb->sglist, vb->sglen, 0);
536 if (!tx) {
537 dev_err(priv->dev, "Unable to prep DMA transaction\n");
538 ret = -ENOMEM;
539 goto out_dma_unmap;
540 }
541
542 cookie = tx->tx_submit(tx);
543 if (dma_submit_error(cookie)) {
544 dev_err(priv->dev, "Unable to submit DMA transaction\n");
545 ret = -ENOMEM;
546 goto out_dma_unmap;
547 }
548
549 dma_async_memcpy_issue_pending(chan);
550
551 /* Set the total byte count */
552 fpga_set_byte_count(priv->regs, priv->bytes);
553 dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes);
554
555 /* Enable the controller for DMA programming */
556 fpga_programmer_enable(priv, true);
557 dev_dbg(priv->dev, "enabled the controller\n");
558
559 /* Wait for the interrupt handler to signal that programming finished */
560 ret = wait_for_completion_timeout(&priv->completion, 2 * HZ);
561 if (!ret) {
562 dev_err(priv->dev, "Timed out waiting for completion\n");
563 ret = -ETIMEDOUT;
564 goto out_disable_controller;
565 }
566
567 /* Retrieve the status from the interrupt handler */
568 ret = priv->status;
569
570out_disable_controller:
571 fpga_programmer_disable(priv);
572out_dma_unmap:
573 videobuf_dma_unmap(priv->dev, vb);
574out_free_table:
575 sg_free_table(&table);
576out_return:
577 return ret;
578}
579
580/*
581 * Interrupt Handling
582 */
583
584static irqreturn_t fpga_irq(int irq, void *dev_id)
585{
586 struct fpga_dev *priv = dev_id;
587
588 /* Save the status */
589 priv->status = fpga_config_error(priv->regs) ? -EIO : 0;
590 dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status);
591 fpga_dump_registers(priv);
592
593 /* Disabling the programmer clears the interrupt */
594 fpga_programmer_disable(priv);
595
596 /* Notify any waiters */
597 complete(&priv->completion);
598
599 return IRQ_HANDLED;
600}
601
602/*
603 * SYSFS Helpers
604 */
605
606/**
607 * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's
608 * @priv: the driver's private data structure
609 *
610 * LOCKING: must hold priv->lock
611 */
612static int fpga_do_stop(struct fpga_dev *priv)
613{
614 u32 val;
615
616 /* Set the led to unprogrammed */
617 ledtrig_fpga_programmed(false);
618
619 /* Pulse the config line to reset the FPGA's */
620 val = CFG_CTL_ENABLE | CFG_CTL_RESET;
621 iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL);
622 iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL);
623
624 return 0;
625}
626
627static noinline int fpga_do_program(struct fpga_dev *priv)
628{
629 int ret;
630
631 if (priv->bytes != priv->fw_size) {
632 dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, "
633 "should be %zu bytes\n",
634 priv->bytes, priv->fw_size);
635 return -EINVAL;
636 }
637
638 if (!fpga_power_enabled(priv)) {
639 dev_err(priv->dev, "Power not enabled\n");
640 return -EINVAL;
641 }
642
643 if (!fpga_power_good(priv)) {
644 dev_err(priv->dev, "Power not good\n");
645 return -EINVAL;
646 }
647
648 /* Set the LED to unprogrammed */
649 ledtrig_fpga_programmed(false);
650
651 /* Try to program the FPGA's using DMA */
652 ret = fpga_program_dma(priv);
653
654 /* If DMA failed or doesn't exist, try with CPU */
655 if (ret) {
656 dev_warn(priv->dev, "Falling back to CPU programming\n");
657 ret = fpga_program_cpu(priv);
658 }
659
660 if (ret) {
661 dev_err(priv->dev, "Unable to program FPGA's\n");
662 return ret;
663 }
664
665 /* Drop the firmware bitfile from memory */
666 fpga_drop_firmware_data(priv);
667
668 dev_dbg(priv->dev, "FPGA programming successful\n");
669 ledtrig_fpga_programmed(true);
670
671 return 0;
672}
673
674/*
675 * File Operations
676 */
677
678static int fpga_open(struct inode *inode, struct file *filp)
679{
680 /*
681 * The miscdevice layer puts our struct miscdevice into the
682 * filp->private_data field. We use this to find our private
683 * data and then overwrite it with our own private structure.
684 */
685 struct fpga_dev *priv = container_of(filp->private_data,
686 struct fpga_dev, miscdev);
687 unsigned int nr_pages;
688 int ret;
689
690 /* We only allow one process at a time */
691 ret = mutex_lock_interruptible(&priv->lock);
692 if (ret)
693 return ret;
694
695 filp->private_data = priv;
696 kref_get(&priv->ref);
697
698 /* Truncation: drop any existing data */
699 if (filp->f_flags & O_TRUNC)
700 priv->bytes = 0;
701
702 /* Check if we have already allocated a buffer */
703 if (priv->vb_allocated)
704 return 0;
705
706 /* Allocate a buffer to hold enough data for the bitfile */
707 nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
708 ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages);
709 if (ret) {
710 dev_err(priv->dev, "unable to allocate data buffer\n");
711 mutex_unlock(&priv->lock);
712 kref_put(&priv->ref, fpga_dev_remove);
713 return ret;
714 }
715
716 priv->vb_allocated = true;
717 return 0;
718}
719
720static int fpga_release(struct inode *inode, struct file *filp)
721{
722 struct fpga_dev *priv = filp->private_data;
723
724 mutex_unlock(&priv->lock);
725 kref_put(&priv->ref, fpga_dev_remove);
726 return 0;
727}
728
729static ssize_t fpga_write(struct file *filp, const char __user *buf,
730 size_t count, loff_t *f_pos)
731{
732 struct fpga_dev *priv = filp->private_data;
733
734 /* FPGA bitfiles have an exact size: disallow anything else */
735 if (priv->bytes >= priv->fw_size)
736 return -ENOSPC;
737
738 count = min_t(size_t, priv->fw_size - priv->bytes, count);
739 if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count))
740 return -EFAULT;
741
742 priv->bytes += count;
743 return count;
744}
745
746static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
747 loff_t *f_pos)
748{
749 struct fpga_dev *priv = filp->private_data;
750
751 count = min_t(size_t, priv->bytes - *f_pos, count);
752 if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count))
753 return -EFAULT;
754
755 *f_pos += count;
756 return count;
757}
758
759static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
760{
761 struct fpga_dev *priv = filp->private_data;
762 loff_t newpos;
763
764 /* only read-only opens are allowed to seek */
765 if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
766 return -EINVAL;
767
768 switch (origin) {
769 case SEEK_SET: /* seek relative to the beginning of the file */
770 newpos = offset;
771 break;
772 case SEEK_CUR: /* seek relative to current position in the file */
773 newpos = filp->f_pos + offset;
774 break;
775 case SEEK_END: /* seek relative to the end of the file */
776 newpos = priv->fw_size - offset;
777 break;
778 default:
779 return -EINVAL;
780 }
781
782 /* check for sanity */
783 if (newpos > priv->fw_size)
784 return -EINVAL;
785
786 filp->f_pos = newpos;
787 return newpos;
788}
789
790static const struct file_operations fpga_fops = {
791 .open = fpga_open,
792 .release = fpga_release,
793 .write = fpga_write,
794 .read = fpga_read,
795 .llseek = fpga_llseek,
796};
797
798/*
799 * Device Attributes
800 */
801
802static ssize_t pfail_show(struct device *dev, struct device_attribute *attr,
803 char *buf)
804{
805 struct fpga_dev *priv = dev_get_drvdata(dev);
806 u8 val;
807
808 val = ioread8(priv->regs + CTL_PWR_FAIL);
809 return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val);
810}
811
812static ssize_t pgood_show(struct device *dev, struct device_attribute *attr,
813 char *buf)
814{
815 struct fpga_dev *priv = dev_get_drvdata(dev);
816 return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv));
817}
818
819static ssize_t penable_show(struct device *dev, struct device_attribute *attr,
820 char *buf)
821{
822 struct fpga_dev *priv = dev_get_drvdata(dev);
823 return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv));
824}
825
826static ssize_t penable_store(struct device *dev, struct device_attribute *attr,
827 const char *buf, size_t count)
828{
829 struct fpga_dev *priv = dev_get_drvdata(dev);
830 unsigned long val;
831 int ret;
832
833 if (strict_strtoul(buf, 0, &val))
834 return -EINVAL;
835
836 if (val) {
837 ret = fpga_enable_power_supplies(priv);
838 if (ret)
839 return ret;
840 } else {
841 fpga_do_stop(priv);
842 fpga_disable_power_supplies(priv);
843 }
844
845 return count;
846}
847
848static ssize_t program_show(struct device *dev, struct device_attribute *attr,
849 char *buf)
850{
851 struct fpga_dev *priv = dev_get_drvdata(dev);
852 return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv));
853}
854
855static ssize_t program_store(struct device *dev, struct device_attribute *attr,
856 const char *buf, size_t count)
857{
858 struct fpga_dev *priv = dev_get_drvdata(dev);
859 unsigned long val;
860 int ret;
861
862 if (strict_strtoul(buf, 0, &val))
863 return -EINVAL;
864
865 /* We can't have an image writer and be programming simultaneously */
866 if (mutex_lock_interruptible(&priv->lock))
867 return -ERESTARTSYS;
868
869 /* Program or Reset the FPGA's */
870 ret = val ? fpga_do_program(priv) : fpga_do_stop(priv);
871 if (ret)
872 goto out_unlock;
873
874 /* Success */
875 ret = count;
876
877out_unlock:
878 mutex_unlock(&priv->lock);
879 return ret;
880}
881
882static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL);
883static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL);
884static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR,
885 penable_show, penable_store);
886
887static DEVICE_ATTR(program, S_IRUGO | S_IWUSR,
888 program_show, program_store);
889
890static struct attribute *fpga_attributes[] = {
891 &dev_attr_power_fail.attr,
892 &dev_attr_power_good.attr,
893 &dev_attr_power_enable.attr,
894 &dev_attr_program.attr,
895 NULL,
896};
897
898static const struct attribute_group fpga_attr_group = {
899 .attrs = fpga_attributes,
900};
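
Putting the char device and the sysfs attributes together, one plausible userspace sequence (paths are passed in as arguments because nothing in the driver fixes them) is: write the complete bitfile to the misc device, close it so the mutex taken in fpga_open() is dropped, then write "1" to the program attribute to kick fpga_do_program(). A hedged userspace sketch, not part of the driver:

#include <fcntl.h>
#include <unistd.h>

/* argv[1]: bitfile, argv[2]: misc device node, argv[3]: sysfs "program" file */
int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, dev, prog;

	if (argc != 4)
		return 1;

	in = open(argv[1], O_RDONLY);
	dev = open(argv[2], O_WRONLY | O_TRUNC);	/* O_TRUNC drops stale data */
	if (in < 0 || dev < 0)
		return 1;

	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(dev, buf, n) != n)
			return 1;	/* short write: wrong size or -ENOSPC */

	close(dev);	/* releases priv->lock so program_store() can take it */
	close(in);

	prog = open(argv[3], O_WRONLY);
	if (prog < 0 || write(prog, "1", 1) != 1)
		return 1;
	close(prog);
	return 0;
}
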
901
902/*
903 * OpenFirmware Device Subsystem
904 */
905
906#define SYS_REG_VERSION 0x00
907#define SYS_REG_GEOGRAPHIC 0x10
908
909static bool dma_filter(struct dma_chan *chan, void *data)
910{
911 /*
912 * DMA Channel #0 is the only acceptable device
913 *
914 * This probably won't survive an unload/load cycle of the Freescale
915 * DMAEngine driver, but that won't be a problem
916 */
917 return chan->chan_id == 0 && chan->device->dev_id == 0;
918}
919
920static int fpga_of_remove(struct platform_device *op)
921{
922 struct fpga_dev *priv = dev_get_drvdata(&op->dev);
923 struct device *this_device = priv->miscdev.this_device;
924
925 sysfs_remove_group(&this_device->kobj, &fpga_attr_group);
926 misc_deregister(&priv->miscdev);
927
928 free_irq(priv->irq, priv);
929 irq_dispose_mapping(priv->irq);
930
931 /* make sure the power supplies are off */
932 fpga_disable_power_supplies(priv);
933
934 /* unmap registers */
935 iounmap(priv->immr);
936 iounmap(priv->regs);
937
938 dma_release_channel(priv->chan);
939
940 /* drop our reference to the private data structure */
941 kref_put(&priv->ref, fpga_dev_remove);
942 return 0;
943}
944
945/* CTL-CPLD Version Register */
946#define CTL_CPLD_VERSION 0x2000
947
948static int fpga_of_probe(struct platform_device *op,
949 const struct of_device_id *match)
950{
951 struct device_node *of_node = op->dev.of_node;
952 struct device *this_device;
953 struct fpga_dev *priv;
954 dma_cap_mask_t mask;
955 u32 ver;
956 int ret;
957
958 /* Allocate private data */
959 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
960 if (!priv) {
961 dev_err(&op->dev, "Unable to allocate private data\n");
962 ret = -ENOMEM;
963 goto out_return;
964 }
965
966 /* Setup the miscdevice */
967 priv->miscdev.minor = MISC_DYNAMIC_MINOR;
968 priv->miscdev.name = drv_name;
969 priv->miscdev.fops = &fpga_fops;
970
971 kref_init(&priv->ref);
972
973 dev_set_drvdata(&op->dev, priv);
974 priv->dev = &op->dev;
975 mutex_init(&priv->lock);
976 init_completion(&priv->completion);
977 videobuf_dma_init(&priv->vb);
978
979 dev_set_drvdata(priv->dev, priv);
980 dma_cap_zero(mask);
981 dma_cap_set(DMA_MEMCPY, mask);
982 dma_cap_set(DMA_INTERRUPT, mask);
983 dma_cap_set(DMA_SLAVE, mask);
984 dma_cap_set(DMA_SG, mask);
985
986 /* Get control of DMA channel #0 */
987 priv->chan = dma_request_channel(mask, dma_filter, NULL);
988 if (!priv->chan) {
989 dev_err(&op->dev, "Unable to acquire DMA channel #0\n");
990 ret = -ENODEV;
991 goto out_free_priv;
992 }
993
994 /* Remap the registers for use */
995 priv->regs = of_iomap(of_node, 0);
996 if (!priv->regs) {
997 dev_err(&op->dev, "Unable to ioremap registers\n");
998 ret = -ENOMEM;
999 goto out_dma_release_channel;
1000 }
1001
1002 /* Remap the IMMR for use */
1003 priv->immr = ioremap(get_immrbase(), 0x100000);
1004 if (!priv->immr) {
1005 dev_err(&op->dev, "Unable to ioremap IMMR\n");
1006 ret = -ENOMEM;
1007 goto out_unmap_regs;
1008 }
1009
1010 /*
1011 * Check that external DMA is configured
1012 *
1013 * U-Boot does this for us, but we should check it and bail out if
1014 * there is a problem. Failing to have this register setup correctly
1015 * will cause the DMA controller to transfer a single cacheline's
1016 * worth of data and then wedge itself.
1017 */
1018 if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) {
1019 dev_err(&op->dev, "External DMA control not configured\n");
1020 ret = -ENODEV;
1021 goto out_unmap_immr;
1022 }
1023
1024 /*
1025 * Check the CTL-CPLD version
1026 *
1027 * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we
1028 * don't want to run on any version of the CTL-CPLD that does not use
1029 * a compatible register layout.
1030 *
1031 * v2: changed register layout, added power sequencer
1032 * v3: added glitch filter on the i2c overcurrent/overtemp outputs
1033 */
1034 ver = ioread8(priv->regs + CTL_CPLD_VERSION);
1035 if (ver != 0x02 && ver != 0x03) {
1036 dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n");
1037 ret = -ENODEV;
1038 goto out_unmap_immr;
1039 }
1040
1041 /* Set the exact size that the firmware image should be */
1042 ver = ioread32be(priv->regs + SYS_REG_VERSION);
1043 priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90;
1044
1045 /* Find the correct IRQ number */
1046 priv->irq = irq_of_parse_and_map(of_node, 0);
1047 if (priv->irq == NO_IRQ) {
1048 dev_err(&op->dev, "Unable to find IRQ line\n");
1049 ret = -ENODEV;
1050 goto out_unmap_immr;
1051 }
1052
1053 /* Request the IRQ */
1054 ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv);
1055 if (ret) {
1056 dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq);
1057 ret = -ENODEV;
1058 goto out_irq_dispose_mapping;
1059 }
1060
1061 /* Reset and stop the FPGA's, just in case */
1062 fpga_do_stop(priv);
1063
1064 /* Register the miscdevice */
1065 ret = misc_register(&priv->miscdev);
1066 if (ret) {
1067 dev_err(&op->dev, "Unable to register miscdevice\n");
1068 goto out_free_irq;
1069 }
1070
1071 /* Create the sysfs files */
1072 this_device = priv->miscdev.this_device;
1073 dev_set_drvdata(this_device, priv);
1074 ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group);
1075 if (ret) {
1076 dev_err(&op->dev, "Unable to create sysfs files\n");
1077 goto out_misc_deregister;
1078 }
1079
1080 dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n",
1081 (ver & (1 << 17)) ? "Correlator" : "Digitizer",
1082 (ver & (1 << 16)) ? "B" : "A",
1083 (ver & (1 << 18)) ? "EP2S130" : "EP2S90");
1084
1085 return 0;
1086
1087out_misc_deregister:
1088 misc_deregister(&priv->miscdev);
1089out_free_irq:
1090 free_irq(priv->irq, priv);
1091out_irq_dispose_mapping:
1092 irq_dispose_mapping(priv->irq);
1093out_unmap_immr:
1094 iounmap(priv->immr);
1095out_unmap_regs:
1096 iounmap(priv->regs);
1097out_dma_release_channel:
1098 dma_release_channel(priv->chan);
1099out_free_priv:
1100 kref_put(&priv->ref, fpga_dev_remove);
1101out_return:
1102 return ret;
1103}
1104
1105static struct of_device_id fpga_of_match[] = {
1106 { .compatible = "carma,fpga-programmer", },
1107 {},
1108};
1109
1110static struct of_platform_driver fpga_of_driver = {
1111 .probe = fpga_of_probe,
1112 .remove = fpga_of_remove,
1113 .driver = {
1114 .name = drv_name,
1115 .of_match_table = fpga_of_match,
1116 .owner = THIS_MODULE,
1117 },
1118};
1119
1120/*
1121 * Module Init / Exit
1122 */
1123
1124static int __init fpga_init(void)
1125{
1126 led_trigger_register_simple("fpga", &ledtrig_fpga);
1127 return of_register_platform_driver(&fpga_of_driver);
1128}
1129
1130static void __exit fpga_exit(void)
1131{
1132 of_unregister_platform_driver(&fpga_of_driver);
1133 led_trigger_unregister_simple(ledtrig_fpga);
1134}
1135
1136MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1137MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer");
1138MODULE_LICENSE("GPL");
1139
1140module_init(fpga_init);
1141module_exit(fpga_exit);
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
new file mode 100644
index 000000000000..3965821fef17
--- /dev/null
+++ b/drivers/misc/carma/carma-fpga.c
@@ -0,0 +1,1433 @@
1/*
2 * CARMA DATA-FPGA Access Driver
3 *
4 * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/*
13 * FPGA Memory Dump Format
14 *
15 * FPGA #0 control registers (32 x 32-bit words)
16 * FPGA #1 control registers (32 x 32-bit words)
17 * FPGA #2 control registers (32 x 32-bit words)
18 * FPGA #3 control registers (32 x 32-bit words)
19 * SYSFPGA control registers (32 x 32-bit words)
20 * FPGA #0 correlation array (NUM_CORL0 correlation blocks)
21 * FPGA #1 correlation array (NUM_CORL1 correlation blocks)
22 * FPGA #2 correlation array (NUM_CORL2 correlation blocks)
23 * FPGA #3 correlation array (NUM_CORL3 correlation blocks)
24 *
25 * Each correlation array consists of:
26 *
27 * Correlation Data (2 x NUM_LAGSn x 32-bit words)
28 * Pipeline Metadata (2 x NUM_METAn x 32-bit words)
29 * Quantization Counters (2 x NUM_QCNTn x 32-bit words)
30 *
31 * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from
32 * the FPGA configuration registers. They do not change once the FPGAs
33 * have been programmed; they only change on re-programming.
34 */
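
The layout above implies a simple size calculation; a hedged sketch of it follows (hypothetical helper, kernel types assumed; the real driver derives the four per-FPGA counts from the FPGA configuration registers):

static size_t example_dump_size(const unsigned int num_corl[4],
				const unsigned int num_lags[4],
				const unsigned int num_meta[4],
				const unsigned int num_qcnt[4])
{
	/* 4 data FPGAs + SYSFPGA: 32 x 32-bit control words each */
	size_t bytes = 5 * 32 * sizeof(u32);
	int i;

	for (i = 0; i < 4; i++)
		bytes += num_corl[i] * 2 * sizeof(u32) *
			 (num_lags[i] + num_meta[i] + num_qcnt[i]);

	return bytes;
}
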
35
36/*
37 * Basic Description:
38 *
39 * This driver is used to capture correlation spectra off of the four data
40 * processing FPGAs. The FPGAs are often reprogrammed at runtime; therefore,
41 * this driver supports dynamic enable/disable of capture while the device
42 * remains open.
43 *
44 * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast
45 * capture rate, all buffers are pre-allocated to avoid any potentially long
46 * running memory allocations while capturing.
47 *
48 * There are two lists and one pointer which are used to keep track of the
49 * different states of data buffers.
50 *
51 * 1) free list
52 * This list holds all empty data buffers which are ready to receive data.
53 *
54 * 2) inflight pointer
55 * This pointer holds the currently inflight data buffer. This buffer is having
56 * data copied into it by the DMA engine.
57 *
58 * 3) used list
59 * This list holds data buffers which have been filled, and are waiting to be
60 * read by userspace.
61 *
62 * All buffers start life on the free list, then move successively to the
63 * inflight pointer, and then to the used list. After they have been read by
64 * userspace, they are moved back to the free list. The cycle repeats as long
65 * as necessary.
66 *
67 * It should be noted that all buffers are mapped and ready for DMA when they
68 * are on any of the three lists. They are only unmapped when they are in the
69 * process of being read by userspace.
70 */
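
A minimal sketch of the rotation just described, with hypothetical names and no locking (the real driver does all of this under its spinlock, using the struct data_buf defined further down):

static LIST_HEAD(example_free);
static LIST_HEAD(example_used);
static struct data_buf *example_inflight;

static void example_start_next_capture(void)
{
	/* free list -> inflight: this buffer becomes the DMA target */
	example_inflight = list_first_entry(&example_free,
					    struct data_buf, entry);
	list_del_init(&example_inflight->entry);
}

static void example_capture_complete(void)
{
	/* inflight -> used list: ready for a userspace read() */
	list_add_tail(&example_inflight->entry, &example_used);
	example_inflight = NULL;
}

static void example_reader_finished(struct data_buf *buf)
{
	/* used -> free: recycle once userspace has consumed it */
	list_add_tail(&buf->entry, &example_free);
}
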
71
72/*
73 * Notes on the IRQ masking scheme:
74 *
75 * The IRQ masking scheme here is different than most other hardware. The only
76 * way for the DATA-FPGAs to detect if the kernel has taken too long to copy
77 * the data is if the status registers are not cleared before the next
78 * correlation data dump is ready.
79 *
80 * The interrupt line is connected to the status registers, such that when they
81 * are cleared, the interrupt is de-asserted. Therein lies our problem. We need
82 * to schedule a long-running DMA operation and return from the interrupt
83 * handler quickly, but we cannot clear the status registers.
84 *
85 * To handle this, the system controller FPGA has the capability to connect the
86 * interrupt line to a user-controlled GPIO pin. This pin is driven high
87 * (unasserted) and left that way. To mask the interrupt, we change the
88 * interrupt source to the GPIO pin. Tada, we hid the interrupt. :)
89 */
90
91#include <linux/of_platform.h>
92#include <linux/dma-mapping.h>
93#include <linux/miscdevice.h>
94#include <linux/interrupt.h>
95#include <linux/dmaengine.h>
96#include <linux/seq_file.h>
97#include <linux/highmem.h>
98#include <linux/debugfs.h>
99#include <linux/kernel.h>
100#include <linux/module.h>
101#include <linux/poll.h>
102#include <linux/init.h>
103#include <linux/slab.h>
104#include <linux/kref.h>
105#include <linux/io.h>
106
107#include <media/videobuf-dma-sg.h>
108
109/* system controller registers */
110#define SYS_IRQ_SOURCE_CTL 0x24
111#define SYS_IRQ_OUTPUT_EN 0x28
112#define SYS_IRQ_OUTPUT_DATA 0x2C
113#define SYS_IRQ_INPUT_DATA 0x30
114#define SYS_FPGA_CONFIG_STATUS 0x44
115
116/* GPIO IRQ line assignment */
117#define IRQ_CORL_DONE 0x10
118
119/* FPGA registers */
120#define MMAP_REG_VERSION 0x00
121#define MMAP_REG_CORL_CONF1 0x08
122#define MMAP_REG_CORL_CONF2 0x0C
123#define MMAP_REG_STATUS 0x48
124
125#define SYS_FPGA_BLOCK 0xF0000000
126
127#define DATA_FPGA_START 0x400000
128#define DATA_FPGA_SIZE 0x80000
129
130static const char drv_name[] = "carma-fpga";
131
132#define NUM_FPGA 4
133
134#define MIN_DATA_BUFS 8
135#define MAX_DATA_BUFS 64
136
137struct fpga_info {
138 unsigned int num_lag_ram;
139 unsigned int blk_size;
140};
141
142struct data_buf {
143 struct list_head entry;
144 struct videobuf_dmabuf vb;
145 size_t size;
146};
147
148struct fpga_device {
149 /* character device */
150 struct miscdevice miscdev;
151 struct device *dev;
152 struct mutex mutex;
153
154 /* reference count */
155 struct kref ref;
156
157 /* FPGA registers and information */
158 struct fpga_info info[NUM_FPGA];
159 void __iomem *regs;
160 int irq;
161
162 /* FPGA Physical Address/Size Information */
163 resource_size_t phys_addr;
164 size_t phys_size;
165
166 /* DMA structures */
167 struct sg_table corl_table;
168 unsigned int corl_nents;
169 struct dma_chan *chan;
170
171 /* Protection for all members below */
172 spinlock_t lock;
173
174 /* Device enable/disable flag */
175 bool enabled;
176
177 /* Correlation data buffers */
178 wait_queue_head_t wait;
179 struct list_head free;
180 struct list_head used;
181 struct data_buf *inflight;
182
183 /* Information about data buffers */
184 unsigned int num_dropped;
185 unsigned int num_buffers;
186 size_t bufsize;
187 struct dentry *dbg_entry;
188};
189
190struct fpga_reader {
191 struct fpga_device *priv;
192 struct data_buf *buf;
193 off_t buf_start;
194};
195
196static void fpga_device_release(struct kref *ref)
197{
198 struct fpga_device *priv = container_of(ref, struct fpga_device, ref);
199
200 /* the last reader has exited, cleanup the last bits */
201 mutex_destroy(&priv->mutex);
202 kfree(priv);
203}
204
205/*
206 * Data Buffer Allocation Helpers
207 */
208
209/**
210 * data_free_buffer() - free a single data buffer and all allocated memory
211 * @buf: the buffer to free
212 *
213 * This will free all of the pages allocated to the given data buffer, and
214 * then free the structure itself
215 */
216static void data_free_buffer(struct data_buf *buf)
217{
218 /* It is ok to free a NULL buffer */
219 if (!buf)
220 return;
221
222 /* free all memory */
223 videobuf_dma_free(&buf->vb);
224 kfree(buf);
225}
226
227/**
228 * data_alloc_buffer() - allocate and fill a data buffer with pages
229 * @bytes: the number of bytes required
230 *
231 * This allocates all space needed for a data buffer. It must be mapped before
232 * use in a DMA transaction using videobuf_dma_map().
233 *
234 * Returns NULL on failure
235 */
236static struct data_buf *data_alloc_buffer(const size_t bytes)
237{
238 unsigned int nr_pages;
239 struct data_buf *buf;
240 int ret;
241
242 /* calculate the number of pages necessary */
243 nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
244
245 /* allocate the buffer structure */
246 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
247 if (!buf)
248 goto out_return;
249
250 /* initialize internal fields */
251 INIT_LIST_HEAD(&buf->entry);
252 buf->size = bytes;
253
254 /* allocate the videobuf */
255 videobuf_dma_init(&buf->vb);
256 ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
257 if (ret)
258 goto out_free_buf;
259
260 return buf;
261
262out_free_buf:
263 kfree(buf);
264out_return:
265 return NULL;
266}
267
268/**
269 * data_free_buffers() - free all allocated buffers
270 * @priv: the driver's private data structure
271 *
272 * Free all buffers allocated by the driver (except those currently in the
273 * process of being read by userspace).
274 *
275 * LOCKING: must hold dev->mutex
276 * CONTEXT: user
277 */
278static void data_free_buffers(struct fpga_device *priv)
279{
280 struct data_buf *buf, *tmp;
281
282 /* the device should be stopped, no DMA in progress */
283 BUG_ON(priv->inflight != NULL);
284
285 list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
286 list_del_init(&buf->entry);
287 videobuf_dma_unmap(priv->dev, &buf->vb);
288 data_free_buffer(buf);
289 }
290
291 list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
292 list_del_init(&buf->entry);
293 videobuf_dma_unmap(priv->dev, &buf->vb);
294 data_free_buffer(buf);
295 }
296
297 priv->num_buffers = 0;
298 priv->bufsize = 0;
299}
300
301/**
302 * data_alloc_buffers() - allocate 1 second's worth of data buffers
303 * @priv: the driver's private data structure
304 *
305 * Allocate enough buffers for a whole second's worth of data
306 *
307 * This routine will attempt to degrade nicely by succeeding even if a full
308 * second worth of data buffers could not be allocated, as long as a minimum
309 * number were allocated. In this case, it will print a message to the kernel
310 * log.
311 *
312 * The device must not be modifying any lists when this is called.
313 *
314 * CONTEXT: user
315 * LOCKING: must hold dev->mutex
316 *
317 * Returns 0 on success, -ERRNO otherwise
318 */
319static int data_alloc_buffers(struct fpga_device *priv)
320{
321 struct data_buf *buf;
322 int i, ret;
323
324 for (i = 0; i < MAX_DATA_BUFS; i++) {
325
326 /* allocate a buffer */
327 buf = data_alloc_buffer(priv->bufsize);
328 if (!buf)
329 break;
330
331 /* map it for DMA */
332 ret = videobuf_dma_map(priv->dev, &buf->vb);
333 if (ret) {
334 data_free_buffer(buf);
335 break;
336 }
337
338 /* add it to the list of free buffers */
339 list_add_tail(&buf->entry, &priv->free);
340 priv->num_buffers++;
341 }
342
343 /* Make sure we allocated the minimum required number of buffers */
344 if (priv->num_buffers < MIN_DATA_BUFS) {
345 dev_err(priv->dev, "Unable to allocate enough data buffers\n");
346 data_free_buffers(priv);
347 return -ENOMEM;
348 }
349
350 /* Warn if we are running in a degraded state, but do not fail */
351 if (priv->num_buffers < MAX_DATA_BUFS) {
352 dev_warn(priv->dev,
353 "Unable to allocate %d buffers, using %d buffers instead\n",
354 MAX_DATA_BUFS, i);
355 }
356
357 return 0;
358}
359
360/*
361 * DMA Operations Helpers
362 */
363
364/**
365 * fpga_start_addr() - get the physical address of a DATA-FPGA
366 * @priv: the driver's private data structure
367 * @fpga: the DATA-FPGA number (zero based)
368 */
369static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga)
370{
371 return priv->phys_addr + 0x400000 + (0x80000 * fpga);
372}
373
374/**
375 * fpga_block_addr() - get the physical address of a correlation data block
376 * @priv: the driver's private data structure
377 * @fpga: the DATA-FPGA number (zero based)
378 * @blknum: the correlation block number (zero based)
379 */
380static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga,
381 unsigned int blknum)
382{
383 return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum));
384}
385
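/*
 * Worked example (with a hypothetical FPGA region at physical address
 * 0xc0000000): lag RAM block 2 of DATA-FPGA 1 would be fetched from
 *
 *	fpga_start_addr(priv, 1)    = 0xc0000000 + 0x400000 + 1 * 0x80000 = 0xc0480000
 *	fpga_block_addr(priv, 1, 2) = 0xc0480000 + 0x10000 * (1 + 2)      = 0xc04b0000
 */
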
386#define REG_BLOCK_SIZE (32 * 4)
387
388/**
389 * data_setup_corl_table() - create the scatterlist for correlation dumps
390 * @priv: the driver's private data structure
391 *
392 * Create the scatterlist for transferring a correlation dump from the
393 * DATA FPGAs. This structure will be reused for each buffer that needs
394 * to be filled with correlation data.
395 *
396 * Returns 0 on success, -ERRNO otherwise
397 */
398static int data_setup_corl_table(struct fpga_device *priv)
399{
400 struct sg_table *table = &priv->corl_table;
401 struct scatterlist *sg;
402 struct fpga_info *info;
403 int i, j, ret;
404
405 /* Calculate the number of entries needed */
406 priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
407 for (i = 0; i < NUM_FPGA; i++)
408 priv->corl_nents += priv->info[i].num_lag_ram;
409
410 /* Allocate the scatterlist table */
411 ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
412 if (ret) {
413 dev_err(priv->dev, "unable to allocate DMA table\n");
414 return ret;
415 }
416
417 /* Add the DATA FPGA registers to the scatterlist */
418 sg = table->sgl;
419 for (i = 0; i < NUM_FPGA; i++) {
420 sg_dma_address(sg) = fpga_start_addr(priv, i);
421 sg_dma_len(sg) = REG_BLOCK_SIZE;
422 sg = sg_next(sg);
423 }
424
425 /* Add the SYS-FPGA registers to the scatterlist */
426 sg_dma_address(sg) = SYS_FPGA_BLOCK;
427 sg_dma_len(sg) = REG_BLOCK_SIZE;
428 sg = sg_next(sg);
429
430 /* Add the FPGA correlation data blocks to the scatterlist */
431 for (i = 0; i < NUM_FPGA; i++) {
432 info = &priv->info[i];
433 for (j = 0; j < info->num_lag_ram; j++) {
434 sg_dma_address(sg) = fpga_block_addr(priv, i, j);
435 sg_dma_len(sg) = info->blk_size;
436 sg = sg_next(sg);
437 }
438 }
439
440 /*
441 * All physical addresses and lengths are present in the structure
442 * now. It can be reused for every FPGA DATA interrupt
443 */
444 return 0;
445}
446
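/*
 * For reference, the scatterlist built above is laid out as follows (the
 * number of lag RAM entries per FPGA depends on the configuration read in
 * data_calculate_bufsize()):
 *
 *	[FPGA0 regs][FPGA1 regs][FPGA2 regs][FPGA3 regs][SYS-FPGA regs]
 *	[FPGA0 lag RAMs 0..N][FPGA1 lag RAMs 0..N] ... [FPGA3 lag RAMs 0..N]
 *
 * where each register entry is REG_BLOCK_SIZE bytes and each lag RAM entry
 * is that FPGA's blk_size.
 */
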
447/*
448 * FPGA Register Access Helpers
449 */
450
451static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
452 unsigned int reg, u32 val)
453{
454 const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
455 iowrite32be(val, priv->regs + fpga_start + reg);
456}
457
458static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
459 unsigned int reg)
460{
461 const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
462 return ioread32be(priv->regs + fpga_start + reg);
463}
464
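/*
 * Worked example: fpga_read_reg(priv, 2, MMAP_REG_STATUS) reads from
 *
 *	priv->regs + DATA_FPGA_START + 2 * DATA_FPGA_SIZE + MMAP_REG_STATUS
 *	           = priv->regs + 0x400000 + 0x100000 + 0x48
 *	           = priv->regs + 0x500048
 */
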
465/**
466 * data_calculate_bufsize() - calculate the data buffer size required
467 * @priv: the driver's private data structure
468 *
469 * Calculate the total buffer size needed to hold a single block
470 * of correlation data
471 *
472 * CONTEXT: user
473 *
474 * Returns 0 on success, -ERRNO otherwise
475 */
476static int data_calculate_bufsize(struct fpga_device *priv)
477{
478 u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
479 u32 conf1, conf2, version;
480 u32 num_lag_ram, blk_size;
481 int i;
482
483 /* Each buffer starts with the 5 FPGA register areas */
484 priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
485
486 /* Read and store the configuration data for each FPGA */
487 for (i = 0; i < NUM_FPGA; i++) {
488 version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
489 conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
490 conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);
491
492 /* minor version 2 and later */
493 if ((version & 0x000000FF) >= 2) {
494 num_corl = (conf1 & 0x000000F0) >> 4;
495 num_pack = (conf1 & 0x00000F00) >> 8;
496 num_lags = (conf1 & 0x00FFF000) >> 12;
497 num_meta = (conf1 & 0x7F000000) >> 24;
498 num_qcnt = (conf2 & 0x00000FFF) >> 0;
499 } else {
500 num_corl = (conf1 & 0x000000F0) >> 4;
501 num_pack = 1; /* implied */
502 num_lags = (conf1 & 0x000FFF00) >> 8;
503 num_meta = (conf1 & 0x7FF00000) >> 20;
504 num_qcnt = (conf2 & 0x00000FFF) >> 0;
505 }
506
507 num_lag_ram = (num_corl + num_pack - 1) / num_pack;
508 blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;
509
510 priv->info[i].num_lag_ram = num_lag_ram;
511 priv->info[i].blk_size = blk_size;
512 priv->bufsize += num_lag_ram * blk_size;
513
514 dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
515 dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
516 dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
517 dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
518 dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
519 dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
520 }
521
522 dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
523 return 0;
524}
525
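/*
 * Worked example with hypothetical configuration values NUM_CORL = 8,
 * NUM_PACK = 2, NUM_LAGS = 32, NUM_META = 4, NUM_QCNT = 16:
 *
 *	num_lag_ram = (8 + 2 - 1) / 2         = 4	(integer division)
 *	blk_size    = ((2 * 32) + 4 + 16) * 8 = 672 bytes
 *
 * That FPGA then contributes 4 * 672 = 2688 bytes to the buffer, on top of
 * the (1 + NUM_FPGA) * REG_BLOCK_SIZE = 5 * 128 = 640 byte register area
 * counted once per buffer.
 */
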
526/*
527 * Interrupt Handling
528 */
529
530/**
531 * data_disable_interrupts() - stop the device from generating interrupts
532 * @priv: the driver's private data structure
533 *
534 * Hide interrupts by switching to GPIO interrupt source
535 *
536 * LOCKING: must hold dev->lock
537 */
538static void data_disable_interrupts(struct fpga_device *priv)
539{
540 /* hide the interrupt by switching the IRQ driver to GPIO */
541 iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
542}
543
544/**
545 * data_enable_interrupts() - allow the device to generate interrupts
546 * @priv: the driver's private data structure
547 *
548 * Unhide interrupts by switching to the FPGA interrupt source. At the
549 * same time, clear the DATA-FPGA status registers.
550 *
551 * LOCKING: must hold dev->lock
552 */
553static void data_enable_interrupts(struct fpga_device *priv)
554{
555 /* clear the actual FPGA corl_done interrupt */
556 fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0);
557 fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0);
558 fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0);
559 fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0);
560
561 /* flush the writes */
562 fpga_read_reg(priv, 0, MMAP_REG_STATUS);
563
564 /* switch back to the external interrupt source */
565 iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
566}
567
568/**
569 * data_dma_cb() - DMAEngine callback for DMA completion
570 * @data: the driver's private data structure
571 *
572 * Complete a DMA transfer from the DATA-FPGAs
573 *
574 * This is called via the DMA callback mechanism, and will handle moving the
575 * completed DMA transaction to the used list, and then wake any processes
576 * waiting for new data
577 *
578 * CONTEXT: any, softirq expected
579 */
580static void data_dma_cb(void *data)
581{
582 struct fpga_device *priv = data;
583 unsigned long flags;
584
585 spin_lock_irqsave(&priv->lock, flags);
586
587 /* If there is no inflight buffer, we've got a bug */
588 BUG_ON(priv->inflight == NULL);
589
590 /* Move the inflight buffer onto the used list */
591 list_move_tail(&priv->inflight->entry, &priv->used);
592 priv->inflight = NULL;
593
594 /* clear the FPGA status and re-enable interrupts */
595 data_enable_interrupts(priv);
596
597 spin_unlock_irqrestore(&priv->lock, flags);
598
599 /*
600 * We've changed both the inflight and used lists, so we need
601 * to wake up any processes that are blocking for those events
602 */
603 wake_up(&priv->wait);
604}
605
606/**
607 * data_submit_dma() - prepare and submit the required DMA to fill a buffer
608 * @priv: the driver's private data structure
609 * @buf: the data buffer
610 *
611 * Prepare and submit the necessary DMA transactions to fill a correlation
612 * data buffer.
613 *
614 * LOCKING: must hold dev->lock
615 * CONTEXT: hardirq only
616 *
617 * Returns 0 on success, -ERRNO otherwise
618 */
619static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
620{
621 struct scatterlist *dst_sg, *src_sg;
622 unsigned int dst_nents, src_nents;
623 struct dma_chan *chan = priv->chan;
624 struct dma_async_tx_descriptor *tx;
625 dma_cookie_t cookie;
626 dma_addr_t dst, src;
627
628 dst_sg = buf->vb.sglist;
629 dst_nents = buf->vb.sglen;
630
631 src_sg = priv->corl_table.sgl;
632 src_nents = priv->corl_nents;
633
634 /*
635 * All buffers passed to this function should be ready and mapped
636 * for DMA already. Therefore, we don't need to do anything except
637 * submit it to the Freescale DMA Engine for processing
638 */
639
640 /* setup the scatterlist to scatterlist transfer */
641 tx = chan->device->device_prep_dma_sg(chan,
642 dst_sg, dst_nents,
643 src_sg, src_nents,
644 0);
645 if (!tx) {
646 dev_err(priv->dev, "unable to prep scatterlist DMA\n");
647 return -ENOMEM;
648 }
649
650 /* submit the transaction to the DMA controller */
651 cookie = tx->tx_submit(tx);
652 if (dma_submit_error(cookie)) {
653 dev_err(priv->dev, "unable to submit scatterlist DMA\n");
654 return -ENOMEM;
655 }
656
657 /* Prepare the re-read of the SYS-FPGA block */
658 dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE);
659 src = SYS_FPGA_BLOCK;
660 tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
661 REG_BLOCK_SIZE,
662 DMA_PREP_INTERRUPT);
663 if (!tx) {
664 dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n");
665 return -ENOMEM;
666 }
667
668 /* Setup the callback */
669 tx->callback = data_dma_cb;
670 tx->callback_param = priv;
671
672 /* submit the transaction to the DMA controller */
673 cookie = tx->tx_submit(tx);
674 if (dma_submit_error(cookie)) {
675 dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n");
676 return -ENOMEM;
677 }
678
679 return 0;
680}
681
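/*
 * Note that the two descriptors prepared by data_submit_dma() (the large
 * sg-to-sg transfer and the small SYS-FPGA re-read) are only queued on the
 * channel; they are not started until data_irq() calls
 * dma_async_memcpy_issue_pending(). The second descriptor carries
 * DMA_PREP_INTERRUPT and the data_dma_cb() callback, so the completion
 * callback fires once the whole sequence has finished.
 */
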
682#define CORL_DONE 0x1
683#define CORL_ERR 0x2
684
685static irqreturn_t data_irq(int irq, void *dev_id)
686{
687 struct fpga_device *priv = dev_id;
688 bool submitted = false;
689 struct data_buf *buf;
690 u32 status;
691 int i;
692
693 /* detect spurious interrupts via FPGA status */
694	for (i = 0; i < NUM_FPGA; i++) {
695 status = fpga_read_reg(priv, i, MMAP_REG_STATUS);
696 if (!(status & (CORL_DONE | CORL_ERR))) {
697 dev_err(priv->dev, "spurious irq detected (FPGA)\n");
698 return IRQ_NONE;
699 }
700 }
701
702 /* detect spurious interrupts via raw IRQ pin readback */
703 status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA);
704 if (status & IRQ_CORL_DONE) {
705 dev_err(priv->dev, "spurious irq detected (IRQ)\n");
706 return IRQ_NONE;
707 }
708
709 spin_lock(&priv->lock);
710
711 /* hide the interrupt by switching the IRQ driver to GPIO */
712 data_disable_interrupts(priv);
713
714 /* If there are no free buffers, drop this data */
715 if (list_empty(&priv->free)) {
716 priv->num_dropped++;
717 goto out;
718 }
719
720 buf = list_first_entry(&priv->free, struct data_buf, entry);
721 list_del_init(&buf->entry);
722 BUG_ON(buf->size != priv->bufsize);
723
724 /* Submit a DMA transfer to get the correlation data */
725 if (data_submit_dma(priv, buf)) {
726 dev_err(priv->dev, "Unable to setup DMA transfer\n");
727 list_move_tail(&buf->entry, &priv->free);
728 goto out;
729 }
730
731 /* Save the buffer for the DMA callback */
732 priv->inflight = buf;
733 submitted = true;
734
735 /* Start the DMA Engine */
736 dma_async_memcpy_issue_pending(priv->chan);
737
738out:
739 /* If no DMA was submitted, re-enable interrupts */
740 if (!submitted)
741 data_enable_interrupts(priv);
742
743 spin_unlock(&priv->lock);
744 return IRQ_HANDLED;
745}
746
747/*
748 * Realtime Device Enable Helpers
749 */
750
751/**
752 * data_device_enable() - enable the device for buffered dumping
753 * @priv: the driver's private data structure
754 *
755 * Enable the device for buffered dumping. Allocates buffers and hooks up
756 * the interrupt handler. When this finishes, data will come pouring in.
757 *
758 * LOCKING: must hold dev->mutex
759 * CONTEXT: user context only
760 *
761 * Returns 0 on success, -ERRNO otherwise
762 */
763static int data_device_enable(struct fpga_device *priv)
764{
765 u32 val;
766 int ret;
767
768 /* multiple enables are safe: they do nothing */
769 if (priv->enabled)
770 return 0;
771
772 /* check that the FPGAs are programmed */
773 val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
774 if (!(val & (1 << 18))) {
775 dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
776 return -ENODATA;
777 }
778
779 /* read the FPGAs to calculate the buffer size */
780 ret = data_calculate_bufsize(priv);
781 if (ret) {
782 dev_err(priv->dev, "unable to calculate buffer size\n");
783 goto out_error;
784 }
785
786 /* allocate the correlation data buffers */
787 ret = data_alloc_buffers(priv);
788 if (ret) {
789 dev_err(priv->dev, "unable to allocate buffers\n");
790 goto out_error;
791 }
792
793 /* setup the source scatterlist for dumping correlation data */
794 ret = data_setup_corl_table(priv);
795 if (ret) {
796 dev_err(priv->dev, "unable to setup correlation DMA table\n");
797 goto out_error;
798 }
799
800 /* hookup the irq handler */
801 ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
802 if (ret) {
803 dev_err(priv->dev, "unable to request IRQ handler\n");
804 goto out_error;
805 }
806
807 /* switch to the external FPGA IRQ line */
808 data_enable_interrupts(priv);
809
810 /* success, we're enabled */
811 priv->enabled = true;
812 return 0;
813
814out_error:
815 sg_free_table(&priv->corl_table);
816 priv->corl_nents = 0;
817
818 data_free_buffers(priv);
819 return ret;
820}
821
822/**
823 * data_device_disable() - disable the device for buffered dumping
824 * @priv: the driver's private data structure
825 *
826 * Disable the device for buffered dumping. Stops new DMA transactions from
827 * being generated, waits for all outstanding DMA to complete, and then frees
828 * all buffers.
829 *
830 * LOCKING: must hold dev->mutex
831 * CONTEXT: user only
832 *
833 * Returns 0 on success, -ERRNO otherwise
834 */
835static int data_device_disable(struct fpga_device *priv)
836{
837 int ret;
838
839 /* allow multiple disable */
840 if (!priv->enabled)
841 return 0;
842
843 /* switch to the internal GPIO IRQ line */
844 data_disable_interrupts(priv);
845
846 /* unhook the irq handler */
847 free_irq(priv->irq, priv);
848
849 /*
850 * wait for all outstanding DMA to complete
851 *
852 * Device interrupts are disabled, therefore another buffer cannot
853 * be marked inflight.
854 */
855 ret = wait_event_interruptible(priv->wait, priv->inflight == NULL);
856 if (ret)
857 return ret;
858
859 /* free the correlation table */
860 sg_free_table(&priv->corl_table);
861 priv->corl_nents = 0;
862
863 /*
864 * We are taking the spinlock not to protect priv->enabled, but instead
865 * to make sure that there are no readers in the process of altering
866 * the free or used lists while we are setting this flag.
867 */
868 spin_lock_irq(&priv->lock);
869 priv->enabled = false;
870 spin_unlock_irq(&priv->lock);
871
872 /* free all buffers: the free and used lists are not being changed */
873 data_free_buffers(priv);
874 return 0;
875}
876
877/*
878 * DEBUGFS Interface
879 */
880#ifdef CONFIG_DEBUG_FS
881
882/*
883 * Count the number of entries in the given list
884 */
885static unsigned int list_num_entries(struct list_head *list)
886{
887 struct list_head *entry;
888 unsigned int ret = 0;
889
890 list_for_each(entry, list)
891 ret++;
892
893 return ret;
894}
895
896static int data_debug_show(struct seq_file *f, void *offset)
897{
898 struct fpga_device *priv = f->private;
899 int ret;
900
901 /*
902 * Lock the mutex first, so that we get an accurate value for enable
903 * Lock the spinlock next, to get accurate list counts
904 */
905 ret = mutex_lock_interruptible(&priv->mutex);
906 if (ret)
907 return ret;
908
909 spin_lock_irq(&priv->lock);
910
911 seq_printf(f, "enabled: %d\n", priv->enabled);
912	seq_printf(f, "bufsize: %zu\n", priv->bufsize);
913 seq_printf(f, "num_buffers: %d\n", priv->num_buffers);
914 seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free));
915 seq_printf(f, "inflight: %d\n", priv->inflight != NULL);
916 seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used));
917 seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
918
919 spin_unlock_irq(&priv->lock);
920 mutex_unlock(&priv->mutex);
921 return 0;
922}
923
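/*
 * Reading the resulting debugfs file produces a snapshot such as the
 * following (values are purely illustrative):
 *
 *	enabled: 1
 *	bufsize: 1048576
 *	num_buffers: 64
 *	num_free: 62
 *	inflight: 1
 *	num_used: 1
 *	num_dropped: 0
 */
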
924static int data_debug_open(struct inode *inode, struct file *file)
925{
926 return single_open(file, data_debug_show, inode->i_private);
927}
928
929static const struct file_operations data_debug_fops = {
930 .owner = THIS_MODULE,
931 .open = data_debug_open,
932 .read = seq_read,
933 .llseek = seq_lseek,
934 .release = single_release,
935};
936
937static int data_debugfs_init(struct fpga_device *priv)
938{
939 priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
940 &data_debug_fops);
941 if (IS_ERR(priv->dbg_entry))
942 return PTR_ERR(priv->dbg_entry);
943
944 return 0;
945}
946
947static void data_debugfs_exit(struct fpga_device *priv)
948{
949 debugfs_remove(priv->dbg_entry);
950}
951
952#else
953
954static inline int data_debugfs_init(struct fpga_device *priv)
955{
956 return 0;
957}
958
959static inline void data_debugfs_exit(struct fpga_device *priv)
960{
961}
962
963#endif /* CONFIG_DEBUG_FS */
964
965/*
966 * SYSFS Attributes
967 */
968
969static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
970 char *buf)
971{
972 struct fpga_device *priv = dev_get_drvdata(dev);
973 return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
974}
975
976static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
977 const char *buf, size_t count)
978{
979 struct fpga_device *priv = dev_get_drvdata(dev);
980 unsigned long enable;
981 int ret;
982
983 ret = strict_strtoul(buf, 0, &enable);
984 if (ret) {
985 dev_err(priv->dev, "unable to parse enable input\n");
986 return -EINVAL;
987 }
988
989 ret = mutex_lock_interruptible(&priv->mutex);
990 if (ret)
991 return ret;
992
993 if (enable)
994 ret = data_device_enable(priv);
995 else
996 ret = data_device_disable(priv);
997
998 if (ret) {
999 dev_err(priv->dev, "device %s failed\n",
1000 enable ? "enable" : "disable");
1001 count = ret;
1002 goto out_unlock;
1003 }
1004
1005out_unlock:
1006 mutex_unlock(&priv->mutex);
1007 return count;
1008}
1009
1010static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);
1011
1012static struct attribute *data_sysfs_attrs[] = {
1013 &dev_attr_enable.attr,
1014 NULL,
1015};
1016
1017static const struct attribute_group rt_sysfs_attr_group = {
1018 .attrs = data_sysfs_attrs,
1019};
1020
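/*
 * The "enable" attribute is created on the miscdevice during probe, so
 * (assuming the usual misc class layout) userspace arms buffered dumping by
 * writing "1" to /sys/class/misc/carma-fpga/enable and disarms it by
 * writing "0".
 */
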
1021/*
1022 * FPGA Realtime Data Character Device
1023 */
1024
1025static int data_open(struct inode *inode, struct file *filp)
1026{
1027 /*
1028 * The miscdevice layer puts our struct miscdevice into the
1029 * filp->private_data field. We use this to find our private
1030 * data and then overwrite it with our own private structure.
1031 */
1032 struct fpga_device *priv = container_of(filp->private_data,
1033 struct fpga_device, miscdev);
1034 struct fpga_reader *reader;
1035 int ret;
1036
1037 /* allocate private data */
1038 reader = kzalloc(sizeof(*reader), GFP_KERNEL);
1039 if (!reader)
1040 return -ENOMEM;
1041
1042 reader->priv = priv;
1043 reader->buf = NULL;
1044
1045 filp->private_data = reader;
1046 ret = nonseekable_open(inode, filp);
1047 if (ret) {
1048 dev_err(priv->dev, "nonseekable-open failed\n");
1049 kfree(reader);
1050 return ret;
1051 }
1052
1053 /*
1054 * success, increase the reference count of the private data structure
1055 * so that it doesn't disappear if the device is unbound
1056 */
1057 kref_get(&priv->ref);
1058 return 0;
1059}
1060
1061static int data_release(struct inode *inode, struct file *filp)
1062{
1063 struct fpga_reader *reader = filp->private_data;
1064 struct fpga_device *priv = reader->priv;
1065
1066 /* free the per-reader structure */
1067 data_free_buffer(reader->buf);
1068 kfree(reader);
1069 filp->private_data = NULL;
1070
1071 /* decrement our reference count to the private data */
1072 kref_put(&priv->ref, fpga_device_release);
1073 return 0;
1074}
1075
1076static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
1077 loff_t *f_pos)
1078{
1079 struct fpga_reader *reader = filp->private_data;
1080 struct fpga_device *priv = reader->priv;
1081 struct list_head *used = &priv->used;
1082 struct data_buf *dbuf;
1083 size_t avail;
1084 void *data;
1085 int ret;
1086
1087 /* check if we already have a partial buffer */
1088 if (reader->buf) {
1089 dbuf = reader->buf;
1090 goto have_buffer;
1091 }
1092
1093 spin_lock_irq(&priv->lock);
1094
1095 /* Block until there is at least one buffer on the used list */
1096 while (list_empty(used)) {
1097 spin_unlock_irq(&priv->lock);
1098
1099 if (filp->f_flags & O_NONBLOCK)
1100 return -EAGAIN;
1101
1102 ret = wait_event_interruptible(priv->wait, !list_empty(used));
1103 if (ret)
1104 return ret;
1105
1106 spin_lock_irq(&priv->lock);
1107 }
1108
1109 /* Grab the first buffer off of the used list */
1110 dbuf = list_first_entry(used, struct data_buf, entry);
1111 list_del_init(&dbuf->entry);
1112
1113 spin_unlock_irq(&priv->lock);
1114
1115 /* Buffers are always mapped: unmap it */
1116 videobuf_dma_unmap(priv->dev, &dbuf->vb);
1117
1118 /* save the buffer for later */
1119 reader->buf = dbuf;
1120 reader->buf_start = 0;
1121
1122have_buffer:
1123 /* Get the number of bytes available */
1124 avail = dbuf->size - reader->buf_start;
1125 data = dbuf->vb.vaddr + reader->buf_start;
1126
1127 /* Get the number of bytes we can transfer */
1128 count = min(count, avail);
1129
1130 /* Copy the data to the userspace buffer */
1131 if (copy_to_user(ubuf, data, count))
1132 return -EFAULT;
1133
1134 /* Update the amount of available space */
1135 avail -= count;
1136
1137 /*
1138 * If there is still some data available, save the buffer for the
1139 * next userspace call to read() and return
1140 */
1141 if (avail > 0) {
1142 reader->buf_start += count;
1143 reader->buf = dbuf;
1144 return count;
1145 }
1146
1147 /*
1148 * Get the buffer ready to be reused for DMA
1149 *
1150 * If it fails, we pretend that the read never happened and return
1151 * -EFAULT to userspace. The read will be retried.
1152 */
1153 ret = videobuf_dma_map(priv->dev, &dbuf->vb);
1154 if (ret) {
1155 dev_err(priv->dev, "unable to remap buffer for DMA\n");
1156 return -EFAULT;
1157 }
1158
1159 /* Lock against concurrent enable/disable */
1160 spin_lock_irq(&priv->lock);
1161
1162 /* the reader is finished with this buffer */
1163 reader->buf = NULL;
1164
1165 /*
1166 * One of two things has happened, the device is disabled, or the
1167 * device has been reconfigured underneath us. In either case, we
1168 * should just throw away the buffer.
1169 */
1170 if (!priv->enabled || dbuf->size != priv->bufsize) {
1171 videobuf_dma_unmap(priv->dev, &dbuf->vb);
1172 data_free_buffer(dbuf);
1173 goto out_unlock;
1174 }
1175
1176 /* The buffer is safe to reuse, so add it back to the free list */
1177 list_add_tail(&dbuf->entry, &priv->free);
1178
1179out_unlock:
1180 spin_unlock_irq(&priv->lock);
1181 return count;
1182}
1183
1184static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
1185{
1186 struct fpga_reader *reader = filp->private_data;
1187 struct fpga_device *priv = reader->priv;
1188 unsigned int mask = 0;
1189
1190 poll_wait(filp, &priv->wait, tbl);
1191
1192 if (!list_empty(&priv->used))
1193 mask |= POLLIN | POLLRDNORM;
1194
1195 return mask;
1196}
1197
1198static int data_mmap(struct file *filp, struct vm_area_struct *vma)
1199{
1200 struct fpga_reader *reader = filp->private_data;
1201 struct fpga_device *priv = reader->priv;
1202 unsigned long offset, vsize, psize, addr;
1203
1204 /* VMA properties */
1205 offset = vma->vm_pgoff << PAGE_SHIFT;
1206 vsize = vma->vm_end - vma->vm_start;
1207 psize = priv->phys_size - offset;
1208 addr = (priv->phys_addr + offset) >> PAGE_SHIFT;
1209
1210 /* Check against the FPGA region's physical memory size */
1211 if (vsize > psize) {
1212 dev_err(priv->dev, "requested mmap mapping too large\n");
1213 return -EINVAL;
1214 }
1215
1216	/* IO memory (stop caching) */
1217 vma->vm_flags |= VM_IO | VM_RESERVED;
1218 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1219
1220 return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
1221 vma->vm_page_prot);
1222}
1223
1224static const struct file_operations data_fops = {
1225 .owner = THIS_MODULE,
1226 .open = data_open,
1227 .release = data_release,
1228 .read = data_read,
1229 .poll = data_poll,
1230 .mmap = data_mmap,
1231 .llseek = no_llseek,
1232};
1233
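/*
 * A minimal userspace consumer might look like the sketch below. The
 * /dev/carma-fpga node name is assumed from the miscdevice name, error
 * handling is trimmed, and a real reader would likely size its reads from
 * the bufsize reported in debugfs. poll() is also available for readers
 * that multiplex several descriptors.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		static char buf[65536];
 *		ssize_t n;
 *		int fd;
 *
 *		fd = open("/dev/carma-fpga", O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			write(STDOUT_FILENO, buf, n);
 *
 *		close(fd);
 *		return 0;
 *	}
 */
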
1234/*
1235 * OpenFirmware Device Subsystem
1236 */
1237
1238static bool dma_filter(struct dma_chan *chan, void *data)
1239{
1240 /*
1241 * DMA Channel #0 is used for the FPGA Programmer, so ignore it
1242 *
1243 * This probably won't survive an unload/load cycle of the Freescale
1244 * DMAEngine driver, but that won't be a problem
1245 */
1246 if (chan->chan_id == 0 && chan->device->dev_id == 0)
1247 return false;
1248
1249 return true;
1250}
1251
1252static int data_of_probe(struct platform_device *op,
1253 const struct of_device_id *match)
1254{
1255 struct device_node *of_node = op->dev.of_node;
1256 struct device *this_device;
1257 struct fpga_device *priv;
1258 struct resource res;
1259 dma_cap_mask_t mask;
1260 int ret;
1261
1262 /* Allocate private data */
1263 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1264 if (!priv) {
1265 dev_err(&op->dev, "Unable to allocate device private data\n");
1266 ret = -ENOMEM;
1267 goto out_return;
1268 }
1269
1270 dev_set_drvdata(&op->dev, priv);
1271 priv->dev = &op->dev;
1272 kref_init(&priv->ref);
1273 mutex_init(&priv->mutex);
1274
1275 dev_set_drvdata(priv->dev, priv);
1276 spin_lock_init(&priv->lock);
1277 INIT_LIST_HEAD(&priv->free);
1278 INIT_LIST_HEAD(&priv->used);
1279 init_waitqueue_head(&priv->wait);
1280
1281 /* Setup the misc device */
1282 priv->miscdev.minor = MISC_DYNAMIC_MINOR;
1283 priv->miscdev.name = drv_name;
1284 priv->miscdev.fops = &data_fops;
1285
1286 /* Get the physical address of the FPGA registers */
1287 ret = of_address_to_resource(of_node, 0, &res);
1288 if (ret) {
1289 dev_err(&op->dev, "Unable to find FPGA physical address\n");
1290 ret = -ENODEV;
1291 goto out_free_priv;
1292 }
1293
1294 priv->phys_addr = res.start;
1295 priv->phys_size = resource_size(&res);
1296
1297 /* ioremap the registers for use */
1298 priv->regs = of_iomap(of_node, 0);
1299 if (!priv->regs) {
1300 dev_err(&op->dev, "Unable to ioremap registers\n");
1301 ret = -ENOMEM;
1302 goto out_free_priv;
1303 }
1304
1305 dma_cap_zero(mask);
1306 dma_cap_set(DMA_MEMCPY, mask);
1307 dma_cap_set(DMA_INTERRUPT, mask);
1308 dma_cap_set(DMA_SLAVE, mask);
1309 dma_cap_set(DMA_SG, mask);
1310
1311 /* Request a DMA channel */
1312 priv->chan = dma_request_channel(mask, dma_filter, NULL);
1313 if (!priv->chan) {
1314 dev_err(&op->dev, "Unable to request DMA channel\n");
1315 ret = -ENODEV;
1316 goto out_unmap_regs;
1317 }
1318
1319 /* Find the correct IRQ number */
1320 priv->irq = irq_of_parse_and_map(of_node, 0);
1321 if (priv->irq == NO_IRQ) {
1322 dev_err(&op->dev, "Unable to find IRQ line\n");
1323 ret = -ENODEV;
1324 goto out_release_dma;
1325 }
1326
1327 /* Drive the GPIO for FPGA IRQ high (no interrupt) */
1328 iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA);
1329
1330 /* Register the miscdevice */
1331 ret = misc_register(&priv->miscdev);
1332 if (ret) {
1333 dev_err(&op->dev, "Unable to register miscdevice\n");
1334 goto out_irq_dispose_mapping;
1335 }
1336
1337 /* Create the debugfs files */
1338 ret = data_debugfs_init(priv);
1339 if (ret) {
1340 dev_err(&op->dev, "Unable to create debugfs files\n");
1341 goto out_misc_deregister;
1342 }
1343
1344 /* Create the sysfs files */
1345 this_device = priv->miscdev.this_device;
1346 dev_set_drvdata(this_device, priv);
1347 ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group);
1348 if (ret) {
1349 dev_err(&op->dev, "Unable to create sysfs files\n");
1350 goto out_data_debugfs_exit;
1351 }
1352
1353 dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n");
1354 return 0;
1355
1356out_data_debugfs_exit:
1357 data_debugfs_exit(priv);
1358out_misc_deregister:
1359 misc_deregister(&priv->miscdev);
1360out_irq_dispose_mapping:
1361 irq_dispose_mapping(priv->irq);
1362out_release_dma:
1363 dma_release_channel(priv->chan);
1364out_unmap_regs:
1365 iounmap(priv->regs);
1366out_free_priv:
1367 kref_put(&priv->ref, fpga_device_release);
1368out_return:
1369 return ret;
1370}
1371
1372static int data_of_remove(struct platform_device *op)
1373{
1374 struct fpga_device *priv = dev_get_drvdata(&op->dev);
1375 struct device *this_device = priv->miscdev.this_device;
1376
1377 /* remove all sysfs files, now the device cannot be re-enabled */
1378 sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group);
1379
1380 /* remove all debugfs files */
1381 data_debugfs_exit(priv);
1382
1383 /* disable the device from generating data */
1384 data_device_disable(priv);
1385
1386 /* remove the character device to stop new readers from appearing */
1387 misc_deregister(&priv->miscdev);
1388
1389 /* cleanup everything not needed by readers */
1390 irq_dispose_mapping(priv->irq);
1391 dma_release_channel(priv->chan);
1392 iounmap(priv->regs);
1393
1394 /* release our reference */
1395 kref_put(&priv->ref, fpga_device_release);
1396 return 0;
1397}
1398
1399static struct of_device_id data_of_match[] = {
1400 { .compatible = "carma,carma-fpga", },
1401 {},
1402};
1403
1404static struct of_platform_driver data_of_driver = {
1405 .probe = data_of_probe,
1406 .remove = data_of_remove,
1407 .driver = {
1408 .name = drv_name,
1409 .of_match_table = data_of_match,
1410 .owner = THIS_MODULE,
1411 },
1412};
1413
1414/*
1415 * Module Init / Exit
1416 */
1417
1418static int __init data_init(void)
1419{
1420 return of_register_platform_driver(&data_of_driver);
1421}
1422
1423static void __exit data_exit(void)
1424{
1425 of_unregister_platform_driver(&data_of_driver);
1426}
1427
1428MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1429MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver");
1430MODULE_LICENSE("GPL");
1431
1432module_init(data_init);
1433module_exit(data_exit);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 75b0d3cb7676..9f689f1da0fc 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
  * Returns a pointer to the interrupt parent node, or NULL if the interrupt
  * parent could not be determined.
  */
-static struct device_node *of_irq_find_parent(struct device_node *child)
+struct device_node *of_irq_find_parent(struct device_node *child)
 {
 	struct device_node *p;
 	const __be32 *parp;
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 109e013b1772..e6955f5d1f08 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -68,6 +68,7 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
 		struct resource *res, int nr_irqs);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 #endif /* CONFIG_OF_IRQ */
 #endif /* CONFIG_OF */