author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-12 17:27:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-12 17:27:24 -0500
commit		09cea96caa59fabab3030c53bd698b9b568d959a (patch)
tree		a991cdc0c887fdcda37f4b751ee98d3db9559f4e
parent		6eb7365db6f3a4a9d8d9922bb0b800f9cbaad641 (diff)
parent		e090aa80321b64c3b793f3b047e31ecf1af9538d (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (151 commits)
  powerpc: Fix usage of 64-bit instruction in 32-bit altivec code
  MAINTAINERS: Add PowerPC patterns
  powerpc/pseries: Track previous CPPR values to correctly EOI interrupts
  powerpc/pseries: Correct pseries/dlpar.c build break without CONFIG_SMP
  powerpc: Make "intspec" pointers in irq_host->xlate() const
  powerpc/8xx: DTLB Miss cleanup
  powerpc/8xx: Remove DIRTY pte handling in DTLB Error.
  powerpc/8xx: Start using dcbX instructions in various copy routines
  powerpc/8xx: Restore _PAGE_WRITETHRU
  powerpc/8xx: Add missing Guarded setting in DTLB Error.
  powerpc/8xx: Fixup DAR from buggy dcbX instructions.
  powerpc/8xx: Tag DAR with 0x00f0 to catch buggy instructions.
  powerpc/8xx: Update TLB asm so it behaves as linux mm expects.
  powerpc/8xx: Invalidate non present TLBs
  powerpc/pseries: Serialize cpu hotplug operations during deactivate Vs deallocate
  pseries/pseries: Add code to online/offline CPUs of a DLPAR node
  powerpc: stop_this_cpu: remove the cpu from the online map.
  powerpc/pseries: Add kernel based CPU DLPAR handling
  sysfs/cpu: Add probe/release files
  powerpc/pseries: Kernel DLPAR Infrastructure
  ...
 Documentation/ABI/testing/sysfs-devices-system-cpu |   15
 Documentation/cpu-hotplug.txt                      |    6
 Documentation/powerpc/dts-bindings/fsl/board.txt   |    4
 Documentation/powerpc/dts-bindings/fsl/mpc5200.txt |   17
 MAINTAINERS                                        |   15
 arch/powerpc/Kconfig                               |   49
 arch/powerpc/boot/dts/canyonlands.dts              |    2
 arch/powerpc/boot/dts/eiger.dts                    |    6
 arch/powerpc/boot/dts/gef_ppc9a.dts                |    6
 arch/powerpc/boot/dts/gef_sbc310.dts               |    6
 arch/powerpc/boot/dts/gef_sbc610.dts               |    6
 arch/powerpc/boot/dts/glacier.dts                  |    6
 arch/powerpc/boot/dts/haleakala.dts                |    2
 arch/powerpc/boot/dts/katmai.dts                   |   36
 arch/powerpc/boot/dts/kilauea.dts                  |    4
 arch/powerpc/boot/dts/kmeter1.dts                  |    7
 arch/powerpc/boot/dts/makalu.dts                   |    4
 arch/powerpc/boot/dts/mpc832x_mds.dts              |    9
 arch/powerpc/boot/dts/mpc832x_rdb.dts              |    9
 arch/powerpc/boot/dts/mpc836x_mds.dts              |    9
 arch/powerpc/boot/dts/mpc836x_rdk.dts              |    9
 arch/powerpc/boot/dts/mpc8568mds.dts               |  119
 arch/powerpc/boot/dts/mpc8569mds.dts               |  111
 arch/powerpc/boot/dts/mpc8610_hpcd.dts             |   26
 arch/powerpc/boot/dts/p1020rdb.dts                 |  477
 arch/powerpc/boot/dts/p2020rdb_camp_core0.dts      |  363
 arch/powerpc/boot/dts/p2020rdb_camp_core1.dts      |  184
 arch/powerpc/boot/dts/p4080ds.dts                  |  554
 arch/powerpc/boot/dts/redwood.dts                  |    1
 arch/powerpc/boot/dts/yosemite.dts                 |   14
 arch/powerpc/configs/86xx/gef_ppc9a_defconfig      |    2
 arch/powerpc/configs/86xx/gef_sbc310_defconfig     |    2
 arch/powerpc/configs/86xx/gef_sbc610_defconfig     |    4
 arch/powerpc/include/asm/cpm.h                     |   82
 arch/powerpc/include/asm/cpm1.h                    |   45
 arch/powerpc/include/asm/cpm2.h                    |   47
 arch/powerpc/include/asm/exception-64s.h           |    2
 arch/powerpc/include/asm/hugetlb.h                 |   27
 arch/powerpc/include/asm/hvcall.h                  |   13
 arch/powerpc/include/asm/hw_irq.h                  |    5
 arch/powerpc/include/asm/immap_cpm2.h              |    2
 arch/powerpc/include/asm/immap_qe.h                |    8
 arch/powerpc/include/asm/irq.h                     |   13
 arch/powerpc/include/asm/kvm.h                     |   18
 arch/powerpc/include/asm/kvm_asm.h                 |   40
 arch/powerpc/include/asm/kvm_book3s.h              |  139
 arch/powerpc/include/asm/kvm_book3s_64_asm.h       |   58
 arch/powerpc/include/asm/kvm_host.h                |   79
 arch/powerpc/include/asm/kvm_ppc.h                 |    1
 arch/powerpc/include/asm/lppaca.h                  |    9
 arch/powerpc/include/asm/machdep.h                 |    5
 arch/powerpc/include/asm/macio.h                   |    6
 arch/powerpc/include/asm/mediabay.h                |   27
 arch/powerpc/include/asm/mmu-hash64.h              |   50
 arch/powerpc/include/asm/mmu_context.h             |    2
 arch/powerpc/include/asm/mpc52xx.h                 |   47
 arch/powerpc/include/asm/nvram.h                   |    1
 arch/powerpc/include/asm/pSeries_reconfig.h        |    1
 arch/powerpc/include/asm/paca.h                    |    9
 arch/powerpc/include/asm/page.h                    |   14
 arch/powerpc/include/asm/page_64.h                 |    2
 arch/powerpc/include/asm/pgalloc-32.h              |   10
 arch/powerpc/include/asm/pgalloc-64.h              |   63
 arch/powerpc/include/asm/pgalloc.h                 |   30
 arch/powerpc/include/asm/pgtable-ppc64.h           |   14
 arch/powerpc/include/asm/pgtable.h                 |    3
 arch/powerpc/include/asm/pte-8xx.h                 |   14
 arch/powerpc/include/asm/pte-hash64-64k.h          |   37
 arch/powerpc/include/asm/qe.h                      |   43
 arch/powerpc/include/asm/systbl.h                  |    2
 arch/powerpc/kernel/Makefile                       |    3
 arch/powerpc/kernel/asm-offsets.c                  |   21
 arch/powerpc/kernel/crash.c                        |    2
 arch/powerpc/kernel/dma-swiotlb.c                  |    1
 arch/powerpc/kernel/exceptions-64s.S               |    8
 arch/powerpc/kernel/head_64.S                      |    7
 arch/powerpc/kernel/head_8xx.S                     |  313
 arch/powerpc/kernel/head_fsl_booke.S               |   22
 arch/powerpc/kernel/io.c                           |    4
 arch/powerpc/kernel/irq.c                          |  137
 arch/powerpc/kernel/lparcfg.c                      |    4
 arch/powerpc/kernel/misc_32.S                      |   18
 arch/powerpc/kernel/nvram_64.c                     |   56
 arch/powerpc/kernel/perf_callchain.c               |   20
 arch/powerpc/kernel/ppc_ksyms.c                    |    3
 arch/powerpc/kernel/proc_powerpc.c (renamed from arch/powerpc/kernel/proc_ppc64.c) |  102
 arch/powerpc/kernel/rtas_flash.c                   |   10
 arch/powerpc/kernel/rtasd.c (renamed from arch/powerpc/platforms/pseries/rtasd.c)  |   48
 arch/powerpc/kernel/setup_64.c                     |    5
 arch/powerpc/kernel/smp.c                          |    3
 arch/powerpc/kernel/sysfs.c                        |   19
 arch/powerpc/kernel/time.c                         |    1
 arch/powerpc/kernel/traps.c                        |   22
 arch/powerpc/kernel/vector.S                       |    2
 arch/powerpc/kvm/Kconfig                           |   17
 arch/powerpc/kvm/Makefile                          |   27
 arch/powerpc/kvm/book3s.c                          |  974
 arch/powerpc/kvm/book3s_32_mmu.c                   |  372
 arch/powerpc/kvm/book3s_64_emulate.c               |  345
 arch/powerpc/kvm/book3s_64_exports.c               |   24
 arch/powerpc/kvm/book3s_64_interrupts.S            |  392
 arch/powerpc/kvm/book3s_64_mmu.c                   |  478
 arch/powerpc/kvm/book3s_64_mmu_host.c              |  408
 arch/powerpc/kvm/book3s_64_rmhandlers.S            |  131
 arch/powerpc/kvm/book3s_64_slb.S                   |  262
 arch/powerpc/kvm/booke.c                           |    5
 arch/powerpc/kvm/emulate.c                         |   66
 arch/powerpc/kvm/powerpc.c                         |   28
 arch/powerpc/kvm/timing.c                          |    1
 arch/powerpc/kvm/trace.h                           |    6
 arch/powerpc/lib/copy_32.S                         |   24
 arch/powerpc/mm/Makefile                           |    5
 arch/powerpc/mm/fault.c                            |    8
 arch/powerpc/mm/fsl_booke_mmu.c                    |  132
 arch/powerpc/mm/gup.c                              |  149
 arch/powerpc/mm/hash_utils_64.c                    |   46
 arch/powerpc/mm/hugetlbpage-hash64.c               |  139
 arch/powerpc/mm/hugetlbpage.c                      |  792
 arch/powerpc/mm/init_64.c                          |   76
 arch/powerpc/mm/mem.c                              |   17
 arch/powerpc/mm/mmu_context_hash64.c               |   26
 arch/powerpc/mm/mmu_decl.h                         |   11
 arch/powerpc/mm/pgtable.c                          |   25
 arch/powerpc/mm/subpage-prot.c                     |   15
 arch/powerpc/mm/tlb_hash64.c                       |    8
 arch/powerpc/platforms/512x/mpc5121_ads_cpld.c     |    4
 arch/powerpc/platforms/52xx/Kconfig                |    5
 arch/powerpc/platforms/52xx/Makefile               |    1
 arch/powerpc/platforms/52xx/media5200.c            |    6
 arch/powerpc/platforms/52xx/mpc52xx_gpt.c          |  432
 arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c      |  560
 arch/powerpc/platforms/52xx/mpc52xx_pic.c          |   10
 arch/powerpc/platforms/82xx/pq2ads-pci-pic.c       |    3
 arch/powerpc/platforms/83xx/mpc832x_rdb.c          |    2
 arch/powerpc/platforms/83xx/suspend.c              |    1
 arch/powerpc/platforms/85xx/Kconfig                |   23
 arch/powerpc/platforms/85xx/Makefile               |    1
 arch/powerpc/platforms/85xx/corenet_ds.c           |  125
 arch/powerpc/platforms/85xx/corenet_ds.h           |   19
 arch/powerpc/platforms/85xx/mpc85xx_mds.c          |    1
 arch/powerpc/platforms/85xx/mpc85xx_rdb.c          |   34
 arch/powerpc/platforms/85xx/p4080_ds.c             |   74
 arch/powerpc/platforms/85xx/socrates_fpga_pic.c    |    6
 arch/powerpc/platforms/86xx/Kconfig                |    3
 arch/powerpc/platforms/86xx/gef_pic.c              |    6
 arch/powerpc/platforms/86xx/gef_ppc9a.c            |    5
 arch/powerpc/platforms/86xx/gef_sbc310.c           |    5
 arch/powerpc/platforms/86xx/gef_sbc610.c           |    5
 arch/powerpc/platforms/86xx/mpc8610_hpcd.c         |   48
 arch/powerpc/platforms/8xx/m8xx_setup.c            |    2
 arch/powerpc/platforms/Kconfig                     |    9
 arch/powerpc/platforms/Kconfig.cputype             |   17
 arch/powerpc/platforms/Makefile                    |    2
 arch/powerpc/platforms/cell/axon_msi.c             |    2
 arch/powerpc/platforms/cell/beat_interrupt.c       |    8
 arch/powerpc/platforms/cell/interrupt.c            |    6
 arch/powerpc/platforms/cell/spider-pic.c           |    8
 arch/powerpc/platforms/cell/spufs/file.c           |    2
 arch/powerpc/platforms/chrp/Kconfig                |    2
 arch/powerpc/platforms/chrp/setup.c                |   50
 arch/powerpc/platforms/iseries/htab.c              |    8
 arch/powerpc/platforms/iseries/irq.c               |    4
 arch/powerpc/platforms/powermac/pic.c              |   12
 arch/powerpc/platforms/ps3/interrupt.c             |    2
 arch/powerpc/platforms/ps3/mm.c                    |    2
 arch/powerpc/platforms/pseries/Kconfig             |    3
 arch/powerpc/platforms/pseries/Makefile            |    4
 arch/powerpc/platforms/pseries/cmm.c               |   29
 arch/powerpc/platforms/pseries/dlpar.c             |  558
 arch/powerpc/platforms/pseries/eeh_driver.c        |   18
 arch/powerpc/platforms/pseries/hotplug-cpu.c       |  182
 arch/powerpc/platforms/pseries/offline_states.h    |   18
 arch/powerpc/platforms/pseries/plpar_wrappers.h    |   22
 arch/powerpc/platforms/pseries/reconfig.c          |    8
 arch/powerpc/platforms/pseries/scanlog.c           |    4
 arch/powerpc/platforms/pseries/smp.c               |   19
 arch/powerpc/platforms/pseries/xics.c              |   70
 arch/powerpc/sysdev/Makefile                       |    1
 arch/powerpc/sysdev/cpm1.c                         |    4
 arch/powerpc/sysdev/cpm2_pic.c                     |   14
 arch/powerpc/sysdev/cpm_common.c                   |    5
 arch/powerpc/sysdev/fsl_msi.c                      |    4
 arch/powerpc/sysdev/fsl_pci.c                      |   18
 arch/powerpc/sysdev/fsl_pmc.c                      |   88
 arch/powerpc/sysdev/fsl_soc.c                      |    2
 arch/powerpc/sysdev/i8259.c                        |    8
 arch/powerpc/sysdev/ipic.c                         |    8
 arch/powerpc/sysdev/mpc8xx_pic.c                   |    6
 arch/powerpc/sysdev/mpic.c                         |   26
 arch/powerpc/sysdev/mpic_pasemi_msi.c              |    2
 arch/powerpc/sysdev/mpic_u3msi.c                   |    2
 arch/powerpc/sysdev/mv64x60_pic.c                  |    2
 arch/powerpc/sysdev/qe_lib/qe.c                    |   61
 arch/powerpc/sysdev/qe_lib/qe_ic.c                 |    8
 arch/powerpc/sysdev/tsi108_pci.c                   |    6
 arch/powerpc/sysdev/uic.c                          |   10
 arch/powerpc/sysdev/xilinx_intc.c                  |    8
 arch/powerpc/xmon/xmon.c                           |    3
 drivers/ata/Kconfig                                |   10
 drivers/ata/Makefile                               |    1
 drivers/ata/pata_macio.c                           | 1427
 drivers/base/cpu.c                                 |   34
 drivers/block/swim3.c                              |   39
 drivers/char/agp/uninorth-agp.c                    |   77
 drivers/char/hvc_console.c                         |    1
 drivers/ide/pmac.c                                 |   92
 drivers/macintosh/macio_asic.c                     |   52
 drivers/macintosh/mediabay.c                       |  328
 drivers/macintosh/nvram.c                          |   11
 drivers/macintosh/therm_adt746x.c                  |   15
 drivers/macintosh/via-pmu.c                        |  160
 drivers/macintosh/windfarm_smu_controls.c          |    2
 drivers/mmc/host/of_mmc_spi.c                      |    2
 drivers/net/ehea/ehea_hcall.h                      |   51
 drivers/net/ehea/ehea_phyp.h                       |    1
 drivers/of/platform.c                              |  305
 drivers/spi/Kconfig                                |   11
 drivers/spi/Makefile                               |    1
 drivers/spi/mpc52xx_psc_spi.c                      |   25
 drivers/spi/mpc52xx_spi.c                          |  520
 drivers/spi/spi_mpc8xxx.c                          |  618
 drivers/spi/xilinx_spi.c                           |    3
 drivers/usb/gadget/fsl_qe_udc.h                    |   15
 drivers/video/offb.c                               |   15
 drivers/watchdog/Kconfig                           |    4
 drivers/watchdog/Makefile                          |    1
 drivers/watchdog/mpc5200_wdt.c                     |  293
 fs/proc/proc_devtree.c                             |   41
 include/linux/cpu.h                                |   15
 include/linux/fsl_devices.h                        |   11
 include/linux/kvm.h                                |    1
 include/linux/pci_ids.h                            |   14
 include/linux/spi/mpc52xx_spi.h                    |   10
 virt/kvm/kvm_main.c                                |    5
 234 files changed, 13126 insertions(+), 2713 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 974e29f5da86..2aae06fcbed7 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -62,6 +62,21 @@ Description: CPU topology files that describe kernel limits related to
 		See Documentation/cputopology.txt for more information.
 
 
+What:		/sys/devices/system/cpu/probe
+		/sys/devices/system/cpu/release
+Date:		November 2009
+Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Dynamic addition and removal of CPUs.  This is not hotplug
+		removal; this is meant for complete removal/addition of the
+		CPU from the system.
+
+		probe: writes to this file will dynamically add a CPU to the
+		system.  Information written to the file to add CPUs is
+		architecture specific.
+
+		release: writes to this file dynamically remove a CPU from
+		the system.  Information written to the file to remove CPUs
+		is architecture specific.
+
 What:		/sys/devices/system/cpu/cpu#/node
 Date:		October 2009
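The probe and release files documented in the hunk above take an architecture-specific string; on pseries it is consumed by the kernel DLPAR code added elsewhere in this merge. A minimal userspace sketch of driving the interface in C; the value written below is only a placeholder, not a documented format:

	/* Sketch: request dynamic addition of a CPU through the new sysfs
	 * probe file.  What the kernel expects in the written string is
	 * architecture specific; "0x10000001" is only a placeholder. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *arg = "0x10000001";	/* placeholder value */
		int fd = open("/sys/devices/system/cpu/probe", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, arg, strlen(arg)) < 0)
			perror("write");
		close(fd);
		return 0;
	}

Writing to release works the same way, with an architecture-specific string identifying the CPU to remove.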
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 9d620c153b04..4d4a644b505e 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -49,6 +49,12 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
 additional_cpus=n (*)	Use this to limit hotpluggable cpus. This option sets
 			cpu_possible_map = cpu_present_map + additional_cpus
 
+cede_offline={"off","on"}  Use this option to disable/enable putting offlined
+			    processors to an extended H_CEDE state on
+			    supported pseries platforms.
+			    If nothing is specified,
+			    cede_offline is set to "on".
+
 (*) Option valid only for following architectures
 - ia64
 
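cede_offline only changes how an already offlined processor is parked; the CPU itself is still taken down through the regular per-CPU online file. A minimal sketch of offlining cpu1 from userspace (cpu1 is just an example):

	/* Sketch: take cpu1 offline via the standard sysfs interface.  With
	 * cede_offline=on (the default described above) a pseries guest
	 * parks the offlined thread in the extended H_CEDE state. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/devices/system/cpu/cpu1/online", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}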
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index e8b5bc24d0ac..39e941515a36 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -20,12 +20,16 @@ Required properities:
 - compatible : should be "fsl,fpga-pixis".
 - reg : should contain the address and the length of the FPPGA register
   set.
+- interrupt-parent: should specify phandle for the interrupt controller.
+- interrupts : should specify event (wakeup) IRQ.
 
 Example (MPC8610HPCD):
 
 	board-control@e8000000 {
 		compatible = "fsl,fpga-pixis";
 		reg = <0xe8000000 32>;
+		interrupt-parent = <&mpic>;
+		interrupts = <8 8>;
 	};
 
 * Freescale BCSR GPIO banks
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
index cabc780f7258..5c6602dbfdc2 100644
--- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
@@ -103,7 +103,22 @@ fsl,mpc5200-gpt nodes
 ---------------------
 On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board
 design supports the internal wdt, then the device node for GPT0 should
-include the empty property 'fsl,has-wdt'.
+include the empty property 'fsl,has-wdt'. Note that this does not activate
+the watchdog. The timer will function as a GPT if the timer api is used, and
+it will function as watchdog if the watchdog device is used. The watchdog
+mode has priority over the gpt mode, i.e. if the watchdog is activated, any
+gpt api call to this timer will fail with -EBUSY.
+
+If you add the property
+	fsl,wdt-on-boot = <n>;
+GPT0 will be marked as in-use watchdog, i.e. blocking every gpt access to it.
+If n>0, the watchdog is started with a timeout of n seconds. If n=0, the
+configuration of the watchdog is not touched. This is useful in two cases:
+- just mark GPT0 as watchdog, blocking gpt accesses, and configure it later;
+- do not touch a configuration assigned by the boot loader which supervises
+  the boot process itself.
+
+The watchdog will respect the CONFIG_WATCHDOG_NOWAYOUT option.
 
 An mpc5200-gpt can be used as a single line GPIO controller. To do so,
 add the following properties to the gpt node:
diff --git a/MAINTAINERS b/MAINTAINERS
index d58fa703ec16..cff133be42c6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3260,6 +3260,7 @@ LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
 W:	http://www.ibm.com/linux/ltc/projects/ppc
 S:	Supported
+F:	arch/powerpc/boot/rs6000.h
 
 LINUX FOR POWERPC (32-BIT AND 64-BIT)
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
@@ -3268,18 +3269,24 @@ W: http://www.penguinppc.org/
 L:	linuxppc-dev@ozlabs.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
 S:	Supported
+F:	Documentation/powerpc/
+F:	arch/powerpc/
 
 LINUX FOR POWER MACINTOSH
 M:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@ozlabs.org
 S:	Maintained
+F:	arch/powerpc/platforms/powermac/
+F:	drivers/macintosh/
 
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M:	Grant Likely <grant.likely@secretlab.ca>
 L:	linuxppc-dev@ozlabs.org
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
+F:	arch/powerpc/platforms/512x/
+F:	arch/powerpc/platforms/52xx/
 
 LINUX FOR POWERPC EMBEDDED PPC4XX
 M:	Josh Boyer <jwboyer@linux.vnet.ibm.com>
@@ -3288,6 +3295,8 @@ W: http://www.penguinppc.org/
 L:	linuxppc-dev@ozlabs.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
 S:	Maintained
+F:	arch/powerpc/platforms/40x/
+F:	arch/powerpc/platforms/44x/
 
 LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
 M:	Grant Likely <grant.likely@secretlab.ca>
@@ -3295,6 +3304,8 @@ W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
 L:	linuxppc-dev@ozlabs.org
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
+F:	arch/powerpc/*/*virtex*
+F:	arch/powerpc/*/*/*virtex*
 
 LINUX FOR POWERPC EMBEDDED PPC8XX
 M:	Vitaly Bordug <vitb@kernel.crashing.org>
@@ -3308,12 +3319,16 @@ M: Kumar Gala <galak@kernel.crashing.org>
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@ozlabs.org
 S:	Maintained
+F:	arch/powerpc/platforms/83xx/
 
 LINUX FOR POWERPC PA SEMI PWRFICIENT
 M:	Olof Johansson <olof@lixom.net>
 W:	http://www.pasemi.com/
 L:	linuxppc-dev@ozlabs.org
 S:	Supported
+F:	arch/powerpc/platforms/pasemi/
+F:	drivers/*/*pasemi*
+F:	drivers/*/*/*pasemi*
 
 LINUX SECURITY MODULE (LSM) FRAMEWORK
 M:	Chris Wright <chrisw@sous-sol.org>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2ba14e77296c..0df57466e783 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -56,6 +56,16 @@ config IRQ_PER_CPU
 	bool
 	default y
 
+config NR_IRQS
+	int "Number of virtual interrupt numbers"
+	range 32 512
+	default "512"
+	help
+	  This defines the number of virtual interrupt numbers the kernel
+	  can manage. Virtual interrupt numbers are what you see in
+	  /proc/interrupts. If you configure your system to have too few,
+	  drivers will fail to load or worse - handle with care.
+
 config STACKTRACE_SUPPORT
 	bool
 	default y
@@ -199,24 +209,14 @@ config DEFAULT_UIMAGE
 config REDBOOT
 	bool
 
-config HIBERNATE_32
-	bool
-	depends on (PPC_PMAC && !SMP) || BROKEN
-	default y
-
-config HIBERNATE_64
-	bool
-	depends on BROKEN || (PPC_PMAC64 && EXPERIMENTAL)
-	default y
-
 config ARCH_HIBERNATION_POSSIBLE
 	bool
-	depends on (PPC64 && HIBERNATE_64) || (PPC32 && HIBERNATE_32)
 	default y
 
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
-	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx
+	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
+		   PPC_85xx || PPC_86xx
 
 config PPC_DCR_NATIVE
 	bool
@@ -320,6 +320,10 @@ config HOTPLUG_CPU
 
 	  Say N if you are unsure.
 
+config ARCH_CPU_PROBE_RELEASE
+	def_bool y
+	depends on HOTPLUG_CPU
+
 config ARCH_ENABLE_MEMORY_HOTPLUG
 	def_bool y
 
@@ -378,6 +382,19 @@ config IRQ_ALL_CPUS
 	  CPU.  Generally saying Y is safe, although some problems have been
 	  reported with SMP Power Macintoshes with this option enabled.
 
+config SPARSE_IRQ
+	bool "Support sparse irq numbering"
+	default y
+	help
+	  This enables support for sparse irqs. This is useful for distro
+	  kernels that want to define a high CONFIG_NR_CPUS value but still
+	  want to have low kernel memory footprint on smaller machines.
+
+	  ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
+	    out the irq_desc[] array in a more NUMA-friendly way. )
+
+	  If you don't know what to do here, say Y.
+
 config NUMA
 	bool "NUMA support"
 	depends on PPC64
@@ -652,6 +669,14 @@ config FSL_PCI
 	select PPC_INDIRECT_PCI
 	select PCI_QUIRKS
 
+config FSL_PMC
+	bool
+	default y
+	depends on SUSPEND && (PPC_85xx || PPC_86xx)
+	help
+	  Freescale MPC85xx/MPC86xx power management controller support
+	  (suspend/resume). For MPC83xx see platforms/83xx/suspend.c
+
 config 4xx_SOC
 	bool
 
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index c920170b7dfe..cd56bb5b347b 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -352,6 +352,7 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
@@ -381,6 +382,7 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/eiger.dts b/arch/powerpc/boot/dts/eiger.dts
index c4a934f2e886..48bcf7187924 100644
--- a/arch/powerpc/boot/dts/eiger.dts
+++ b/arch/powerpc/boot/dts/eiger.dts
@@ -316,6 +316,7 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
@@ -345,6 +346,7 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
@@ -375,6 +377,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>; /* emac2&3 only */
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII1>;
@@ -403,6 +407,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>; /* emac2&3 only */
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII1>;
diff --git a/arch/powerpc/boot/dts/gef_ppc9a.dts b/arch/powerpc/boot/dts/gef_ppc9a.dts
index 910944edd886..c86114e93f1e 100644
--- a/arch/powerpc/boot/dts/gef_ppc9a.dts
+++ b/arch/powerpc/boot/dts/gef_ppc9a.dts
@@ -118,6 +118,12 @@
 			};
 		};
 
+		nvram@3,0 {
+			device_type = "nvram";
+			compatible = "simtek,stk14ca8";
+			reg = <0x3 0x0 0x20000>;
+		};
+
 		fpga@4,0 {
 			compatible = "gef,ppc9a-fpga-regs";
 			reg = <0x4 0x0 0x40>;
diff --git a/arch/powerpc/boot/dts/gef_sbc310.dts b/arch/powerpc/boot/dts/gef_sbc310.dts
index 2107d3c7cfe1..820c2b355ab1 100644
--- a/arch/powerpc/boot/dts/gef_sbc310.dts
+++ b/arch/powerpc/boot/dts/gef_sbc310.dts
@@ -115,6 +115,12 @@
 			};
 		};
 
+		nvram@3,0 {
+			device_type = "nvram";
+			compatible = "simtek,stk14ca8";
+			reg = <0x3 0x0 0x20000>;
+		};
+
 		fpga@4,0 {
 			compatible = "gef,fpga-regs";
 			reg = <0x4 0x0 0x40>;
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index 35a63183eecc..30911adefc8e 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -84,6 +84,12 @@
 			  6 0 0xfd000000 0x00800000	// IO FPGA (8-bit)
 			  7 0 0xfd800000 0x00800000>;	// IO FPGA (32-bit)
 
+		nvram@3,0 {
+			device_type = "nvram";
+			compatible = "simtek,stk14ca8";
+			reg = <0x3 0x0 0x20000>;
+		};
+
 		fpga@4,0 {
 			compatible = "gef,fpga-regs";
 			reg = <0x4 0x0 0x40>;
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index f3787a27f634..f6f618939293 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -292,6 +292,7 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
@@ -321,6 +322,7 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
@@ -351,6 +353,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>; /* emac2&3 only */
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII1>;
@@ -379,6 +383,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>; /* emac2&3 only */
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII1>;
diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts
index 5b2a4947bf82..2b256694eca6 100644
--- a/arch/powerpc/boot/dts/haleakala.dts
+++ b/arch/powerpc/boot/dts/haleakala.dts
@@ -226,6 +226,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 077819bc3cbd..51eb6ed5da2d 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -16,7 +16,7 @@
 
 / {
 	#address-cells = <2>;
-	#size-cells = <1>;
+	#size-cells = <2>;
 	model = "amcc,katmai";
 	compatible = "amcc,katmai";
 	dcr-parent = <&{/cpus/cpu@0}>;
@@ -49,7 +49,7 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x00000000 0x00000000 0x00000000>; /* Filled in by zImage */
+		reg = <0x0 0x00000000 0x0 0x00000000>; /* Filled in by U-Boot */
 	};
 
 	UIC0: interrupt-controller0 {
@@ -112,7 +112,15 @@
 		compatible = "ibm,plb-440spe", "ibm,plb-440gp", "ibm,plb4";
 		#address-cells = <2>;
 		#size-cells = <1>;
-		ranges;
+		/*        addr-child     addr-parent    size */
+		ranges = <0x4 0xe0000000 0x4 0xe0000000 0x20000000
+			  0xc 0x00000000 0xc 0x00000000 0x20000000
+			  0xd 0x00000000 0xd 0x00000000 0x80000000
+			  0xd 0x80000000 0xd 0x80000000 0x80000000
+			  0xe 0x00000000 0xe 0x00000000 0x80000000
+			  0xe 0x80000000 0xe 0x80000000 0x80000000
+			  0xf 0x00000000 0xf 0x00000000 0x80000000
+			  0xf 0x80000000 0xf 0x80000000 0x80000000>;
 		clock-frequency = <0>; /* Filled in by zImage */
 
 		SDRAM0: sdram {
@@ -245,8 +253,8 @@
 		ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000
 			  0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>;
 
-		/* Inbound 2GB range starting at 0 */
-		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+		/* Inbound 4GB range starting at 0 */
+		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
 
 		/* This drives busses 0 to 0xf */
 		bus-range = <0x0 0xf>;
@@ -289,10 +297,10 @@
 		ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
 			  0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
 
-		/* Inbound 2GB range starting at 0 */
-		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+		/* Inbound 4GB range starting at 0 */
+		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
 
-		/* This drives busses 10 to 0x1f */
+		/* This drives busses 0x10 to 0x1f */
 		bus-range = <0x10 0x1f>;
 
 		/* Legacy interrupts (note the weird polarity, the bridge seems
@@ -330,10 +338,10 @@
 		ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
 			  0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
 
-		/* Inbound 2GB range starting at 0 */
-		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+		/* Inbound 4GB range starting at 0 */
+		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
 
-		/* This drives busses 10 to 0x1f */
+		/* This drives busses 0x20 to 0x2f */
 		bus-range = <0x20 0x2f>;
 
 		/* Legacy interrupts (note the weird polarity, the bridge seems
@@ -371,10 +379,10 @@
 		ranges = <0x02000000 0x00000000 0x80000000 0x0000000f 0x00000000 0x00000000 0x80000000
 			  0x01000000 0x00000000 0x00000000 0x0000000f 0x80020000 0x00000000 0x00010000>;
 
-		/* Inbound 2GB range starting at 0 */
-		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+		/* Inbound 4GB range starting at 0 */
+		dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
 
-		/* This drives busses 10 to 0x1f */
+		/* This drives busses 0x30 to 0x3f */
 		bus-range = <0x30 0x3f>;
 
 		/* Legacy interrupts (note the weird polarity, the bridge seems
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index c46561456ede..083e68eeaca4 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -272,6 +272,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
@@ -300,6 +302,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts
index 167044f7de1d..65b8b4f27efe 100644
--- a/arch/powerpc/boot/dts/kmeter1.dts
+++ b/arch/powerpc/boot/dts/kmeter1.dts
@@ -59,6 +59,13 @@
 		reg = <0xe0000000 0x00000200>;
 		bus-frequency = <0>; /* Filled in by U-Boot */
 
+		pmc: power@b00 {
+			compatible = "fsl,mpc8360-pmc", "fsl,mpc8349-pmc";
+			reg = <0xb00 0x100 0xa00 0x100>;
+			interrupts = <80 0x8>;
+			interrupt-parent = <&ipic>;
+		};
+
 		i2c@3000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/powerpc/boot/dts/makalu.dts b/arch/powerpc/boot/dts/makalu.dts
index ffc246e72670..63d48b632c84 100644
--- a/arch/powerpc/boot/dts/makalu.dts
+++ b/arch/powerpc/boot/dts/makalu.dts
@@ -227,6 +227,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x0000003f>;	/* Start at 6 */
 			rgmii-device = <&RGMII0>;
@@ -255,6 +257,8 @@
 			max-frame-size = <9000>;
 			rx-fifo-size = <4096>;
 			tx-fifo-size = <2048>;
+			rx-fifo-size-gige = <16384>;
+			tx-fifo-size-gige = <16384>;
 			phy-mode = "rgmii";
 			phy-map = <0x00000000>;
 			rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 436c9c671dd9..05ad8c98e527 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -79,6 +79,13 @@
 			reg = <0x200 0x100>;
 		};
 
+		pmc: power@b00 {
+			compatible = "fsl,mpc8323-pmc", "fsl,mpc8349-pmc";
+			reg = <0xb00 0x100 0xa00 0x100>;
+			interrupts = <80 0x8>;
+			interrupt-parent = <&ipic>;
+		};
+
 		i2c@3000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -163,6 +170,7 @@
 			fsl,channel-fifo-len = <24>;
 			fsl,exec-units-mask = <0x4c>;
 			fsl,descriptor-types-mask = <0x0122003f>;
+			sleep = <&pmc 0x03000000>;
 		};
 
 		ipic: pic@700 {
@@ -428,5 +436,6 @@
 			  0xe0008300 0x8>;		/* config space access registers */
 		compatible = "fsl,mpc8349-pci";
 		device_type = "pci";
+		sleep = <&pmc 0x00010000>;
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index 9a0952f74b81..f4fadb23ad6f 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -62,6 +62,13 @@
 			reg = <0x200 0x100>;
 		};
 
+		pmc: power@b00 {
+			compatible = "fsl,mpc8323-pmc", "fsl,mpc8349-pmc";
+			reg = <0xb00 0x100 0xa00 0x100>;
+			interrupts = <80 0x8>;
+			interrupt-parent = <&ipic>;
+		};
+
 		i2c@3000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -141,6 +148,7 @@
 			fsl,channel-fifo-len = <24>;
 			fsl,exec-units-mask = <0x4c>;
 			fsl,descriptor-types-mask = <0x0122003f>;
+			sleep = <&pmc 0x03000000>;
 		};
 
 		ipic:pic@700 {
@@ -360,5 +368,6 @@
 			  0xe0008300 0x8>;		/* config space access registers */
 		compatible = "fsl,mpc8349-pci";
 		device_type = "pci";
+		sleep = <&pmc 0x00010000>;
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 39ff4c829caf..45cfa1c50a2a 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -99,6 +99,13 @@
 			reg = <0x200 0x100>;
 		};
 
+		pmc: power@b00 {
+			compatible = "fsl,mpc8360-pmc", "fsl,mpc8349-pmc";
+			reg = <0xb00 0x100 0xa00 0x100>;
+			interrupts = <80 0x8>;
+			interrupt-parent = <&ipic>;
+		};
+
 		i2c@3000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -194,6 +201,7 @@
 			fsl,channel-fifo-len = <24>;
 			fsl,exec-units-mask = <0x7e>;
 			fsl,descriptor-types-mask = <0x01010ebf>;
+			sleep = <&pmc 0x03000000>;
 		};
 
 		ipic: pic@700 {
@@ -470,5 +478,6 @@
 			  0xe0008300 0x8>;		/* config space access registers */
 		compatible = "fsl,mpc8349-pci";
 		device_type = "pci";
+		sleep = <&pmc 0x00010000>;
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index 6315d6fcc58a..bdf4459677b1 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -71,6 +71,13 @@
 			reg = <0x200 0x100>;
 		};
 
+		pmc: power@b00 {
+			compatible = "fsl,mpc8360-pmc", "fsl,mpc8349-pmc";
+			reg = <0xb00 0x100 0xa00 0x100>;
+			interrupts = <80 0x8>;
+			interrupt-parent = <&ipic>;
+		};
+
 		i2c@3000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -161,6 +168,7 @@
 			fsl,channel-fifo-len = <24>;
 			fsl,exec-units-mask = <0x7e>;
 			fsl,descriptor-types-mask = <0x01010ebf>;
+			sleep = <&pmc 0x03000000>;
 		};
 
 		ipic: interrupt-controller@700 {
@@ -455,6 +463,7 @@
 				 0xa800 0 0 2 &ipic 20 8
 				 0xa800 0 0 3 &ipic 21 8
 				 0xa800 0 0 4 &ipic 18 8>;
+		sleep = <&pmc 0x00010000>;
 		/* filled by u-boot */
 		bus-range = <0 0>;
 		clock-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 00c2bbda7013..6d892ba74e55 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -40,6 +40,8 @@
 		i-cache-line-size = <32>;	// 32 bytes
 		d-cache-size = <0x8000>;	// L1, 32K
 		i-cache-size = <0x8000>;	// L1, 32K
+		sleep = <&pmc 0x00008000	// core
+			 &pmc 0x00004000>;	// timebase
 		timebase-frequency = <0>;
 		bus-frequency = <0>;
 		clock-frequency = <0>;
@@ -94,31 +96,41 @@
 		interrupts = <16 2>;
 	};
 
-	i2c@3000 {
+	i2c-sleep-nexus {
 		#address-cells = <1>;
-		#size-cells = <0>;
-		cell-index = <0>;
-		compatible = "fsl-i2c";
-		reg = <0x3000 0x100>;
-		interrupts = <43 2>;
-		interrupt-parent = <&mpic>;
-		dfsrr;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		sleep = <&pmc 0x00000004>;
+		ranges;
 
-		rtc@68 {
-			compatible = "dallas,ds1374";
-			reg = <0x68>;
+		i2c@3000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			cell-index = <0>;
+			compatible = "fsl-i2c";
+			reg = <0x3000 0x100>;
+			interrupts = <43 2>;
+			interrupt-parent = <&mpic>;
+			dfsrr;
+
+			rtc@68 {
+				compatible = "dallas,ds1374";
+				reg = <0x68>;
+				interrupts = <3 1>;
+				interrupt-parent = <&mpic>;
+			};
 		};
-	};
 
 		i2c@3100 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			cell-index = <1>;
 			compatible = "fsl-i2c";
 			reg = <0x3100 0x100>;
 			interrupts = <43 2>;
 			interrupt-parent = <&mpic>;
 			dfsrr;
+		};
 	};
 
@@ -128,6 +140,8 @@
 		reg = <0x21300 0x4>;
 		ranges = <0x0 0x21100 0x200>;
 		cell-index = <0>;
+		sleep = <&pmc 0x00000400>;
+
 		dma-channel@0 {
 			compatible = "fsl,mpc8568-dma-channel",
 				     "fsl,eloplus-dma-channel";
@@ -176,6 +190,7 @@
 		interrupt-parent = <&mpic>;
 		tbi-handle = <&tbi0>;
 		phy-handle = <&phy2>;
+		sleep = <&pmc 0x00000080>;
 
 		mdio@520 {
 			#address-cells = <1>;
@@ -228,6 +243,7 @@
 		interrupt-parent = <&mpic>;
 		tbi-handle = <&tbi1>;
 		phy-handle = <&phy3>;
+		sleep = <&pmc 0x00000040>;
 
 		mdio@520 {
 			#address-cells = <1>;
@@ -242,30 +258,47 @@
 		};
 	};
 
-	serial0: serial@4500 {
-		cell-index = <0>;
-		device_type = "serial";
-		compatible = "ns16550";
-		reg = <0x4500 0x100>;
-		clock-frequency = <0>;
-		interrupts = <42 2>;
-		interrupt-parent = <&mpic>;
-	};
+	duart-sleep-nexus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		sleep = <&pmc 0x00000002>;
+		ranges;
+
+		serial0: serial@4500 {
+			cell-index = <0>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <0x4500 0x100>;
+			clock-frequency = <0>;
+			interrupts = <42 2>;
+			interrupt-parent = <&mpic>;
+		};
+
+		serial1: serial@4600 {
+			cell-index = <1>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <0x4600 0x100>;
+			clock-frequency = <0>;
+			interrupts = <42 2>;
+			interrupt-parent = <&mpic>;
+		};
+	};
 
-	global-utilities@e0000 {	//global utilities block
-		compatible = "fsl,mpc8548-guts";
+	global-utilities@e0000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts";
 		reg = <0xe0000 0x1000>;
+		ranges = <0 0xe0000 0x1000>;
 		fsl,has-rstcr;
-	};
 
-	serial1: serial@4600 {
-		cell-index = <1>;
-		device_type = "serial";
-		compatible = "ns16550";
-		reg = <0x4600 0x100>;
-		clock-frequency = <0>;
-		interrupts = <42 2>;
-		interrupt-parent = <&mpic>;
+		pmc: power@70 {
+			compatible = "fsl,mpc8568-pmc",
+				     "fsl,mpc8548-pmc";
+			reg = <0x70 0x20>;
+		};
 	};
 
 	crypto@30000 {
@@ -277,6 +310,7 @@
 		fsl,channel-fifo-len = <24>;
 		fsl,exec-units-mask = <0xfe>;
 		fsl,descriptor-types-mask = <0x12b0ebf>;
+		sleep = <&pmc 0x01000000>;
 	};
 
 	mpic: pic@40000 {
@@ -376,6 +410,7 @@
 		compatible = "fsl,qe";
 		ranges = <0x0 0xe0080000 0x40000>;
 		reg = <0xe0080000 0x480>;
+		sleep = <&pmc 0x00000800>;
 		brg-frequency = <0>;
 		bus-frequency = <396000000>;
 		fsl,qe-num-riscs = <2>;
@@ -509,6 +544,7 @@
 		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
 			  0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>;
+		sleep = <&pmc 0x80000000>;
 		clock-frequency = <66666666>;
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
@@ -534,6 +570,7 @@
 		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
 			  0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>;
+		sleep = <&pmc 0x20000000>;
 		clock-frequency = <33333333>;
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
@@ -570,5 +607,7 @@
 			      55 2 /* msg2_tx */
 			      56 2 /* msg2_rx */>;
 		interrupt-parent = <&mpic>;
+		sleep = <&pmc 0x00080000	/* controller */
+			 &pmc 0x00040000>;	/* message unit */
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 1e3ec8f059bf..795eb362fcf9 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -41,6 +41,8 @@
 		i-cache-line-size = <32>;	// 32 bytes
 		d-cache-size = <0x8000>;	// L1, 32K
 		i-cache-size = <0x8000>;	// L1, 32K
+		sleep = <&pmc 0x00008000	// core
+			 &pmc 0x00004000>;	// timebase
 		timebase-frequency = <0>;
 		bus-frequency = <0>;
 		clock-frequency = <0>;
@@ -59,6 +61,7 @@
 		reg = <0xe0005000 0x1000>;
 		interrupts = <19 2>;
 		interrupt-parent = <&mpic>;
+		sleep = <&pmc 0x08000000>;
 
 		ranges = <0x0 0x0 0xfe000000 0x02000000
 			  0x1 0x0 0xf8000000 0x00008000
@@ -158,51 +161,69 @@
158 interrupts = <18 2>; 161 interrupts = <18 2>;
159 }; 162 };
160 163
161 i2c@3000 { 164 i2c-sleep-nexus {
162 #address-cells = <1>; 165 #address-cells = <1>;
163 #size-cells = <0>; 166 #size-cells = <1>;
164 cell-index = <0>; 167 compatible = "simple-bus";
165 compatible = "fsl-i2c"; 168 sleep = <&pmc 0x00000004>;
166 reg = <0x3000 0x100>; 169 ranges;
167 interrupts = <43 2>; 170
168 interrupt-parent = <&mpic>; 171 i2c@3000 {
169 dfsrr; 172 #address-cells = <1>;
173 #size-cells = <0>;
174 cell-index = <0>;
175 compatible = "fsl-i2c";
176 reg = <0x3000 0x100>;
177 interrupts = <43 2>;
178 interrupt-parent = <&mpic>;
179 dfsrr;
180
181 rtc@68 {
182 compatible = "dallas,ds1374";
183 reg = <0x68>;
184 interrupts = <3 1>;
185 interrupt-parent = <&mpic>;
186 };
187 };
170 188
171 rtc@68 { 189 i2c@3100 {
172 compatible = "dallas,ds1374"; 190 #address-cells = <1>;
173 reg = <0x68>; 191 #size-cells = <0>;
192 cell-index = <1>;
193 compatible = "fsl-i2c";
194 reg = <0x3100 0x100>;
195 interrupts = <43 2>;
196 interrupt-parent = <&mpic>;
197 dfsrr;
174 }; 198 };
175 }; 199 };
176 200
177 i2c@3100 { 201 duart-sleep-nexus {
178 #address-cells = <1>; 202 #address-cells = <1>;
179 #size-cells = <0>; 203 #size-cells = <1>;
180 cell-index = <1>; 204 compatible = "simple-bus";
181 compatible = "fsl-i2c"; 205 sleep = <&pmc 0x00000002>;
182 reg = <0x3100 0x100>; 206 ranges;
183 interrupts = <43 2>;
184 interrupt-parent = <&mpic>;
185 dfsrr;
186 };
187 207
188 serial0: serial@4500 { 208 serial0: serial@4500 {
189 cell-index = <0>; 209 cell-index = <0>;
190 device_type = "serial"; 210 device_type = "serial";
191 compatible = "ns16550"; 211 compatible = "ns16550";
192 reg = <0x4500 0x100>; 212 reg = <0x4500 0x100>;
193 clock-frequency = <0>; 213 clock-frequency = <0>;
194 interrupts = <42 2>; 214 interrupts = <42 2>;
195 interrupt-parent = <&mpic>; 215 interrupt-parent = <&mpic>;
196 }; 216 };
197 217
198 serial1: serial@4600 { 218 serial1: serial@4600 {
199 cell-index = <1>; 219 cell-index = <1>;
200 device_type = "serial"; 220 device_type = "serial";
201 compatible = "ns16550"; 221 compatible = "ns16550";
202 reg = <0x4600 0x100>; 222 reg = <0x4600 0x100>;
203 clock-frequency = <0>; 223 clock-frequency = <0>;
204 interrupts = <42 2>; 224 interrupts = <42 2>;
205 interrupt-parent = <&mpic>; 225 interrupt-parent = <&mpic>;
226 };
206 }; 227 };
207 228
208 L2: l2-cache-controller@20000 { 229 L2: l2-cache-controller@20000 {
@@ -260,6 +281,7 @@
 		reg = <0x2e000 0x1000>;
 		interrupts = <72 0x8>;
 		interrupt-parent = <&mpic>;
+		sleep = <&pmc 0x00200000>;
 		/* Filled in by U-Boot */
 		clock-frequency = <0>;
 		status = "disabled";
@@ -276,6 +298,7 @@
 		fsl,channel-fifo-len = <24>;
 		fsl,exec-units-mask = <0xbfe>;
 		fsl,descriptor-types-mask = <0x3ab0ebf>;
+		sleep = <&pmc 0x01000000>;
 	};
 
 	mpic: pic@40000 {
@@ -304,9 +327,18 @@
 	};
 
 	global-utilities@e0000 {
-		compatible = "fsl,mpc8569-guts";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts";
 		reg = <0xe0000 0x1000>;
+		ranges = <0 0xe0000 0x1000>;
 		fsl,has-rstcr;
+
+		pmc: power@70 {
+			compatible = "fsl,mpc8569-pmc",
+				     "fsl,mpc8548-pmc";
+			reg = <0x70 0x20>;
+		};
 	};
 
 	par_io@e0100 {
@@ -422,6 +454,7 @@
 		compatible = "fsl,qe";
 		ranges = <0x0 0xe0080000 0x40000>;
 		reg = <0xe0080000 0x480>;
+		sleep = <&pmc 0x00000800>;
 		brg-frequency = <0>;
 		bus-frequency = <0>;
 		fsl,qe-num-riscs = <4>;
@@ -684,6 +717,7 @@
 		bus-range = <0 255>;
 		ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
 			  0x1000000 0x0 0x00000000 0xe2800000 0x0 0x00800000>;
+		sleep = <&pmc 0x20000000>;
 		clock-frequency = <33333333>;
 		pcie@0 {
 			reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -714,5 +748,6 @@
714 55 2 /* msg2_tx */ 748 55 2 /* msg2_tx */
715 56 2 /* msg2_rx */>; 749 56 2 /* msg2_rx */>;
716 interrupt-parent = <&mpic>; 750 interrupt-parent = <&mpic>;
751 sleep = <&pmc 0x00080000>;
717 }; 752 };
718}; 753};
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index f468d215f716..9535ce68caae 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -35,6 +35,8 @@
35 i-cache-line-size = <32>; 35 i-cache-line-size = <32>;
36 d-cache-size = <32768>; // L1 36 d-cache-size = <32768>; // L1
37 i-cache-size = <32768>; // L1 37 i-cache-size = <32768>; // L1
38 sleep = <&pmc 0x00008000 0 // core
39 &pmc 0x00004000 0>; // timebase
38 timebase-frequency = <0>; // From uboot 40 timebase-frequency = <0>; // From uboot
39 bus-frequency = <0>; // From uboot 41 bus-frequency = <0>; // From uboot
40 clock-frequency = <0>; // From uboot 42 clock-frequency = <0>; // From uboot
@@ -60,6 +62,7 @@
60 5 0 0xe8480000 0x00008000 62 5 0 0xe8480000 0x00008000
61 6 0 0xe84c0000 0x00008000 63 6 0 0xe84c0000 0x00008000
62 3 0 0xe8000000 0x00000020>; 64 3 0 0xe8000000 0x00000020>;
65 sleep = <&pmc 0x08000000 0>;
63 66
64 flash@0,0 { 67 flash@0,0 {
65 compatible = "cfi-flash"; 68 compatible = "cfi-flash";
@@ -105,6 +108,8 @@
105 compatible = "fsl,fpga-pixis"; 108 compatible = "fsl,fpga-pixis";
106 reg = <3 0 0x20>; 109 reg = <3 0 0x20>;
107 ranges = <0 3 0 0x20>; 110 ranges = <0 3 0 0x20>;
111 interrupt-parent = <&mpic>;
112 interrupts = <8 8>;
108 113
109 sdcsr_pio: gpio-controller@a { 114 sdcsr_pio: gpio-controller@a {
110 #gpio-cells = <2>; 115 #gpio-cells = <2>;
@@ -163,6 +168,7 @@
163 reg = <0x3100 0x100>; 168 reg = <0x3100 0x100>;
164 interrupts = <43 2>; 169 interrupts = <43 2>;
165 interrupt-parent = <&mpic>; 170 interrupt-parent = <&mpic>;
171 sleep = <&pmc 0x00000004 0>;
166 dfsrr; 172 dfsrr;
167 }; 173 };
168 174
@@ -174,6 +180,7 @@
174 clock-frequency = <0>; 180 clock-frequency = <0>;
175 interrupts = <42 2>; 181 interrupts = <42 2>;
176 interrupt-parent = <&mpic>; 182 interrupt-parent = <&mpic>;
183 sleep = <&pmc 0x00000002 0>;
177 }; 184 };
178 185
179 serial1: serial@4600 { 186 serial1: serial@4600 {
@@ -184,6 +191,7 @@
184 clock-frequency = <0>; 191 clock-frequency = <0>;
185 interrupts = <42 2>; 192 interrupts = <42 2>;
186 interrupt-parent = <&mpic>; 193 interrupt-parent = <&mpic>;
194 sleep = <&pmc 0x00000008 0>;
187 }; 195 };
188 196
189 spi@7000 { 197 spi@7000 {
@@ -196,6 +204,7 @@
196 interrupt-parent = <&mpic>; 204 interrupt-parent = <&mpic>;
197 mode = "cpu"; 205 mode = "cpu";
198 gpios = <&sdcsr_pio 7 0>; 206 gpios = <&sdcsr_pio 7 0>;
207 sleep = <&pmc 0x00000800 0>;
199 208
200 mmc-slot@0 { 209 mmc-slot@0 {
201 compatible = "fsl,mpc8610hpcd-mmc-slot", 210 compatible = "fsl,mpc8610hpcd-mmc-slot",
@@ -213,6 +222,7 @@
213 reg = <0x2c000 100>; 222 reg = <0x2c000 100>;
214 interrupts = <72 2>; 223 interrupts = <72 2>;
215 interrupt-parent = <&mpic>; 224 interrupt-parent = <&mpic>;
225 sleep = <&pmc 0x04000000 0>;
216 }; 226 };
217 227
218 mpic: interrupt-controller@40000 { 228 mpic: interrupt-controller@40000 {
@@ -241,9 +251,18 @@
241 }; 251 };
242 252
243 global-utilities@e0000 { 253 global-utilities@e0000 {
254 #address-cells = <1>;
255 #size-cells = <1>;
244 compatible = "fsl,mpc8610-guts"; 256 compatible = "fsl,mpc8610-guts";
245 reg = <0xe0000 0x1000>; 257 reg = <0xe0000 0x1000>;
258 ranges = <0 0xe0000 0x1000>;
246 fsl,has-rstcr; 259 fsl,has-rstcr;
260
261 pmc: power@70 {
262 compatible = "fsl,mpc8610-pmc",
263 "fsl,mpc8641d-pmc";
264 reg = <0x70 0x20>;
265 };
247 }; 266 };
248 267
249 wdt@e4000 { 268 wdt@e4000 {
@@ -262,6 +281,7 @@
262 fsl,playback-dma = <&dma00>; 281 fsl,playback-dma = <&dma00>;
263 fsl,capture-dma = <&dma01>; 282 fsl,capture-dma = <&dma01>;
264 fsl,fifo-depth = <8>; 283 fsl,fifo-depth = <8>;
284 sleep = <&pmc 0 0x08000000>;
265 }; 285 };
266 286
267 ssi@16100 { 287 ssi@16100 {
@@ -271,6 +291,7 @@
271 interrupt-parent = <&mpic>; 291 interrupt-parent = <&mpic>;
272 interrupts = <63 2>; 292 interrupts = <63 2>;
273 fsl,fifo-depth = <8>; 293 fsl,fifo-depth = <8>;
294 sleep = <&pmc 0 0x04000000>;
274 }; 295 };
275 296
276 dma@21300 { 297 dma@21300 {
@@ -280,6 +301,7 @@
280 cell-index = <0>; 301 cell-index = <0>;
281 reg = <0x21300 0x4>; /* DMA general status register */ 302 reg = <0x21300 0x4>; /* DMA general status register */
282 ranges = <0x0 0x21100 0x200>; 303 ranges = <0x0 0x21100 0x200>;
304 sleep = <&pmc 0x00000400 0>;
283 305
284 dma00: dma-channel@0 { 306 dma00: dma-channel@0 {
285 compatible = "fsl,mpc8610-dma-channel", 307 compatible = "fsl,mpc8610-dma-channel",
@@ -322,6 +344,7 @@
322 cell-index = <1>; 344 cell-index = <1>;
323 reg = <0xc300 0x4>; /* DMA general status register */ 345 reg = <0xc300 0x4>; /* DMA general status register */
324 ranges = <0x0 0xc100 0x200>; 346 ranges = <0x0 0xc100 0x200>;
347 sleep = <&pmc 0x00000200 0>;
325 348
326 dma-channel@0 { 349 dma-channel@0 {
327 compatible = "fsl,mpc8610-dma-channel", 350 compatible = "fsl,mpc8610-dma-channel",
@@ -369,6 +392,7 @@
369 bus-range = <0 0>; 392 bus-range = <0 0>;
370 ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000 393 ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
371 0x01000000 0x0 0x00000000 0xe1000000 0x0 0x00100000>; 394 0x01000000 0x0 0x00000000 0xe1000000 0x0 0x00100000>;
395 sleep = <&pmc 0x80000000 0>;
372 clock-frequency = <33333333>; 396 clock-frequency = <33333333>;
373 interrupt-parent = <&mpic>; 397 interrupt-parent = <&mpic>;
374 interrupts = <24 2>; 398 interrupts = <24 2>;
@@ -398,6 +422,7 @@
398 bus-range = <1 3>; 422 bus-range = <1 3>;
399 ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000 423 ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
400 0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>; 424 0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
425 sleep = <&pmc 0x40000000 0>;
401 clock-frequency = <33333333>; 426 clock-frequency = <33333333>;
402 interrupt-parent = <&mpic>; 427 interrupt-parent = <&mpic>;
403 interrupts = <26 2>; 428 interrupts = <26 2>;
@@ -474,6 +499,7 @@
474 0x0000 0 0 4 &mpic 7 1>; 499 0x0000 0 0 4 &mpic 7 1>;
475 interrupt-parent = <&mpic>; 500 interrupt-parent = <&mpic>;
476 interrupts = <25 2>; 501 interrupts = <25 2>;
502 sleep = <&pmc 0x20000000 0>;
477 clock-frequency = <33333333>; 503 clock-frequency = <33333333>;
478 }; 504 };
479}; 505};
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
new file mode 100644
index 000000000000..df5269093af8
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -0,0 +1,477 @@
1/*
2 * P1020 RDB Device Tree Source
3 *
4 * Copyright 2009 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/dts-v1/;
13/ {
14 model = "fsl,P1020";
15 compatible = "fsl,P1020RDB";
16 #address-cells = <2>;
17 #size-cells = <2>;
18
19 aliases {
20 serial0 = &serial0;
21 serial1 = &serial1;
22 pci0 = &pci0;
23 pci1 = &pci1;
24 };
25
26 cpus {
27 #address-cells = <1>;
28 #size-cells = <0>;
29
30 PowerPC,P1020@0 {
31 device_type = "cpu";
32 reg = <0x0>;
33 next-level-cache = <&L2>;
34 };
35
36 PowerPC,P1020@1 {
37 device_type = "cpu";
38 reg = <0x1>;
39 next-level-cache = <&L2>;
40 };
41 };
42
43 memory {
44 device_type = "memory";
45 };
46
47 localbus@ffe05000 {
48 #address-cells = <2>;
49 #size-cells = <1>;
50 compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
51 reg = <0 0xffe05000 0 0x1000>;
52 interrupts = <19 2>;
53 interrupt-parent = <&mpic>;
54
55 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
56 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
57 0x1 0x0 0x0 0xffa00000 0x00040000
58 0x2 0x0 0x0 0xffb00000 0x00020000>;
59
60 nor@0,0 {
61 #address-cells = <1>;
62 #size-cells = <1>;
63 compatible = "cfi-flash";
64 reg = <0x0 0x0 0x1000000>;
65 bank-width = <2>;
66 device-width = <1>;
67
68 partition@0 {
69 /* This location must not be altered */
70 /* 256KB for Vitesse 7385 Switch firmware */
71 reg = <0x0 0x00040000>;
72 label = "NOR (RO) Vitesse-7385 Firmware";
73 read-only;
74 };
75
76 partition@40000 {
77 /* 256KB for DTB Image */
78 reg = <0x00040000 0x00040000>;
79 label = "NOR (RO) DTB Image";
80 read-only;
81 };
82
83 partition@80000 {
84 /* 3.5 MB for Linux Kernel Image */
85 reg = <0x00080000 0x00380000>;
86 label = "NOR (RO) Linux Kernel Image";
87 read-only;
88 };
89
90 partition@400000 {
91 /* 11MB for JFFS2 based Root file System */
92 reg = <0x00400000 0x00b00000>;
93 label = "NOR (RW) JFFS2 Root File System";
94 };
95
96 partition@f00000 {
97 /* This location must not be altered */
98 /* 512KB for u-boot Bootloader Image */
99 /* 512KB for u-boot Environment Variables */
100 reg = <0x00f00000 0x00100000>;
101 label = "NOR (RO) U-Boot Image";
102 read-only;
103 };
104 };
105
106 nand@1,0 {
107 #address-cells = <1>;
108 #size-cells = <1>;
109 compatible = "fsl,p1020-fcm-nand",
110 "fsl,elbc-fcm-nand";
111 reg = <0x1 0x0 0x40000>;
112
113 partition@0 {
114 /* This location must not be altered */
115 /* 1MB for u-boot Bootloader Image */
116 reg = <0x0 0x00100000>;
117 label = "NAND (RO) U-Boot Image";
118 read-only;
119 };
120
121 partition@100000 {
122 /* 1MB for DTB Image */
123 reg = <0x00100000 0x00100000>;
124 label = "NAND (RO) DTB Image";
125 read-only;
126 };
127
128 partition@200000 {
129 /* 4MB for Linux Kernel Image */
130 reg = <0x00200000 0x00400000>;
131 label = "NAND (RO) Linux Kernel Image";
132 read-only;
133 };
134
135 partition@600000 {
136 /* 4MB for Compressed Root file System Image */
137 reg = <0x00600000 0x00400000>;
138 label = "NAND (RO) Compressed RFS Image";
139 read-only;
140 };
141
142 partition@a00000 {
143 /* 7MB for JFFS2 based Root file System */
144 reg = <0x00a00000 0x00700000>;
145 label = "NAND (RW) JFFS2 Root File System";
146 };
147
148 partition@1100000 {
149 /* 15MB for JFFS2 based Root file System */
150 reg = <0x01100000 0x00f00000>;
151 label = "NAND (RW) Writable User area";
152 };
153 };
154
155 L2switch@2,0 {
156 #address-cells = <1>;
157 #size-cells = <1>;
158 compatible = "vitesse-7385";
159 reg = <0x2 0x0 0x20000>;
160 };
161
162 };
163
164 soc@ffe00000 {
165 #address-cells = <1>;
166 #size-cells = <1>;
167 device_type = "soc";
168 compatible = "fsl,p1020-immr", "simple-bus";
169 ranges = <0x0 0x0 0xffe00000 0x100000>;
170 bus-frequency = <0>; // Filled out by uboot.
171
172 ecm-law@0 {
173 compatible = "fsl,ecm-law";
174 reg = <0x0 0x1000>;
175 fsl,num-laws = <12>;
176 };
177
178 ecm@1000 {
179 compatible = "fsl,p1020-ecm", "fsl,ecm";
180 reg = <0x1000 0x1000>;
181 interrupts = <16 2>;
182 interrupt-parent = <&mpic>;
183 };
184
185 memory-controller@2000 {
186 compatible = "fsl,p1020-memory-controller";
187 reg = <0x2000 0x1000>;
188 interrupt-parent = <&mpic>;
189 interrupts = <16 2>;
190 };
191
192 i2c@3000 {
193 #address-cells = <1>;
194 #size-cells = <0>;
195 cell-index = <0>;
196 compatible = "fsl-i2c";
197 reg = <0x3000 0x100>;
198 interrupts = <43 2>;
199 interrupt-parent = <&mpic>;
200 dfsrr;
201 rtc@68 {
202 compatible = "dallas,ds1339";
203 reg = <0x68>;
204 };
205 };
206
207 i2c@3100 {
208 #address-cells = <1>;
209 #size-cells = <0>;
210 cell-index = <1>;
211 compatible = "fsl-i2c";
212 reg = <0x3100 0x100>;
213 interrupts = <43 2>;
214 interrupt-parent = <&mpic>;
215 dfsrr;
216 };
217
218 serial0: serial@4500 {
219 cell-index = <0>;
220 device_type = "serial";
221 compatible = "ns16550";
222 reg = <0x4500 0x100>;
223 clock-frequency = <0>;
224 interrupts = <42 2>;
225 interrupt-parent = <&mpic>;
226 };
227
228 serial1: serial@4600 {
229 cell-index = <1>;
230 device_type = "serial";
231 compatible = "ns16550";
232 reg = <0x4600 0x100>;
233 clock-frequency = <0>;
234 interrupts = <42 2>;
235 interrupt-parent = <&mpic>;
236 };
237
238 spi@7000 {
239 cell-index = <0>;
240 #address-cells = <1>;
241 #size-cells = <0>;
242 compatible = "fsl,espi";
243 reg = <0x7000 0x1000>;
244 interrupts = <59 0x2>;
245 interrupt-parent = <&mpic>;
246 mode = "cpu";
247
248 fsl_m25p80@0 {
249 #address-cells = <1>;
250 #size-cells = <1>;
251 compatible = "fsl,espi-flash";
252 reg = <0>;
253 linux,modalias = "fsl_m25p80";
254 modal = "s25sl128b";
255 spi-max-frequency = <50000000>;
256 mode = <0>;
257
258 partition@0 {
259 /* 512KB for u-boot Bootloader Image */
260 reg = <0x0 0x00080000>;
261 label = "SPI (RO) U-Boot Image";
262 read-only;
263 };
264
265 partition@80000 {
266 /* 512KB for DTB Image */
267 reg = <0x00080000 0x00080000>;
268 label = "SPI (RO) DTB Image";
269 read-only;
270 };
271
272 partition@100000 {
273 /* 4MB for Linux Kernel Image */
274 reg = <0x00100000 0x00400000>;
275 label = "SPI (RO) Linux Kernel Image";
276 read-only;
277 };
278
279 partition@500000 {
280 /* 4MB for Compressed RFS Image */
281 reg = <0x00500000 0x00400000>;
282 label = "SPI (RO) Compressed RFS Image";
283 read-only;
284 };
285
286 partition@900000 {
287 /* 7MB for JFFS2 based RFS */
288 reg = <0x00900000 0x00700000>;
289 label = "SPI (RW) JFFS2 RFS";
290 };
291 };
292 };
293
294 gpio: gpio-controller@f000 {
295 #gpio-cells = <2>;
296 compatible = "fsl,mpc8572-gpio";
297 reg = <0xf000 0x100>;
298 interrupts = <47 0x2>;
299 interrupt-parent = <&mpic>;
300 gpio-controller;
301 };
302
303 L2: l2-cache-controller@20000 {
304 compatible = "fsl,p1020-l2-cache-controller";
305 reg = <0x20000 0x1000>;
306 cache-line-size = <32>; // 32 bytes
307 cache-size = <0x40000>; // L2,256K
308 interrupt-parent = <&mpic>;
309 interrupts = <16 2>;
310 };
311
312 dma@21300 {
313 #address-cells = <1>;
314 #size-cells = <1>;
315 compatible = "fsl,eloplus-dma";
316 reg = <0x21300 0x4>;
317 ranges = <0x0 0x21100 0x200>;
318 cell-index = <0>;
319 dma-channel@0 {
320 compatible = "fsl,eloplus-dma-channel";
321 reg = <0x0 0x80>;
322 cell-index = <0>;
323 interrupt-parent = <&mpic>;
324 interrupts = <20 2>;
325 };
326 dma-channel@80 {
327 compatible = "fsl,eloplus-dma-channel";
328 reg = <0x80 0x80>;
329 cell-index = <1>;
330 interrupt-parent = <&mpic>;
331 interrupts = <21 2>;
332 };
333 dma-channel@100 {
334 compatible = "fsl,eloplus-dma-channel";
335 reg = <0x100 0x80>;
336 cell-index = <2>;
337 interrupt-parent = <&mpic>;
338 interrupts = <22 2>;
339 };
340 dma-channel@180 {
341 compatible = "fsl,eloplus-dma-channel";
342 reg = <0x180 0x80>;
343 cell-index = <3>;
344 interrupt-parent = <&mpic>;
345 interrupts = <23 2>;
346 };
347 };
348
349 usb@22000 {
350 #address-cells = <1>;
351 #size-cells = <0>;
352 compatible = "fsl-usb2-dr";
353 reg = <0x22000 0x1000>;
354 interrupt-parent = <&mpic>;
355 interrupts = <28 0x2>;
356 phy_type = "ulpi";
357 };
358
359 usb@23000 {
360 #address-cells = <1>;
361 #size-cells = <0>;
362 compatible = "fsl-usb2-dr";
363 reg = <0x23000 0x1000>;
364 interrupt-parent = <&mpic>;
365 interrupts = <46 0x2>;
366 phy_type = "ulpi";
367 };
368
369 sdhci@2e000 {
370 compatible = "fsl,p1020-esdhc", "fsl,esdhc";
371 reg = <0x2e000 0x1000>;
372 interrupts = <72 0x2>;
373 interrupt-parent = <&mpic>;
374 /* Filled in by U-Boot */
375 clock-frequency = <0>;
376 };
377
378 crypto@30000 {
379 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
380 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
381 reg = <0x30000 0x10000>;
382 interrupts = <45 2 58 2>;
383 interrupt-parent = <&mpic>;
384 fsl,num-channels = <4>;
385 fsl,channel-fifo-len = <24>;
386 fsl,exec-units-mask = <0xbfe>;
387 fsl,descriptor-types-mask = <0x3ab0ebf>;
388 };
389
390 mpic: pic@40000 {
391 interrupt-controller;
392 #address-cells = <0>;
393 #interrupt-cells = <2>;
394 reg = <0x40000 0x40000>;
395 compatible = "chrp,open-pic";
396 device_type = "open-pic";
397 };
398
399 msi@41600 {
400 compatible = "fsl,p1020-msi", "fsl,mpic-msi";
401 reg = <0x41600 0x80>;
402 msi-available-ranges = <0 0x100>;
403 interrupts = <
404 0xe0 0
405 0xe1 0
406 0xe2 0
407 0xe3 0
408 0xe4 0
409 0xe5 0
410 0xe6 0
411 0xe7 0>;
412 interrupt-parent = <&mpic>;
413 };
414
415 global-utilities@e0000 { //global utilities block
416 compatible = "fsl,p1020-guts";
417 reg = <0xe0000 0x1000>;
418 fsl,has-rstcr;
419 };
420 };
421
422 pci0: pcie@ffe09000 {
423 compatible = "fsl,mpc8548-pcie";
424 device_type = "pci";
425 #interrupt-cells = <1>;
426 #size-cells = <2>;
427 #address-cells = <3>;
428 reg = <0 0xffe09000 0 0x1000>;
429 bus-range = <0 255>;
430 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
431 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
432 clock-frequency = <33333333>;
433 interrupt-parent = <&mpic>;
434 interrupts = <16 2>;
435 pcie@0 {
436 reg = <0x0 0x0 0x0 0x0 0x0>;
437 #size-cells = <2>;
438 #address-cells = <3>;
439 device_type = "pci";
440 ranges = <0x2000000 0x0 0xa0000000
441 0x2000000 0x0 0xa0000000
442 0x0 0x20000000
443
444 0x1000000 0x0 0x0
445 0x1000000 0x0 0x0
446 0x0 0x100000>;
447 };
448 };
449
450 pci1: pcie@ffe0a000 {
451 compatible = "fsl,mpc8548-pcie";
452 device_type = "pci";
453 #interrupt-cells = <1>;
454 #size-cells = <2>;
455 #address-cells = <3>;
456 reg = <0 0xffe0a000 0 0x1000>;
457 bus-range = <0 255>;
458 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
459 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
460 clock-frequency = <33333333>;
461 interrupt-parent = <&mpic>;
462 interrupts = <16 2>;
463 pcie@0 {
464 reg = <0x0 0x0 0x0 0x0 0x0>;
465 #size-cells = <2>;
466 #address-cells = <3>;
467 device_type = "pci";
468 ranges = <0x2000000 0x0 0xc0000000
469 0x2000000 0x0 0xc0000000
470 0x0 0x20000000
471
472 0x1000000 0x0 0x0
473 0x1000000 0x0 0x0
474 0x0 0x100000>;
475 };
476 };
477};
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
new file mode 100644
index 000000000000..0fe93d0c8b2e
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
@@ -0,0 +1,363 @@
1/*
2 * P2020 RDB Core0 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared; all the other devices must be assigned to one core only.
6 * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb,
7 * eth1, eth2, sdhc, crypto, global-util, pci0.
8 *
9 * Copyright 2009 Freescale Semiconductor Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17/dts-v1/;
18/ {
19 model = "fsl,P2020";
20 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
21 #address-cells = <2>;
22 #size-cells = <2>;
23
24 aliases {
25 ethernet1 = &enet1;
26 ethernet2 = &enet2;
27 serial0 = &serial0;
28 pci0 = &pci0;
29 };
30
31 cpus {
32 #address-cells = <1>;
33 #size-cells = <0>;
34
35 PowerPC,P2020@0 {
36 device_type = "cpu";
37 reg = <0x0>;
38 next-level-cache = <&L2>;
39 };
40 };
41
42 memory {
43 device_type = "memory";
44 };
45
46 soc@ffe00000 {
47 #address-cells = <1>;
48 #size-cells = <1>;
49 device_type = "soc";
50 compatible = "fsl,p2020-immr", "simple-bus";
51 ranges = <0x0 0x0 0xffe00000 0x100000>;
52 bus-frequency = <0>; // Filled out by uboot.
53
54 ecm-law@0 {
55 compatible = "fsl,ecm-law";
56 reg = <0x0 0x1000>;
57 fsl,num-laws = <12>;
58 };
59
60 ecm@1000 {
61 compatible = "fsl,p2020-ecm", "fsl,ecm";
62 reg = <0x1000 0x1000>;
63 interrupts = <17 2>;
64 interrupt-parent = <&mpic>;
65 };
66
67 memory-controller@2000 {
68 compatible = "fsl,p2020-memory-controller";
69 reg = <0x2000 0x1000>;
70 interrupt-parent = <&mpic>;
71 interrupts = <18 2>;
72 };
73
74 i2c@3000 {
75 #address-cells = <1>;
76 #size-cells = <0>;
77 cell-index = <0>;
78 compatible = "fsl-i2c";
79 reg = <0x3000 0x100>;
80 interrupts = <43 2>;
81 interrupt-parent = <&mpic>;
82 dfsrr;
83 rtc@68 {
84 compatible = "dallas,ds1339";
85 reg = <0x68>;
86 };
87 };
88
89 i2c@3100 {
90 #address-cells = <1>;
91 #size-cells = <0>;
92 cell-index = <1>;
93 compatible = "fsl-i2c";
94 reg = <0x3100 0x100>;
95 interrupts = <43 2>;
96 interrupt-parent = <&mpic>;
97 dfsrr;
98 };
99
100 serial0: serial@4500 {
101 cell-index = <0>;
102 device_type = "serial";
103 compatible = "ns16550";
104 reg = <0x4500 0x100>;
105 clock-frequency = <0>;
106 };
107
108 spi@7000 {
109 cell-index = <0>;
110 #address-cells = <1>;
111 #size-cells = <0>;
112 compatible = "fsl,espi";
113 reg = <0x7000 0x1000>;
114 interrupts = <59 0x2>;
115 interrupt-parent = <&mpic>;
116 mode = "cpu";
117
118 fsl_m25p80@0 {
119 #address-cells = <1>;
120 #size-cells = <1>;
121 compatible = "fsl,espi-flash";
122 reg = <0>;
123 linux,modalias = "fsl_m25p80";
124 modal = "s25sl128b";
125 spi-max-frequency = <50000000>;
126 mode = <0>;
127
128 partition@0 {
129 /* 512KB for u-boot Bootloader Image */
130 reg = <0x0 0x00080000>;
131 label = "SPI (RO) U-Boot Image";
132 read-only;
133 };
134
135 partition@80000 {
136 /* 512KB for DTB Image */
137 reg = <0x00080000 0x00080000>;
138 label = "SPI (RO) DTB Image";
139 read-only;
140 };
141
142 partition@100000 {
143 /* 4MB for Linux Kernel Image */
144 reg = <0x00100000 0x00400000>;
145 label = "SPI (RO) Linux Kernel Image";
146 read-only;
147 };
148
149 partition@500000 {
150 /* 4MB for Compressed RFS Image */
151 reg = <0x00500000 0x00400000>;
152 label = "SPI (RO) Compressed RFS Image";
153 read-only;
154 };
155
156 partition@900000 {
157 /* 7MB for JFFS2 based RFS */
158 reg = <0x00900000 0x00700000>;
159 label = "SPI (RW) JFFS2 RFS";
160 };
161 };
162 };
163
164 gpio: gpio-controller@f000 {
165 #gpio-cells = <2>;
166 compatible = "fsl,mpc8572-gpio";
167 reg = <0xf000 0x100>;
168 interrupts = <47 0x2>;
169 interrupt-parent = <&mpic>;
170 gpio-controller;
171 };
172
173 L2: l2-cache-controller@20000 {
174 compatible = "fsl,p2020-l2-cache-controller";
175 reg = <0x20000 0x1000>;
176 cache-line-size = <32>; // 32 bytes
177 cache-size = <0x80000>; // L2,512K
178 interrupt-parent = <&mpic>;
179 interrupts = <16 2>;
180 };
181
182 dma@21300 {
183 #address-cells = <1>;
184 #size-cells = <1>;
185 compatible = "fsl,eloplus-dma";
186 reg = <0x21300 0x4>;
187 ranges = <0x0 0x21100 0x200>;
188 cell-index = <0>;
189 dma-channel@0 {
190 compatible = "fsl,eloplus-dma-channel";
191 reg = <0x0 0x80>;
192 cell-index = <0>;
193 interrupt-parent = <&mpic>;
194 interrupts = <20 2>;
195 };
196 dma-channel@80 {
197 compatible = "fsl,eloplus-dma-channel";
198 reg = <0x80 0x80>;
199 cell-index = <1>;
200 interrupt-parent = <&mpic>;
201 interrupts = <21 2>;
202 };
203 dma-channel@100 {
204 compatible = "fsl,eloplus-dma-channel";
205 reg = <0x100 0x80>;
206 cell-index = <2>;
207 interrupt-parent = <&mpic>;
208 interrupts = <22 2>;
209 };
210 dma-channel@180 {
211 compatible = "fsl,eloplus-dma-channel";
212 reg = <0x180 0x80>;
213 cell-index = <3>;
214 interrupt-parent = <&mpic>;
215 interrupts = <23 2>;
216 };
217 };
218
219 usb@22000 {
220 #address-cells = <1>;
221 #size-cells = <0>;
222 compatible = "fsl-usb2-dr";
223 reg = <0x22000 0x1000>;
224 interrupt-parent = <&mpic>;
225 interrupts = <28 0x2>;
226 phy_type = "ulpi";
227 };
228
229 mdio@24520 {
230 #address-cells = <1>;
231 #size-cells = <0>;
232 compatible = "fsl,gianfar-mdio";
233 reg = <0x24520 0x20>;
234
235 phy0: ethernet-phy@0 {
236 interrupt-parent = <&mpic>;
237 interrupts = <3 1>;
238 reg = <0x0>;
239 };
240 phy1: ethernet-phy@1 {
241 interrupt-parent = <&mpic>;
242 interrupts = <3 1>;
243 reg = <0x1>;
244 };
245 };
246
247 mdio@25520 {
248 #address-cells = <1>;
249 #size-cells = <0>;
250 compatible = "fsl,gianfar-tbi";
251 reg = <0x26520 0x20>;
252
253 tbi0: tbi-phy@11 {
254 reg = <0x11>;
255 device_type = "tbi-phy";
256 };
257 };
258
259 enet1: ethernet@25000 {
260 #address-cells = <1>;
261 #size-cells = <1>;
262 cell-index = <1>;
263 device_type = "network";
264 model = "eTSEC";
265 compatible = "gianfar";
266 reg = <0x25000 0x1000>;
267 ranges = <0x0 0x25000 0x1000>;
268 local-mac-address = [ 00 00 00 00 00 00 ];
269 interrupts = <35 2 36 2 40 2>;
270 interrupt-parent = <&mpic>;
271 tbi-handle = <&tbi0>;
272 phy-handle = <&phy0>;
273 phy-connection-type = "sgmii";
274
275 };
276
277 enet2: ethernet@26000 {
278 #address-cells = <1>;
279 #size-cells = <1>;
280 cell-index = <2>;
281 device_type = "network";
282 model = "eTSEC";
283 compatible = "gianfar";
284 reg = <0x26000 0x1000>;
285 ranges = <0x0 0x26000 0x1000>;
286 local-mac-address = [ 00 00 00 00 00 00 ];
287 interrupts = <31 2 32 2 33 2>;
288 interrupt-parent = <&mpic>;
289 phy-handle = <&phy1>;
290 phy-connection-type = "rgmii-id";
291 };
292
293 sdhci@2e000 {
294 compatible = "fsl,p2020-esdhc", "fsl,esdhc";
295 reg = <0x2e000 0x1000>;
296 interrupts = <72 0x2>;
297 interrupt-parent = <&mpic>;
298 /* Filled in by U-Boot */
299 clock-frequency = <0>;
300 };
301
302 crypto@30000 {
303 compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
304 "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
305 reg = <0x30000 0x10000>;
306 interrupts = <45 2 58 2>;
307 interrupt-parent = <&mpic>;
308 fsl,num-channels = <4>;
309 fsl,channel-fifo-len = <24>;
310 fsl,exec-units-mask = <0xbfe>;
311 fsl,descriptor-types-mask = <0x3ab0ebf>;
312 };
313
314 mpic: pic@40000 {
315 interrupt-controller;
316 #address-cells = <0>;
317 #interrupt-cells = <2>;
318 reg = <0x40000 0x40000>;
319 compatible = "chrp,open-pic";
320 device_type = "open-pic";
321 protected-sources = <
322 42 76 77 78 79 /* serial1, dma2 */
323 29 30 34 26 /* enet0, pci1 */
324 0xe0 0xe1 0xe2 0xe3 /* msi */
325 0xe4 0xe5 0xe6 0xe7
326 >;
327 };
328
329 global-utilities@e0000 {
330 compatible = "fsl,p2020-guts";
331 reg = <0xe0000 0x1000>;
332 fsl,has-rstcr;
333 };
334 };
335
336 pci0: pcie@ffe09000 {
337 compatible = "fsl,mpc8548-pcie";
338 device_type = "pci";
339 #interrupt-cells = <1>;
340 #size-cells = <2>;
341 #address-cells = <3>;
342 reg = <0 0xffe09000 0 0x1000>;
343 bus-range = <0 255>;
344 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
345 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
346 clock-frequency = <33333333>;
347 interrupt-parent = <&mpic>;
348 interrupts = <25 2>;
349 pcie@0 {
350 reg = <0x0 0x0 0x0 0x0 0x0>;
351 #size-cells = <2>;
352 #address-cells = <3>;
353 device_type = "pci";
354 ranges = <0x2000000 0x0 0xa0000000
355 0x2000000 0x0 0xa0000000
356 0x0 0x20000000
357
358 0x1000000 0x0 0x0
359 0x1000000 0x0 0x0
360 0x0 0x100000>;
361 };
362 };
363};
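The CAMP-mode split described in the header comment of this file hinges on the mpic node's protected-sources property: each core lists the interrupt sources that belong to the other core so that its own interrupt-controller setup leaves them untouched. As a rough, hedged sketch of how platform code might collect such a list (the helper name and bitmap layout are assumptions for illustration, not the in-tree MPIC implementation):

#include <linux/init.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

/*
 * Illustrative only: read the "protected-sources" cells from an
 * interrupt-controller node (as in the CAMP dts above) and mark them
 * in a bitmap so this core's setup skips sources owned by the peer.
 */
static void __init mark_protected_sources(struct device_node *np,
					  unsigned long *protected,
					  unsigned int nr_sources)
{
	const __be32 *psrc;
	int plen, i;

	psrc = of_get_property(np, "protected-sources", &plen);
	if (!psrc)
		return;		/* no property: nothing is protected */

	for (i = 0; i < plen / 4; i++) {
		u32 src = be32_to_cpup(&psrc[i]);

		if (src < nr_sources)
			__set_bit(src, protected);
	}
}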
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
new file mode 100644
index 000000000000..e95a51285328
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
@@ -0,0 +1,184 @@
1/*
2 * P2020 RDB Core1 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared; all the other devices must be assigned to one core only.
6 * This dts allows core1 to have l2, dma2, eth0, pci1, msi.
7 *
8 * Please note that core1's dts must be compiled with the "-b 1" option.
9 *
10 * Copyright 2009 Freescale Semiconductor Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18/dts-v1/;
19/ {
20 model = "fsl,P2020";
21 compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
22 #address-cells = <2>;
23 #size-cells = <2>;
24
25 aliases {
26 ethernet0 = &enet0;
27 serial0 = &serial0;
28 pci1 = &pci1;
29 };
30
31 cpus {
32 #address-cells = <1>;
33 #size-cells = <0>;
34
35 PowerPC,P2020@1 {
36 device_type = "cpu";
37 reg = <0x1>;
38 next-level-cache = <&L2>;
39 };
40 };
41
42 memory {
43 device_type = "memory";
44 };
45
46 soc@ffe00000 {
47 #address-cells = <1>;
48 #size-cells = <1>;
49 device_type = "soc";
50 compatible = "fsl,p2020-immr", "simple-bus";
51 ranges = <0x0 0x0 0xffe00000 0x100000>;
52 bus-frequency = <0>; // Filled out by uboot.
53
54 serial0: serial@4600 {
55 cell-index = <1>;
56 device_type = "serial";
57 compatible = "ns16550";
58 reg = <0x4600 0x100>;
59 clock-frequency = <0>;
60 };
61
62 dma@c300 {
63 #address-cells = <1>;
64 #size-cells = <1>;
65 compatible = "fsl,eloplus-dma";
66 reg = <0xc300 0x4>;
67 ranges = <0x0 0xc100 0x200>;
68 cell-index = <1>;
69 dma-channel@0 {
70 compatible = "fsl,eloplus-dma-channel";
71 reg = <0x0 0x80>;
72 cell-index = <0>;
73 interrupt-parent = <&mpic>;
74 interrupts = <76 2>;
75 };
76 dma-channel@80 {
77 compatible = "fsl,eloplus-dma-channel";
78 reg = <0x80 0x80>;
79 cell-index = <1>;
80 interrupt-parent = <&mpic>;
81 interrupts = <77 2>;
82 };
83 dma-channel@100 {
84 compatible = "fsl,eloplus-dma-channel";
85 reg = <0x100 0x80>;
86 cell-index = <2>;
87 interrupt-parent = <&mpic>;
88 interrupts = <78 2>;
89 };
90 dma-channel@180 {
91 compatible = "fsl,eloplus-dma-channel";
92 reg = <0x180 0x80>;
93 cell-index = <3>;
94 interrupt-parent = <&mpic>;
95 interrupts = <79 2>;
96 };
97 };
98
99 L2: l2-cache-controller@20000 {
100 compatible = "fsl,p2020-l2-cache-controller";
101 reg = <0x20000 0x1000>;
102 cache-line-size = <32>; // 32 bytes
103 cache-size = <0x80000>; // L2,512K
104 interrupt-parent = <&mpic>;
105 };
106
107
108 enet0: ethernet@24000 {
109 #address-cells = <1>;
110 #size-cells = <1>;
111 cell-index = <0>;
112 device_type = "network";
113 model = "eTSEC";
114 compatible = "gianfar";
115 reg = <0x24000 0x1000>;
116 ranges = <0x0 0x24000 0x1000>;
117 local-mac-address = [ 00 00 00 00 00 00 ];
118 interrupts = <29 2 30 2 34 2>;
119 interrupt-parent = <&mpic>;
120 fixed-link = <1 1 1000 0 0>;
121 phy-connection-type = "rgmii-id";
122
123 };
124
125 mpic: pic@40000 {
126 interrupt-controller;
127 #address-cells = <0>;
128 #interrupt-cells = <2>;
129 reg = <0x40000 0x40000>;
130 compatible = "chrp,open-pic";
131 device_type = "open-pic";
132 protected-sources = <
133 17 18 43 42 59 47 /* ecm, mem, i2c, serial0, spi, gpio */
134 16 20 21 22 23 28 /* L2, dma1, USB */
135 03 35 36 40 31 32 33 /* mdio, enet1, enet2 */
136 72 45 58 25 /* sdhci, crypto, pci */
137 >;
138 };
139
140 msi@41600 {
141 compatible = "fsl,p2020-msi", "fsl,mpic-msi";
142 reg = <0x41600 0x80>;
143 msi-available-ranges = <0 0x100>;
144 interrupts = <
145 0xe0 0
146 0xe1 0
147 0xe2 0
148 0xe3 0
149 0xe4 0
150 0xe5 0
151 0xe6 0
152 0xe7 0>;
153 interrupt-parent = <&mpic>;
154 };
155 };
156
157 pci1: pcie@ffe0a000 {
158 compatible = "fsl,mpc8548-pcie";
159 device_type = "pci";
160 #interrupt-cells = <1>;
161 #size-cells = <2>;
162 #address-cells = <3>;
163 reg = <0 0xffe0a000 0 0x1000>;
164 bus-range = <0 255>;
165 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
166 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
167 clock-frequency = <33333333>;
168 interrupt-parent = <&mpic>;
169 interrupts = <26 2>;
170 pcie@0 {
171 reg = <0x0 0x0 0x0 0x0 0x0>;
172 #size-cells = <2>;
173 #address-cells = <3>;
174 device_type = "pci";
175 ranges = <0x2000000 0x0 0xc0000000
176 0x2000000 0x0 0xc0000000
177 0x0 0x20000000
178
179 0x1000000 0x0 0x0
180 0x1000000 0x0 0x0
181 0x0 0x100000>;
182 };
183 };
184};
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
new file mode 100644
index 000000000000..6b29eab05362
--- /dev/null
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -0,0 +1,554 @@
1/*
2 * P4080DS Device Tree Source
3 *
4 * Copyright 2009 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/dts-v1/;
13
14/ {
15 model = "fsl,P4080DS";
16 compatible = "fsl,P4080DS";
17 #address-cells = <2>;
18 #size-cells = <2>;
19
20 aliases {
21 ccsr = &soc;
22
23 serial0 = &serial0;
24 serial1 = &serial1;
25 serial2 = &serial2;
26 serial3 = &serial3;
27 pci0 = &pci0;
28 pci1 = &pci1;
29 pci2 = &pci2;
30 usb0 = &usb0;
31 usb1 = &usb1;
32 dma0 = &dma0;
33 dma1 = &dma1;
34 sdhc = &sdhc;
35
36 rio0 = &rapidio0;
37 };
38
39 cpus {
40 #address-cells = <1>;
41 #size-cells = <0>;
42
43 cpu0: PowerPC,4080@0 {
44 device_type = "cpu";
45 reg = <0>;
46 next-level-cache = <&L2_0>;
47 L2_0: l2-cache {
48 };
49 };
50 cpu1: PowerPC,4080@1 {
51 device_type = "cpu";
52 reg = <1>;
53 next-level-cache = <&L2_1>;
54 L2_1: l2-cache {
55 };
56 };
57 cpu2: PowerPC,4080@2 {
58 device_type = "cpu";
59 reg = <2>;
60 next-level-cache = <&L2_2>;
61 L2_2: l2-cache {
62 };
63 };
64 cpu3: PowerPC,4080@3 {
65 device_type = "cpu";
66 reg = <3>;
67 next-level-cache = <&L2_3>;
68 L2_3: l2-cache {
69 };
70 };
71 cpu4: PowerPC,4080@4 {
72 device_type = "cpu";
73 reg = <4>;
74 next-level-cache = <&L2_4>;
75 L2_4: l2-cache {
76 };
77 };
78 cpu5: PowerPC,4080@5 {
79 device_type = "cpu";
80 reg = <5>;
81 next-level-cache = <&L2_5>;
82 L2_5: l2-cache {
83 };
84 };
85 cpu6: PowerPC,4080@6 {
86 device_type = "cpu";
87 reg = <6>;
88 next-level-cache = <&L2_6>;
89 L2_6: l2-cache {
90 };
91 };
92 cpu7: PowerPC,4080@7 {
93 device_type = "cpu";
94 reg = <7>;
95 next-level-cache = <&L2_7>;
96 L2_7: l2-cache {
97 };
98 };
99 };
100
101 memory {
102 device_type = "memory";
103 };
104
105 soc: soc@ffe000000 {
106 #address-cells = <1>;
107 #size-cells = <1>;
108 device_type = "soc";
109 compatible = "simple-bus";
110 ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
111 reg = <0xf 0xfe000000 0 0x00001000>;
112
113 corenet-law@0 {
114 compatible = "fsl,corenet-law";
115 reg = <0x0 0x1000>;
116 fsl,num-laws = <32>;
117 };
118
119 memory-controller@8000 {
120 compatible = "fsl,p4080-memory-controller";
121 reg = <0x8000 0x1000>;
122 interrupt-parent = <&mpic>;
123 interrupts = <0x12 2>;
124 };
125
126 memory-controller@9000 {
127 compatible = "fsl,p4080-memory-controller";
128 reg = <0x9000 0x1000>;
129 interrupt-parent = <&mpic>;
130 interrupts = <0x12 2>;
131 };
132
133 corenet-cf@18000 {
134 compatible = "fsl,corenet-cf";
135 reg = <0x18000 0x1000>;
136 fsl,ccf-num-csdids = <32>;
137 fsl,ccf-num-snoopids = <32>;
138 };
139
140 iommu@20000 {
141 compatible = "fsl,p4080-pamu";
142 reg = <0x20000 0x10000>;
143 interrupts = <24 2>;
144 interrupt-parent = <&mpic>;
145 };
146
147 mpic: pic@40000 {
148 interrupt-controller;
149 #address-cells = <0>;
150 #interrupt-cells = <2>;
151 reg = <0x40000 0x40000>;
152 compatible = "chrp,open-pic";
153 device_type = "open-pic";
154 };
155
156 dma0: dma@100300 {
157 #address-cells = <1>;
158 #size-cells = <1>;
159 compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
160 reg = <0x100300 0x4>;
161 ranges = <0x0 0x100100 0x200>;
162 cell-index = <0>;
163 dma-channel@0 {
164 compatible = "fsl,p4080-dma-channel",
165 "fsl,eloplus-dma-channel";
166 reg = <0x0 0x80>;
167 cell-index = <0>;
168 interrupt-parent = <&mpic>;
169 interrupts = <28 2>;
170 };
171 dma-channel@80 {
172 compatible = "fsl,p4080-dma-channel",
173 "fsl,eloplus-dma-channel";
174 reg = <0x80 0x80>;
175 cell-index = <1>;
176 interrupt-parent = <&mpic>;
177 interrupts = <29 2>;
178 };
179 dma-channel@100 {
180 compatible = "fsl,p4080-dma-channel",
181 "fsl,eloplus-dma-channel";
182 reg = <0x100 0x80>;
183 cell-index = <2>;
184 interrupt-parent = <&mpic>;
185 interrupts = <30 2>;
186 };
187 dma-channel@180 {
188 compatible = "fsl,p4080-dma-channel",
189 "fsl,eloplus-dma-channel";
190 reg = <0x180 0x80>;
191 cell-index = <3>;
192 interrupt-parent = <&mpic>;
193 interrupts = <31 2>;
194 };
195 };
196
197 dma1: dma@101300 {
198 #address-cells = <1>;
199 #size-cells = <1>;
200 compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
201 reg = <0x101300 0x4>;
202 ranges = <0x0 0x101100 0x200>;
203 cell-index = <1>;
204 dma-channel@0 {
205 compatible = "fsl,p4080-dma-channel",
206 "fsl,eloplus-dma-channel";
207 reg = <0x0 0x80>;
208 cell-index = <0>;
209 interrupt-parent = <&mpic>;
210 interrupts = <32 2>;
211 };
212 dma-channel@80 {
213 compatible = "fsl,p4080-dma-channel",
214 "fsl,eloplus-dma-channel";
215 reg = <0x80 0x80>;
216 cell-index = <1>;
217 interrupt-parent = <&mpic>;
218 interrupts = <33 2>;
219 };
220 dma-channel@100 {
221 compatible = "fsl,p4080-dma-channel",
222 "fsl,eloplus-dma-channel";
223 reg = <0x100 0x80>;
224 cell-index = <2>;
225 interrupt-parent = <&mpic>;
226 interrupts = <34 2>;
227 };
228 dma-channel@180 {
229 compatible = "fsl,p4080-dma-channel",
230 "fsl,eloplus-dma-channel";
231 reg = <0x180 0x80>;
232 cell-index = <3>;
233 interrupt-parent = <&mpic>;
234 interrupts = <35 2>;
235 };
236 };
237
238 spi@110000 {
239 cell-index = <0>;
240 #address-cells = <1>;
241 #size-cells = <0>;
242 compatible = "fsl,espi";
243 reg = <0x110000 0x1000>;
244 interrupts = <53 0x2>;
245 interrupt-parent = <&mpic>;
246 espi,num-ss-bits = <4>;
247 mode = "cpu";
248
249 fsl_m25p80@0 {
250 #address-cells = <1>;
251 #size-cells = <1>;
252 compatible = "fsl,espi-flash";
253 reg = <0>;
254 linux,modalias = "fsl_m25p80";
255 spi-max-frequency = <40000000>; /* input clock */
256 partition@u-boot {
257 label = "u-boot";
258 reg = <0x00000000 0x00100000>;
259 read-only;
260 };
261 partition@kernel {
262 label = "kernel";
263 reg = <0x00100000 0x00500000>;
264 read-only;
265 };
266 partition@dtb {
267 label = "dtb";
268 reg = <0x00600000 0x00100000>;
269 read-only;
270 };
271 partition@fs {
272 label = "file system";
273 reg = <0x00700000 0x00900000>;
274 };
275 };
276 };
277
278 sdhc: sdhc@114000 {
279 compatible = "fsl,p4080-esdhc", "fsl,esdhc";
280 reg = <0x114000 0x1000>;
281 interrupts = <48 2>;
282 interrupt-parent = <&mpic>;
283 };
284
285 i2c@118000 {
286 #address-cells = <1>;
287 #size-cells = <0>;
288 cell-index = <0>;
289 compatible = "fsl-i2c";
290 reg = <0x118000 0x100>;
291 interrupts = <38 2>;
292 interrupt-parent = <&mpic>;
293 dfsrr;
294 };
295
296 i2c@118100 {
297 #address-cells = <1>;
298 #size-cells = <0>;
299 cell-index = <1>;
300 compatible = "fsl-i2c";
301 reg = <0x118100 0x100>;
302 interrupts = <38 2>;
303 interrupt-parent = <&mpic>;
304 dfsrr;
305 eeprom@51 {
306 compatible = "at24,24c256";
307 reg = <0x51>;
308 };
309 eeprom@52 {
310 compatible = "at24,24c256";
311 reg = <0x52>;
312 };
313 rtc@68 {
314 compatible = "dallas,ds3232";
315 reg = <0x68>;
316 interrupts = <0 0x1>;
317 interrupt-parent = <&mpic>;
318 };
319 };
320
321 i2c@119000 {
322 #address-cells = <1>;
323 #size-cells = <0>;
324 cell-index = <2>;
325 compatible = "fsl-i2c";
326 reg = <0x119000 0x100>;
327 interrupts = <39 2>;
328 interrupt-parent = <&mpic>;
329 dfsrr;
330 };
331
332 i2c@119100 {
333 #address-cells = <1>;
334 #size-cells = <0>;
335 cell-index = <3>;
336 compatible = "fsl-i2c";
337 reg = <0x119100 0x100>;
338 interrupts = <39 2>;
339 interrupt-parent = <&mpic>;
340 dfsrr;
341 };
342
343 serial0: serial@11c500 {
344 cell-index = <0>;
345 device_type = "serial";
346 compatible = "ns16550";
347 reg = <0x11c500 0x100>;
348 clock-frequency = <0>;
349 interrupts = <36 2>;
350 interrupt-parent = <&mpic>;
351 };
352
353 serial1: serial@11c600 {
354 cell-index = <1>;
355 device_type = "serial";
356 compatible = "ns16550";
357 reg = <0x11c600 0x100>;
358 clock-frequency = <0>;
359 interrupts = <36 2>;
360 interrupt-parent = <&mpic>;
361 };
362
363 serial2: serial@11d500 {
364 cell-index = <2>;
365 device_type = "serial";
366 compatible = "ns16550";
367 reg = <0x11d500 0x100>;
368 clock-frequency = <0>;
369 interrupts = <37 2>;
370 interrupt-parent = <&mpic>;
371 };
372
373 serial3: serial@11d600 {
374 cell-index = <3>;
375 device_type = "serial";
376 compatible = "ns16550";
377 reg = <0x11d600 0x100>;
378 clock-frequency = <0>;
379 interrupts = <37 2>;
380 interrupt-parent = <&mpic>;
381 };
382
383 gpio0: gpio@130000 {
384 compatible = "fsl,p4080-gpio";
385 reg = <0x130000 0x1000>;
386 interrupts = <55 2>;
387 interrupt-parent = <&mpic>;
388 #gpio-cells = <2>;
389 gpio-controller;
390 };
391
392 usb0: usb@210000 {
393 compatible = "fsl,p4080-usb2-mph",
394 "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
395 reg = <0x210000 0x1000>;
396 #address-cells = <1>;
397 #size-cells = <0>;
398 interrupt-parent = <&mpic>;
399 interrupts = <44 0x2>;
400 phy_type = "ulpi";
401 };
402
403 usb1: usb@211000 {
404 compatible = "fsl,p4080-usb2-dr",
405 "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
406 reg = <0x211000 0x1000>;
407 #address-cells = <1>;
408 #size-cells = <0>;
409 interrupt-parent = <&mpic>;
410 interrupts = <45 0x2>;
411 dr_mode = "host";
412 phy_type = "ulpi";
413 };
414 };
415
416 rapidio0: rapidio@ffe0c0000 {
417 #address-cells = <2>;
418 #size-cells = <2>;
419 compatible = "fsl,rapidio-delta";
420 reg = <0xf 0xfe0c0000 0 0x20000>;
421 ranges = <0 0 0xf 0xf5000000 0 0x01000000>;
422 interrupt-parent = <&mpic>;
423 /* err_irq bell_outb_irq bell_inb_irq
424 msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
425 interrupts = <16 2 56 2 57 2 60 2 61 2 62 2 63 2>;
426 };
427
428 localbus@ffe124000 {
429 compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
430 reg = <0xf 0xfe124000 0 0x1000>;
431 interrupts = <25 2>;
432 #address-cells = <2>;
433 #size-cells = <1>;
434
435 ranges = <0 0 0xf 0xe8000000 0x08000000>;
436
437 flash@0,0 {
438 compatible = "cfi-flash";
439 reg = <0 0 0x08000000>;
440 bank-width = <2>;
441 device-width = <2>;
442 };
443 };
444
445 pci0: pcie@ffe200000 {
446 compatible = "fsl,p4080-pcie";
447 device_type = "pci";
448 #interrupt-cells = <1>;
449 #size-cells = <2>;
450 #address-cells = <3>;
451 reg = <0xf 0xfe200000 0 0x1000>;
452 bus-range = <0x0 0xff>;
453 ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
454 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
455 clock-frequency = <0x1fca055>;
456 interrupt-parent = <&mpic>;
457 interrupts = <16 2>;
458
459 interrupt-map-mask = <0xf800 0 0 7>;
460 interrupt-map = <
461 /* IDSEL 0x0 */
462 0000 0 0 1 &mpic 40 1
463 0000 0 0 2 &mpic 1 1
464 0000 0 0 3 &mpic 2 1
465 0000 0 0 4 &mpic 3 1
466 >;
467 pcie@0 {
468 reg = <0 0 0 0 0>;
469 #size-cells = <2>;
470 #address-cells = <3>;
471 device_type = "pci";
472 ranges = <0x02000000 0 0xe0000000
473 0x02000000 0 0xe0000000
474 0 0x20000000
475
476 0x01000000 0 0x00000000
477 0x01000000 0 0x00000000
478 0 0x00010000>;
479 };
480 };
481
482 pci1: pcie@ffe201000 {
483 compatible = "fsl,p4080-pcie";
484 device_type = "pci";
485 #interrupt-cells = <1>;
486 #size-cells = <2>;
487 #address-cells = <3>;
488 reg = <0xf 0xfe201000 0 0x1000>;
489 bus-range = <0 0xff>;
490 ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
491 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
492 clock-frequency = <0x1fca055>;
493 interrupt-parent = <&mpic>;
494 interrupts = <16 2>;
495 interrupt-map-mask = <0xf800 0 0 7>;
496 interrupt-map = <
497 /* IDSEL 0x0 */
498 0000 0 0 1 &mpic 41 1
499 0000 0 0 2 &mpic 5 1
500 0000 0 0 3 &mpic 6 1
501 0000 0 0 4 &mpic 7 1
502 >;
503 pcie@0 {
504 reg = <0 0 0 0 0>;
505 #size-cells = <2>;
506 #address-cells = <3>;
507 device_type = "pci";
508 ranges = <0x02000000 0 0xe0000000
509 0x02000000 0 0xe0000000
510 0 0x20000000
511
512 0x01000000 0 0x00000000
513 0x01000000 0 0x00000000
514 0 0x00010000>;
515 };
516 };
517
518 pci2: pcie@ffe202000 {
519 compatible = "fsl,p4080-pcie";
520 device_type = "pci";
521 #interrupt-cells = <1>;
522 #size-cells = <2>;
523 #address-cells = <3>;
524 reg = <0xf 0xfe202000 0 0x1000>;
525 bus-range = <0x0 0xff>;
526 ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
527 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
528 clock-frequency = <0x1fca055>;
529 interrupt-parent = <&mpic>;
530 interrupts = <16 2>;
531 interrupt-map-mask = <0xf800 0 0 7>;
532 interrupt-map = <
533 /* IDSEL 0x0 */
534 0000 0 0 1 &mpic 42 1
535 0000 0 0 2 &mpic 9 1
536 0000 0 0 3 &mpic 10 1
537 0000 0 0 4 &mpic 11 1
538 >;
539 pcie@0 {
540 reg = <0 0 0 0 0>;
541 #size-cells = <2>;
542 #address-cells = <3>;
543 device_type = "pci";
544 ranges = <0x02000000 0 0xe0000000
545 0x02000000 0 0xe0000000
546 0 0x20000000
547
548 0x01000000 0 0x00000000
549 0x01000000 0 0x00000000
550 0 0x00010000>;
551 };
552 };
553
554};
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index ad402c488741..d2af32e2bf7a 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -226,6 +226,7 @@
226 max-frame-size = <9000>; 226 max-frame-size = <9000>;
227 rx-fifo-size = <4096>; 227 rx-fifo-size = <4096>;
228 tx-fifo-size = <2048>; 228 tx-fifo-size = <2048>;
229 rx-fifo-size-gige = <16384>;
229 phy-mode = "rgmii"; 230 phy-mode = "rgmii";
230 phy-map = <0x00000000>; 231 phy-map = <0x00000000>;
231 rgmii-device = <&RGMII0>; 232 rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/yosemite.dts b/arch/powerpc/boot/dts/yosemite.dts
index 1fa3cb4c4ebb..64923245f0e5 100644
--- a/arch/powerpc/boot/dts/yosemite.dts
+++ b/arch/powerpc/boot/dts/yosemite.dts
@@ -282,20 +282,10 @@
282 /* Inbound 2GB range starting at 0 */ 282 /* Inbound 2GB range starting at 0 */
283 dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>; 283 dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
284 284
285 /* Bamboo has all 4 IRQ pins tied together per slot */
286 interrupt-map-mask = <0xf800 0x0 0x0 0x0>; 285 interrupt-map-mask = <0xf800 0x0 0x0 0x0>;
287 interrupt-map = < 286 interrupt-map = <
288 /* IDSEL 1 */ 287 /* IDSEL 12 */
289 0x800 0x0 0x0 0x0 &UIC0 0x1c 0x8 288 0x6000 0x0 0x0 0x0 &UIC0 0x19 0x8
290
291 /* IDSEL 2 */
292 0x1000 0x0 0x0 0x0 &UIC0 0x1b 0x8
293
294 /* IDSEL 3 */
295 0x1800 0x0 0x0 0x0 &UIC0 0x1a 0x8
296
297 /* IDSEL 4 */
298 0x2000 0x0 0x0 0x0 &UIC0 0x19 0x8
299 >; 289 >;
300 }; 290 };
301 }; 291 };
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 28980738776c..6cd2cd65c2cd 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -218,7 +218,7 @@ CONFIG_MPIC=y
218# CONFIG_MPIC_WEIRD is not set 218# CONFIG_MPIC_WEIRD is not set
219# CONFIG_PPC_I8259 is not set 219# CONFIG_PPC_I8259 is not set
220# CONFIG_PPC_RTAS is not set 220# CONFIG_PPC_RTAS is not set
221# CONFIG_MMIO_NVRAM is not set 221CONFIG_MMIO_NVRAM=y
222# CONFIG_PPC_MPC106 is not set 222# CONFIG_PPC_MPC106 is not set
223# CONFIG_PPC_970_NAP is not set 223# CONFIG_PPC_970_NAP is not set
224# CONFIG_PPC_INDIRECT_IO is not set 224# CONFIG_PPC_INDIRECT_IO is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index e199d1cacbaf..a6a3768f7304 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -218,7 +218,7 @@ CONFIG_MPIC=y
218# CONFIG_MPIC_WEIRD is not set 218# CONFIG_MPIC_WEIRD is not set
219# CONFIG_PPC_I8259 is not set 219# CONFIG_PPC_I8259 is not set
220# CONFIG_PPC_RTAS is not set 220# CONFIG_PPC_RTAS is not set
221# CONFIG_MMIO_NVRAM is not set 221CONFIG_MMIO_NVRAM=y
222# CONFIG_PPC_MPC106 is not set 222# CONFIG_PPC_MPC106 is not set
223# CONFIG_PPC_970_NAP is not set 223# CONFIG_PPC_970_NAP is not set
224# CONFIG_PPC_INDIRECT_IO is not set 224# CONFIG_PPC_INDIRECT_IO is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 3b0fbfb28efd..1975d41e0763 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -219,7 +219,7 @@ CONFIG_MPIC=y
219# CONFIG_MPIC_WEIRD is not set 219# CONFIG_MPIC_WEIRD is not set
220# CONFIG_PPC_I8259 is not set 220# CONFIG_PPC_I8259 is not set
221# CONFIG_PPC_RTAS is not set 221# CONFIG_PPC_RTAS is not set
222# CONFIG_MMIO_NVRAM is not set 222CONFIG_MMIO_NVRAM=y
223# CONFIG_PPC_MPC106 is not set 223# CONFIG_PPC_MPC106 is not set
224# CONFIG_PPC_970_NAP is not set 224# CONFIG_PPC_970_NAP is not set
225# CONFIG_PPC_INDIRECT_IO is not set 225# CONFIG_PPC_INDIRECT_IO is not set
@@ -1124,7 +1124,7 @@ CONFIG_UNIX98_PTYS=y
1124# CONFIG_IPMI_HANDLER is not set 1124# CONFIG_IPMI_HANDLER is not set
1125CONFIG_HW_RANDOM=y 1125CONFIG_HW_RANDOM=y
1126# CONFIG_HW_RANDOM_TIMERIOMEM is not set 1126# CONFIG_HW_RANDOM_TIMERIOMEM is not set
1127# CONFIG_NVRAM is not set 1127CONFIG_NVRAM=y
1128# CONFIG_R3964 is not set 1128# CONFIG_R3964 is not set
1129# CONFIG_APPLICOM is not set 1129# CONFIG_APPLICOM is not set
1130# CONFIG_RAW_DRIVER is not set 1130# CONFIG_RAW_DRIVER is not set
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index 24d79e3abd8e..0835eb977ba9 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -3,8 +3,47 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/errno.h>
6#include <linux/of.h> 7#include <linux/of.h>
7 8
9/*
10 * USB Controller pram common to QE and CPM.
11 */
12struct usb_ctlr {
13 u8 usb_usmod;
14 u8 usb_usadr;
15 u8 usb_uscom;
16 u8 res1[1];
17 __be16 usb_usep[4];
18 u8 res2[4];
19 __be16 usb_usber;
20 u8 res3[2];
21 __be16 usb_usbmr;
22 u8 res4[1];
23 u8 usb_usbs;
24 /* Fields down below are QE-only */
25 __be16 usb_ussft;
26 u8 res5[2];
27 __be16 usb_usfrn;
28 u8 res6[0x22];
29} __attribute__ ((packed));
30
31/*
32 * Function code bits, usually generic to devices.
33 */
34#ifdef CONFIG_CPM1
35#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */
36#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */
37#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
38#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
39#else
40#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
41#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
42#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
43#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
44#endif
45#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
46
8/* Opcodes common to CPM1 and CPM2 47/* Opcodes common to CPM1 and CPM2
9*/ 48*/
10#define CPM_CR_INIT_TRX ((ushort)0x0000) 49#define CPM_CR_INIT_TRX ((ushort)0x0000)
@@ -93,13 +132,56 @@ typedef struct cpm_buf_desc {
93#define BD_I2C_START (0x0400) 132#define BD_I2C_START (0x0400)
94 133
95int cpm_muram_init(void); 134int cpm_muram_init(void);
135
136#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
96unsigned long cpm_muram_alloc(unsigned long size, unsigned long align); 137unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
97int cpm_muram_free(unsigned long offset); 138int cpm_muram_free(unsigned long offset);
98unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); 139unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
99void __iomem *cpm_muram_addr(unsigned long offset); 140void __iomem *cpm_muram_addr(unsigned long offset);
100unsigned long cpm_muram_offset(void __iomem *addr); 141unsigned long cpm_muram_offset(void __iomem *addr);
101dma_addr_t cpm_muram_dma(void __iomem *addr); 142dma_addr_t cpm_muram_dma(void __iomem *addr);
143#else
144static inline unsigned long cpm_muram_alloc(unsigned long size,
145 unsigned long align)
146{
147 return -ENOSYS;
148}
149
150static inline int cpm_muram_free(unsigned long offset)
151{
152 return -ENOSYS;
153}
154
155static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
156 unsigned long size)
157{
158 return -ENOSYS;
159}
160
161static inline void __iomem *cpm_muram_addr(unsigned long offset)
162{
163 return NULL;
164}
165
166static inline unsigned long cpm_muram_offset(void __iomem *addr)
167{
168 return -ENOSYS;
169}
170
171static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
172{
173 return 0;
174}
175#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
176
177#ifdef CONFIG_CPM
102int cpm_command(u32 command, u8 opcode); 178int cpm_command(u32 command, u8 opcode);
179#else
180static inline int cpm_command(u32 command, u8 opcode)
181{
182 return -ENOSYS;
183}
184#endif /* CONFIG_CPM */
103 185
104int cpm2_gpiochip_add32(struct device_node *np); 186int cpm2_gpiochip_add32(struct device_node *np);
105 187
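Because the new fallback stubs above return -ENOSYS (or NULL) when neither CONFIG_CPM nor CONFIG_QUICC_ENGINE is set, callers can use the MURAM interface unconditionally and only fail at run time. A minimal usage sketch, assuming a hypothetical caller that wants a 64-byte, 32-byte-aligned parameter-RAM area (the sizes and the message text are illustrative):

#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/cpm.h>

/*
 * Sketch: allocate a MURAM region through the interfaces declared in
 * cpm.h and map it.  With the stubbed variants (no CPM/QE configured)
 * cpm_muram_alloc() returns -ENOSYS and the caller simply bails out.
 */
static void __iomem *example_alloc_pram(void)
{
	unsigned long offset;

	offset = cpm_muram_alloc(64, 32);
	if (IS_ERR_VALUE(offset)) {
		pr_err("cpm example: MURAM allocation failed (%ld)\n",
		       (long)offset);
		return NULL;
	}

	return cpm_muram_addr(offset);
}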
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 7685ffde8821..81b01192f440 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -478,51 +478,6 @@ typedef struct iic {
478 char res2[2]; /* Reserved */ 478 char res2[2]; /* Reserved */
479} iic_t; 479} iic_t;
480 480
481/* SPI parameter RAM.
482*/
483typedef struct spi {
484 ushort spi_rbase; /* Rx Buffer descriptor base address */
485 ushort spi_tbase; /* Tx Buffer descriptor base address */
486 u_char spi_rfcr; /* Rx function code */
487 u_char spi_tfcr; /* Tx function code */
488 ushort spi_mrblr; /* Max receive buffer length */
489 uint spi_rstate; /* Internal */
490 uint spi_rdp; /* Internal */
491 ushort spi_rbptr; /* Internal */
492 ushort spi_rbc; /* Internal */
493 uint spi_rxtmp; /* Internal */
494 uint spi_tstate; /* Internal */
495 uint spi_tdp; /* Internal */
496 ushort spi_tbptr; /* Internal */
497 ushort spi_tbc; /* Internal */
498 uint spi_txtmp; /* Internal */
499 uint spi_res;
500 ushort spi_rpbase; /* Relocation pointer */
501 ushort spi_res2;
502} spi_t;
503
504/* SPI Mode register.
505*/
506#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */
507#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */
508#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */
509#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */
510#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */
511#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */
512#define SPMODE_EN ((ushort)0x0100) /* Enable */
513#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */
514#define SPMODE_LEN4 ((ushort)0x0030) /* 4 bits per char */
515#define SPMODE_LEN8 ((ushort)0x0070) /* 8 bits per char */
516#define SPMODE_LEN16 ((ushort)0x00f0) /* 16 bits per char */
517#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */
518
519/* SPIE fields */
520#define SPIE_MME 0x20
521#define SPIE_TXE 0x10
522#define SPIE_BSY 0x04
523#define SPIE_TXB 0x02
524#define SPIE_RXB 0x01
525
526/* 481/*
527 * RISC Controller Configuration Register definitons 482 * RISC Controller Configuration Register definitons
528 */ 483 */
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 990ff191da8b..f42e9baf3a4e 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -124,14 +124,6 @@ static inline void cpm2_fastbrg(uint brg, uint rate, int div16)
124 __cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT); 124 __cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT);
125} 125}
126 126
127/* Function code bits, usually generic to devices.
128*/
129#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
130#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
131#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
132#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
133#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
134
135/* Parameter RAM offsets from the base. 127/* Parameter RAM offsets from the base.
136*/ 128*/
137#define PROFF_SCC1 ((uint)0x8000) 129#define PROFF_SCC1 ((uint)0x8000)
@@ -654,45 +646,6 @@ typedef struct iic {
654 uint iic_txtmp; /* Internal */ 646 uint iic_txtmp; /* Internal */
655} iic_t; 647} iic_t;
656 648
657/* SPI parameter RAM.
658*/
659typedef struct spi {
660 ushort spi_rbase; /* Rx Buffer descriptor base address */
661 ushort spi_tbase; /* Tx Buffer descriptor base address */
662 u_char spi_rfcr; /* Rx function code */
663 u_char spi_tfcr; /* Tx function code */
664 ushort spi_mrblr; /* Max receive buffer length */
665 uint spi_rstate; /* Internal */
666 uint spi_rdp; /* Internal */
667 ushort spi_rbptr; /* Internal */
668 ushort spi_rbc; /* Internal */
669 uint spi_rxtmp; /* Internal */
670 uint spi_tstate; /* Internal */
671 uint spi_tdp; /* Internal */
672 ushort spi_tbptr; /* Internal */
673 ushort spi_tbc; /* Internal */
674 uint spi_txtmp; /* Internal */
675 uint spi_res; /* Tx temp. */
676 uint spi_res1[4]; /* SDMA temp. */
677} spi_t;
678
679/* SPI Mode register.
680*/
681#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */
682#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */
683#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */
684#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */
685#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */
686#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */
687#define SPMODE_EN ((ushort)0x0100) /* Enable */
688#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */
689#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */
690
691#define SPMODE_LEN(x) ((((x)-1)&0xF)<<4)
692#define SPMODE_PM(x) ((x) &0xF)
693
694#define SPI_EB ((u_char)0x10) /* big endian byte order */
695
696/* IDMA parameter RAM 649/* IDMA parameter RAM
697*/ 650*/
698typedef struct idma { 651typedef struct idma {
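
The cpm2.h hunk drops the fixed SPMODE_LEN4/LEN8/LEN16 constants in favour of the parameterized SPMODE_LEN(x) and SPMODE_PM(x) macros. A small user-space check, assumed here and not part of the patch, confirms that the parameterized forms encode the same values as the old cpm1.h constants:

#include <assert.h>
#include <stdio.h>

/* Parameterized encodings copied from the cpm2.h lines above */
#define SPMODE_LEN(x) ((((x)-1)&0xF)<<4)
#define SPMODE_PM(x)  ((x)&0xF)

int main(void)
{
	/* The new macros reproduce the fixed cpm1.h constants */
	assert(SPMODE_LEN(4)  == 0x0030);   /* SPMODE_LEN4 */
	assert(SPMODE_LEN(8)  == 0x0070);   /* SPMODE_LEN8 */
	assert(SPMODE_LEN(16) == 0x00f0);   /* SPMODE_LEN16 / SPMODE_LENMSK */
	assert(SPMODE_PM(4)   == 0x0004);

	printf("SPMODE_LEN(8) = 0x%04x, SPMODE_PM(4) = 0x%04x\n",
	       SPMODE_LEN(8), SPMODE_PM(4));
	return 0;
}
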
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index a98653b26231..57c400071995 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -147,6 +147,7 @@
147 .globl label##_pSeries; \ 147 .globl label##_pSeries; \
148label##_pSeries: \ 148label##_pSeries: \
149 HMT_MEDIUM; \ 149 HMT_MEDIUM; \
150 DO_KVM n; \
150 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ 151 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
151 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 152 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
152 153
@@ -170,6 +171,7 @@ label##_pSeries: \
170 .globl label##_pSeries; \ 171 .globl label##_pSeries; \
171label##_pSeries: \ 172label##_pSeries: \
172 HMT_MEDIUM; \ 173 HMT_MEDIUM; \
174 DO_KVM n; \
173 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ 175 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
174 mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ 176 mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
175 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ 177 std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index b1dafb6a9743..5856a66ab404 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,6 +3,10 @@
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
6pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
7 unsigned long addr, unsigned *shift);
8
9void flush_dcache_icache_hugepage(struct page *page);
6 10
7int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, 11int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
8 unsigned long len); 12 unsigned long len);
@@ -11,12 +15,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
11 unsigned long end, unsigned long floor, 15 unsigned long end, unsigned long floor,
12 unsigned long ceiling); 16 unsigned long ceiling);
13 17
14void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
15 pte_t *ptep, pte_t pte);
16
17pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
18 pte_t *ptep);
19
20/* 18/*
21 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs 19 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
22 * to override the version in mm/hugetlb.c 20 * to override the version in mm/hugetlb.c
@@ -42,9 +40,26 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
42{ 40{
43} 41}
44 42
43
44static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
45 pte_t *ptep, pte_t pte)
46{
47 set_pte_at(mm, addr, ptep, pte);
48}
49
50static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
51 unsigned long addr, pte_t *ptep)
52{
53 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
54 return __pte(old);
55}
56
45static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, 57static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
46 unsigned long addr, pte_t *ptep) 58 unsigned long addr, pte_t *ptep)
47{ 59{
60 pte_t pte;
61 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
62 flush_tlb_page(vma, addr);
48} 63}
49 64
50static inline int huge_pte_none(pte_t pte) 65static inline int huge_pte_none(pte_t pte)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c27caac47ad1..f0275818b95c 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -212,6 +212,19 @@
212#define H_QUERY_INT_STATE 0x1E4 212#define H_QUERY_INT_STATE 0x1E4
213#define H_POLL_PENDING 0x1D8 213#define H_POLL_PENDING 0x1D8
214#define H_ILLAN_ATTRIBUTES 0x244 214#define H_ILLAN_ATTRIBUTES 0x244
215#define H_MODIFY_HEA_QP 0x250
216#define H_QUERY_HEA_QP 0x254
217#define H_QUERY_HEA 0x258
218#define H_QUERY_HEA_PORT 0x25C
219#define H_MODIFY_HEA_PORT 0x260
220#define H_REG_BCMC 0x264
221#define H_DEREG_BCMC 0x268
222#define H_REGISTER_HEA_RPAGES 0x26C
223#define H_DISABLE_AND_GET_HEA 0x270
224#define H_GET_HEA_INFO 0x274
225#define H_ALLOC_HEA_RESOURCE 0x278
226#define H_ADD_CONN 0x284
227#define H_DEL_CONN 0x288
215#define H_JOIN 0x298 228#define H_JOIN 0x298
216#define H_VASI_STATE 0x2A4 229#define H_VASI_STATE 0x2A4
217#define H_ENABLE_CRQ 0x2B0 230#define H_ENABLE_CRQ 0x2B0
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index abbc2aaaced5..9f4c9d4f5803 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -64,11 +64,6 @@ extern void iseries_handle_interrupts(void);
64 get_paca()->hard_enabled = 0; \ 64 get_paca()->hard_enabled = 0; \
65 } while(0) 65 } while(0)
66 66
67static inline int irqs_disabled_flags(unsigned long flags)
68{
69 return flags == 0;
70}
71
72#else 67#else
73 68
74#if defined(CONFIG_BOOKE) 69#if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/include/asm/immap_cpm2.h b/arch/powerpc/include/asm/immap_cpm2.h
index d4f069bf0e57..7c64fda5357b 100644
--- a/arch/powerpc/include/asm/immap_cpm2.h
+++ b/arch/powerpc/include/asm/immap_cpm2.h
@@ -549,7 +549,7 @@ typedef struct comm_proc {
549 549
550/* USB Controller. 550/* USB Controller.
551*/ 551*/
552typedef struct usb_ctlr { 552typedef struct cpm_usb_ctlr {
553 u8 usb_usmod; 553 u8 usb_usmod;
554 u8 usb_usadr; 554 u8 usb_usadr;
555 u8 usb_uscom; 555 u8 usb_uscom;
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index c346d0bcd230..4e10f508570a 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -210,7 +210,7 @@ struct sir {
210} __attribute__ ((packed)); 210} __attribute__ ((packed));
211 211
212/* USB Controller */ 212/* USB Controller */
213struct usb_ctlr { 213struct qe_usb_ctlr {
214 u8 usb_usmod; 214 u8 usb_usmod;
215 u8 usb_usadr; 215 u8 usb_usadr;
216 u8 usb_uscom; 216 u8 usb_uscom;
@@ -229,7 +229,7 @@ struct usb_ctlr {
229} __attribute__ ((packed)); 229} __attribute__ ((packed));
230 230
231/* MCC */ 231/* MCC */
232struct mcc { 232struct qe_mcc {
233 __be32 mcce; /* MCC event register */ 233 __be32 mcce; /* MCC event register */
234 __be32 mccm; /* MCC mask register */ 234 __be32 mccm; /* MCC mask register */
235 __be32 mccf; /* MCC configuration register */ 235 __be32 mccf; /* MCC configuration register */
@@ -431,9 +431,9 @@ struct qe_immap {
431 struct qe_mux qmx; /* QE Multiplexer */ 431 struct qe_mux qmx; /* QE Multiplexer */
432 struct qe_timers qet; /* QE Timers */ 432 struct qe_timers qet; /* QE Timers */
433 struct spi spi[0x2]; /* spi */ 433 struct spi spi[0x2]; /* spi */
434 struct mcc mcc; /* mcc */ 434 struct qe_mcc mcc; /* mcc */
435 struct qe_brg brg; /* brg */ 435 struct qe_brg brg; /* brg */
436 struct usb_ctlr usb; /* USB */ 436 struct qe_usb_ctlr usb; /* USB */
437 struct si1 si1; /* SI */ 437 struct si1 si1; /* SI */
438 u8 res11[0x800]; 438 u8 res11[0x800];
439 struct sir sir; /* SI Routing Tables */ 439 struct sir sir; /* SI Routing Tables */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index bbcd1aaf3dfd..e054baef1845 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -17,8 +17,6 @@
17#include <asm/atomic.h> 17#include <asm/atomic.h>
18 18
19 19
20#define get_irq_desc(irq) (&irq_desc[(irq)])
21
22/* Define a way to iterate across irqs. */ 20/* Define a way to iterate across irqs. */
23#define for_each_irq(i) \ 21#define for_each_irq(i) \
24 for ((i) = 0; (i) < NR_IRQS; ++(i)) 22 for ((i) = 0; (i) < NR_IRQS; ++(i))
@@ -34,12 +32,15 @@ extern atomic_t ppc_n_lost_interrupts;
34 */ 32 */
35#define NO_IRQ_IGNORE ((unsigned int)-1) 33#define NO_IRQ_IGNORE ((unsigned int)-1)
36 34
37/* Total number of virq in the platform (make it a CONFIG_* option ? */ 35/* Total number of virq in the platform */
38#define NR_IRQS 512 36#define NR_IRQS CONFIG_NR_IRQS
39 37
40/* Number of irqs reserved for the legacy controller */ 38/* Number of irqs reserved for the legacy controller */
41#define NUM_ISA_INTERRUPTS 16 39#define NUM_ISA_INTERRUPTS 16
42 40
41/* Same thing, used by the generic IRQ code */
42#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
43
43/* This type is the placeholder for a hardware interrupt number. It has to 44/* This type is the placeholder for a hardware interrupt number. It has to
44 * be big enough to enclose whatever representation is used by a given 45 * be big enough to enclose whatever representation is used by a given
45 * platform. 46 * platform.
@@ -99,7 +100,7 @@ struct irq_host_ops {
99 * interrupt controller has for that line) 100 * interrupt controller has for that line)
100 */ 101 */
101 int (*xlate)(struct irq_host *h, struct device_node *ctrler, 102 int (*xlate)(struct irq_host *h, struct device_node *ctrler,
102 u32 *intspec, unsigned int intsize, 103 const u32 *intspec, unsigned int intsize,
103 irq_hw_number_t *out_hwirq, unsigned int *out_type); 104 irq_hw_number_t *out_hwirq, unsigned int *out_type);
104}; 105};
105 106
@@ -313,7 +314,7 @@ extern void irq_free_virt(unsigned int virq, unsigned int count);
313 * of the of_irq_map_*() functions. 314 * of the of_irq_map_*() functions.
314 */ 315 */
315extern unsigned int irq_create_of_mapping(struct device_node *controller, 316extern unsigned int irq_create_of_mapping(struct device_node *controller,
316 u32 *intspec, unsigned int intsize); 317 const u32 *intspec, unsigned int intsize);
317 318
318/** 319/**
319 * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space 320 * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
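
NR_IRQS now comes from CONFIG_NR_IRQS rather than a hard-coded 512. The sketch below is a user-space approximation, with NR_IRQS pinned to 16 purely for the demo, showing how the unchanged for_each_irq() helper iterates over that range:

#include <stdio.h>

#define NR_IRQS 16   /* stands in for CONFIG_NR_IRQS in this sketch */

/* Iteration helper, unchanged by the patch, copied from irq.h */
#define for_each_irq(i) \
	for ((i) = 0; (i) < NR_IRQS; ++(i))

int main(void)
{
	unsigned int i, count = 0;

	for_each_irq(i)
		count++;
	printf("visited %u virtual irq numbers\n", count);
	return 0;
}
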
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index bb2de6aa5ce0..81f3b0b5601e 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -46,6 +46,24 @@ struct kvm_regs {
46}; 46};
47 47
48struct kvm_sregs { 48struct kvm_sregs {
49 __u32 pvr;
50 union {
51 struct {
52 __u64 sdr1;
53 struct {
54 struct {
55 __u64 slbe;
56 __u64 slbv;
57 } slb[64];
58 } ppc64;
59 struct {
60 __u32 sr[16];
61 __u64 ibat[8];
62 __u64 dbat[8];
63 } ppc32;
64 } s;
65 __u8 pad[1020];
66 } u;
49}; 67};
50 68
51struct kvm_fpu { 69struct kvm_fpu {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 56bfae59837f..af2abe74f544 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -49,6 +49,46 @@
49#define BOOKE_INTERRUPT_SPE_FP_ROUND 34 49#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
50#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 50#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
51 51
52/* book3s */
53
54#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
55#define BOOK3S_INTERRUPT_MACHINE_CHECK 0x200
56#define BOOK3S_INTERRUPT_DATA_STORAGE 0x300
57#define BOOK3S_INTERRUPT_DATA_SEGMENT 0x380
58#define BOOK3S_INTERRUPT_INST_STORAGE 0x400
59#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
60#define BOOK3S_INTERRUPT_EXTERNAL 0x500
61#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
62#define BOOK3S_INTERRUPT_PROGRAM 0x700
63#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
64#define BOOK3S_INTERRUPT_DECREMENTER 0x900
65#define BOOK3S_INTERRUPT_SYSCALL 0xc00
66#define BOOK3S_INTERRUPT_TRACE 0xd00
67#define BOOK3S_INTERRUPT_PERFMON 0xf00
68#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
69#define BOOK3S_INTERRUPT_VSX 0xf40
70
71#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
72#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
73#define BOOK3S_IRQPRIO_INST_SEGMENT 2
74#define BOOK3S_IRQPRIO_DATA_STORAGE 3
75#define BOOK3S_IRQPRIO_INST_STORAGE 4
76#define BOOK3S_IRQPRIO_ALIGNMENT 5
77#define BOOK3S_IRQPRIO_PROGRAM 6
78#define BOOK3S_IRQPRIO_FP_UNAVAIL 7
79#define BOOK3S_IRQPRIO_ALTIVEC 8
80#define BOOK3S_IRQPRIO_VSX 9
81#define BOOK3S_IRQPRIO_SYSCALL 10
82#define BOOK3S_IRQPRIO_MACHINE_CHECK 11
83#define BOOK3S_IRQPRIO_DEBUG 12
84#define BOOK3S_IRQPRIO_EXTERNAL 13
85#define BOOK3S_IRQPRIO_DECREMENTER 14
86#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15
87#define BOOK3S_IRQPRIO_MAX 16
88
89#define BOOK3S_HFLAG_DCBZ32 0x1
90#define BOOK3S_HFLAG_SLB 0x2
91
52#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ 92#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
53#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 93#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
54 94
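
The BOOK3S_IRQPRIO_* numbers define an ordering for queued guest interrupts. The following is a hypothetical sketch, not taken from the KVM code, of how such priorities could be kept in a bitmap and scanned lowest-number-first; only the constant values are copied from the hunk above:

#include <stdio.h>

/* Priority numbers copied from the kvm_asm.h lines above */
#define BOOK3S_IRQPRIO_EXTERNAL    13
#define BOOK3S_IRQPRIO_DECREMENTER 14
#define BOOK3S_IRQPRIO_MAX         16

int main(void)
{
	/* Hypothetical pending-interrupt bitmap keyed by IRQPRIO number;
	 * this sketch assumes a lower number means a higher priority. */
	unsigned long pending = (1UL << BOOK3S_IRQPRIO_DECREMENTER) |
	                        (1UL << BOOK3S_IRQPRIO_EXTERNAL);
	int prio;

	for (prio = 0; prio < BOOK3S_IRQPRIO_MAX; prio++) {
		if (pending & (1UL << prio)) {
			printf("deliver IRQPRIO %d first\n", prio);
			break;
		}
	}
	return 0;
}
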
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
new file mode 100644
index 000000000000..74b7369770d0
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -0,0 +1,139 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#ifndef __ASM_KVM_BOOK3S_H__
21#define __ASM_KVM_BOOK3S_H__
22
23#include <linux/types.h>
24#include <linux/kvm_host.h>
25#include <asm/kvm_ppc.h>
26
27struct kvmppc_slb {
28 u64 esid;
29 u64 vsid;
30 u64 orige;
31 u64 origv;
32 bool valid;
33 bool Ks;
34 bool Kp;
35 bool nx;
36 bool large;
37 bool class;
38};
39
40struct kvmppc_sr {
41 u32 raw;
42 u32 vsid;
43 bool Ks;
44 bool Kp;
45 bool nx;
46};
47
48struct kvmppc_bat {
49 u64 raw;
50 u32 bepi;
51 u32 bepi_mask;
52 bool vs;
53 bool vp;
54 u32 brpn;
55 u8 wimg;
56 u8 pp;
57};
58
59struct kvmppc_sid_map {
60 u64 guest_vsid;
61 u64 guest_esid;
62 u64 host_vsid;
63 bool valid;
64};
65
66#define SID_MAP_BITS 9
67#define SID_MAP_NUM (1 << SID_MAP_BITS)
68#define SID_MAP_MASK (SID_MAP_NUM - 1)
69
70struct kvmppc_vcpu_book3s {
71 struct kvm_vcpu vcpu;
72 struct kvmppc_sid_map sid_map[SID_MAP_NUM];
73 struct kvmppc_slb slb[64];
74 struct {
75 u64 esid;
76 u64 vsid;
77 } slb_shadow[64];
78 u8 slb_shadow_max;
79 struct kvmppc_sr sr[16];
80 struct kvmppc_bat ibat[8];
81 struct kvmppc_bat dbat[8];
82 u64 hid[6];
83 int slb_nr;
84 u64 sdr1;
85 u64 dsisr;
86 u64 hior;
87 u64 msr_mask;
88 u64 vsid_first;
89 u64 vsid_next;
90 u64 vsid_max;
91 int context_id;
92};
93
94#define CONTEXT_HOST 0
95#define CONTEXT_GUEST 1
96#define CONTEXT_GUEST_END 2
97
98#define VSID_REAL 0xfffffffffff00000
99#define VSID_REAL_DR 0xffffffffffe00000
100#define VSID_REAL_IR 0xffffffffffd00000
101#define VSID_BAT 0xffffffffffc00000
102#define VSID_PR 0x8000000000000000
103
104extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
105extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
106extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
107extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
108extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
109extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
110extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
111extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
112extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
113extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
114extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
115extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
116extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
117extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
118 bool upper, u32 val);
119
120extern u32 kvmppc_trampoline_lowmem;
121extern u32 kvmppc_trampoline_enter;
122
123static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
124{
125 return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
126}
127
128static inline ulong dsisr(void)
129{
130 ulong r;
131 asm ( "mfdsisr %0 " : "=r" (r) );
132 return r;
133}
134
135extern void kvm_return_point(void);
136
137#define INS_DCBZ 0x7c0007ec
138
139#endif /* __ASM_KVM_BOOK3S_H__ */
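
to_book3s() relies on the classic container_of() pattern: given a pointer to the embedded kvm_vcpu, it recovers the enclosing kvmppc_vcpu_book3s. A self-contained user-space sketch of that pattern follows; the struct layout and field values are simplified stand-ins, not the real definitions:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins; the real structs live in kvm_host.h/kvm_book3s.h */
struct kvm_vcpu { int id; };

struct kvmppc_vcpu_book3s_demo {
	unsigned long sdr1;        /* some book3s-only state */
	struct kvm_vcpu vcpu;      /* embedded generic vcpu */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct kvmppc_vcpu_book3s_demo *to_book3s(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_book3s_demo, vcpu);
}

int main(void)
{
	struct kvmppc_vcpu_book3s_demo b = { .sdr1 = 0x1000, .vcpu = { .id = 7 } };
	struct kvmppc_vcpu_book3s_demo *back = to_book3s(&b.vcpu);

	printf("recovered sdr1 = 0x%lx for vcpu %d\n", back->sdr1, back->vcpu.id);
	return 0;
}
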
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
new file mode 100644
index 000000000000..2e06ee8184ef
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -0,0 +1,58 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#ifndef __ASM_KVM_BOOK3S_ASM_H__
21#define __ASM_KVM_BOOK3S_ASM_H__
22
23#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
24
25#include <asm/kvm_asm.h>
26
27.macro DO_KVM intno
28 .if (\intno == BOOK3S_INTERRUPT_SYSTEM_RESET) || \
29 (\intno == BOOK3S_INTERRUPT_MACHINE_CHECK) || \
30 (\intno == BOOK3S_INTERRUPT_DATA_STORAGE) || \
31 (\intno == BOOK3S_INTERRUPT_INST_STORAGE) || \
32 (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
33 (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
34 (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
35 (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
36 (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
37 (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
38 (\intno == BOOK3S_INTERRUPT_DECREMENTER) || \
39 (\intno == BOOK3S_INTERRUPT_SYSCALL) || \
40 (\intno == BOOK3S_INTERRUPT_TRACE) || \
41 (\intno == BOOK3S_INTERRUPT_PERFMON) || \
42 (\intno == BOOK3S_INTERRUPT_ALTIVEC) || \
43 (\intno == BOOK3S_INTERRUPT_VSX)
44
45 b kvmppc_trampoline_\intno
46kvmppc_resume_\intno:
47
48 .endif
49.endm
50
51#else
52
53.macro DO_KVM intno
54.endm
55
56#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
57
58#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c9c930ed11d7..1201f62d0d73 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -21,7 +21,8 @@
21#define __POWERPC_KVM_HOST_H__ 21#define __POWERPC_KVM_HOST_H__
22 22
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/timer.h> 24#include <linux/hrtimer.h>
25#include <linux/interrupt.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/kvm_types.h> 27#include <linux/kvm_types.h>
27#include <asm/kvm_asm.h> 28#include <asm/kvm_asm.h>
@@ -37,6 +38,8 @@
37#define KVM_NR_PAGE_SIZES 1 38#define KVM_NR_PAGE_SIZES 1
38#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) 39#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
39 40
41#define HPTEG_CACHE_NUM 1024
42
40struct kvm; 43struct kvm;
41struct kvm_run; 44struct kvm_run;
42struct kvm_vcpu; 45struct kvm_vcpu;
@@ -63,6 +66,17 @@ struct kvm_vcpu_stat {
63 u32 dec_exits; 66 u32 dec_exits;
64 u32 ext_intr_exits; 67 u32 ext_intr_exits;
65 u32 halt_wakeup; 68 u32 halt_wakeup;
69#ifdef CONFIG_PPC64
70 u32 pf_storage;
71 u32 pf_instruc;
72 u32 sp_storage;
73 u32 sp_instruc;
74 u32 queue_intr;
75 u32 ld;
76 u32 ld_slow;
77 u32 st;
78 u32 st_slow;
79#endif
66}; 80};
67 81
68enum kvm_exit_types { 82enum kvm_exit_types {
@@ -109,9 +123,53 @@ struct kvmppc_exit_timing {
109struct kvm_arch { 123struct kvm_arch {
110}; 124};
111 125
126struct kvmppc_pte {
127 u64 eaddr;
128 u64 vpage;
129 u64 raddr;
130 bool may_read;
131 bool may_write;
132 bool may_execute;
133};
134
135struct kvmppc_mmu {
136 /* book3s_64 only */
137 void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
138 u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
139 u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
140 void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
141 void (*slbia)(struct kvm_vcpu *vcpu);
142 /* book3s */
143 void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
144 u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
145 int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
146 void (*reset_msr)(struct kvm_vcpu *vcpu);
147 void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
148 int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
149 u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
150 bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
151};
152
153struct hpte_cache {
154 u64 host_va;
155 u64 pfn;
156 ulong slot;
157 struct kvmppc_pte pte;
158};
159
112struct kvm_vcpu_arch { 160struct kvm_vcpu_arch {
113 u32 host_stack; 161 ulong host_stack;
114 u32 host_pid; 162 u32 host_pid;
163#ifdef CONFIG_PPC64
164 ulong host_msr;
165 ulong host_r2;
166 void *host_retip;
167 ulong trampoline_lowmem;
168 ulong trampoline_enter;
169 ulong highmem_handler;
170 ulong host_paca_phys;
171 struct kvmppc_mmu mmu;
172#endif
115 173
116 u64 fpr[32]; 174 u64 fpr[32];
117 ulong gpr[32]; 175 ulong gpr[32];
@@ -123,6 +181,10 @@ struct kvm_vcpu_arch {
123 ulong xer; 181 ulong xer;
124 182
125 ulong msr; 183 ulong msr;
184#ifdef CONFIG_PPC64
185 ulong shadow_msr;
186 ulong hflags;
187#endif
126 u32 mmucr; 188 u32 mmucr;
127 ulong sprg0; 189 ulong sprg0;
128 ulong sprg1; 190 ulong sprg1;
@@ -149,6 +211,7 @@ struct kvm_vcpu_arch {
149 u32 ivor[64]; 211 u32 ivor[64];
150 ulong ivpr; 212 ulong ivpr;
151 u32 pir; 213 u32 pir;
214 u32 pvr;
152 215
153 u32 shadow_pid; 216 u32 shadow_pid;
154 u32 pid; 217 u32 pid;
@@ -174,6 +237,9 @@ struct kvm_vcpu_arch {
174#endif 237#endif
175 238
176 u32 last_inst; 239 u32 last_inst;
240#ifdef CONFIG_PPC64
241 ulong fault_dsisr;
242#endif
177 ulong fault_dear; 243 ulong fault_dear;
178 ulong fault_esr; 244 ulong fault_esr;
179 gpa_t paddr_accessed; 245 gpa_t paddr_accessed;
@@ -185,8 +251,15 @@ struct kvm_vcpu_arch {
185 251
186 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ 252 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
187 253
188 struct timer_list dec_timer; 254 struct hrtimer dec_timer;
255 struct tasklet_struct tasklet;
256 u64 dec_jiffies;
189 unsigned long pending_exceptions; 257 unsigned long pending_exceptions;
258
259#ifdef CONFIG_PPC64
260 struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
261 int hpte_cache_offset;
262#endif
190}; 263};
191 264
192#endif /* __POWERPC_KVM_HOST_H__ */ 265#endif /* __POWERPC_KVM_HOST_H__ */
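
kvm_vcpu_arch now carries a fixed hpte_cache[HPTEG_CACHE_NUM] array plus an hpte_cache_offset cursor. How the cursor is actually advanced lives in the book3s MMU code, not in this header; the snippet below is only a hypothetical illustration of a fixed array with a wrapping cursor, with just the HPTEG_CACHE_NUM value taken from the hunk above:

#include <stdio.h>

#define HPTEG_CACHE_NUM 1024   /* from the kvm_host.h lines above */

struct hpte_cache_demo { unsigned long host_va; unsigned long pfn; };

int main(void)
{
	/* A fixed array plus a wrapping cursor, loosely mirroring hpte_cache[]
	 * and hpte_cache_offset; the wrap-around policy here is an assumption. */
	static struct hpte_cache_demo cache[HPTEG_CACHE_NUM];
	int offset = 0;
	unsigned long va;

	for (va = 0; va < 2048; va++) {
		cache[offset].host_va = va;
		cache[offset].pfn = va >> 4;
		offset = (offset + 1) % HPTEG_CACHE_NUM;
	}
	printf("next free slot after 2048 inserts: %d\n", offset);
	return 0;
}
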
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2c6ee349df5e..269ee46ab028 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -39,6 +39,7 @@ enum emulation_result {
39extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 39extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
40extern char kvmppc_handlers_start[]; 40extern char kvmppc_handlers_start[];
41extern unsigned long kvmppc_handler_len; 41extern unsigned long kvmppc_handler_len;
42extern void kvmppc_handler_highmem(void);
42 43
43extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); 44extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
44extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 45extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index f78f65c38f05..14b592dfb4e8 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -100,7 +100,14 @@ struct lppaca {
100 // Used to pass parms from the OS to PLIC for SetAsrAndRfid 100 // Used to pass parms from the OS to PLIC for SetAsrAndRfid
101 u64 saved_gpr3; // Saved GPR3 x20-x27 101 u64 saved_gpr3; // Saved GPR3 x20-x27
102 u64 saved_gpr4; // Saved GPR4 x28-x2F 102 u64 saved_gpr4; // Saved GPR4 x28-x2F
103 u64 saved_gpr5; // Saved GPR5 x30-x37 103 union {
104 u64 saved_gpr5; /* Saved GPR5 x30-x37 */
105 struct {
106 u8 cede_latency_hint; /* x30 */
107 u8 reserved[7]; /* x31-x36 */
108 } fields;
109 } gpr5_dword;
110
104 111
105 u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38 112 u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38
106 u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39 113 u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
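
The new gpr5_dword union lets the cede latency hint overlay the first byte of what used to be the plain saved_gpr5 doubleword. A stand-alone C sketch of the same overlay, with stdint types standing in for the kernel's u64/u8, is:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the gpr5_dword union introduced above (u64/u8 -> stdint) */
union gpr5_dword {
	uint64_t saved_gpr5;
	struct {
		uint8_t cede_latency_hint;   /* overlays byte x30 */
		uint8_t reserved[7];         /* remaining seven bytes */
	} fields;
};

int main(void)
{
	union gpr5_dword g = { .saved_gpr5 = 0 };

	g.fields.cede_latency_hint = 2;
	/* On big-endian PowerPC the hint lands in the most significant byte of
	 * saved_gpr5; on a little-endian build of this sketch it is the lowest. */
	printf("saved_gpr5 = 0x%016llx\n", (unsigned long long)g.saved_gpr5);
	return 0;
}
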
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 9efa2be78331..9f0fc9e6ce0d 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -266,6 +266,11 @@ struct machdep_calls {
266 void (*suspend_disable_irqs)(void); 266 void (*suspend_disable_irqs)(void);
267 void (*suspend_enable_irqs)(void); 267 void (*suspend_enable_irqs)(void);
268#endif 268#endif
269
270#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
271 ssize_t (*cpu_probe)(const char *, size_t);
272 ssize_t (*cpu_release)(const char *, size_t);
273#endif
269}; 274};
270 275
271extern void e500_idle(void); 276extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 079c06eae446..a062c57696d0 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -39,6 +39,7 @@ struct macio_dev
39 struct macio_bus *bus; /* macio bus this device is on */ 39 struct macio_bus *bus; /* macio bus this device is on */
40 struct macio_dev *media_bay; /* Device is part of a media bay */ 40 struct macio_dev *media_bay; /* Device is part of a media bay */
41 struct of_device ofdev; 41 struct of_device ofdev;
42 struct device_dma_parameters dma_parms; /* ide needs that */
42 int n_resources; 43 int n_resources;
43 struct resource resource[MACIO_DEV_COUNT_RESOURCES]; 44 struct resource resource[MACIO_DEV_COUNT_RESOURCES];
44 int n_interrupts; 45 int n_interrupts;
@@ -78,6 +79,8 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
78 return res->end - res->start + 1; 79 return res->end - res->start + 1;
79} 80}
80 81
82extern int macio_enable_devres(struct macio_dev *dev);
83
81extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name); 84extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
82extern void macio_release_resource(struct macio_dev *dev, int resource_no); 85extern void macio_release_resource(struct macio_dev *dev, int resource_no);
83extern int macio_request_resources(struct macio_dev *dev, const char *name); 86extern int macio_request_resources(struct macio_dev *dev, const char *name);
@@ -131,6 +134,9 @@ struct macio_driver
131 int (*resume)(struct macio_dev* dev); 134 int (*resume)(struct macio_dev* dev);
132 int (*shutdown)(struct macio_dev* dev); 135 int (*shutdown)(struct macio_dev* dev);
133 136
137#ifdef CONFIG_PMAC_MEDIABAY
138 void (*mediabay_event)(struct macio_dev* dev, int mb_state);
139#endif
134 struct device_driver driver; 140 struct device_driver driver;
135}; 141};
136#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver) 142#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
index b2efb3325808..11037a4133ee 100644
--- a/arch/powerpc/include/asm/mediabay.h
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -17,26 +17,31 @@
17#define MB_POWER 6 /* media bay contains a Power device (???) */ 17#define MB_POWER 6 /* media bay contains a Power device (???) */
18#define MB_NO 7 /* media bay contains nothing */ 18#define MB_NO 7 /* media bay contains nothing */
19 19
20/* Number of bays in the machine or 0 */ 20struct macio_dev;
21extern int media_bay_count;
22 21
23#ifdef CONFIG_BLK_DEV_IDE_PMAC 22#ifdef CONFIG_PMAC_MEDIABAY
24#include <linux/ide.h>
25 23
26int check_media_bay_by_base(unsigned long base, int what); 24/* Check the content type of the bay, returns MB_NO if the bay is still
27/* called by IDE PMAC host driver to register IDE controller for media bay */ 25 * transitioning
28int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base, 26 */
29 int irq, ide_hwif_t *hwif); 27extern int check_media_bay(struct macio_dev *bay);
30 28
31int check_media_bay(struct device_node *which_bay, int what); 29/* The ATA driver uses the calls below to temporarily hold on the
30 * media bay callbacks while initializing the interface
31 */
32extern void lock_media_bay(struct macio_dev *bay);
33extern void unlock_media_bay(struct macio_dev *bay);
32 34
33#else 35#else
34 36
35static inline int check_media_bay(struct device_node *which_bay, int what) 37static inline int check_media_bay(struct macio_dev *bay)
36{ 38{
37 return -ENODEV; 39 return MB_NO;
38} 40}
39 41
42static inline void lock_media_bay(struct macio_dev *bay) { }
43static inline void unlock_media_bay(struct macio_dev *bay) { }
44
40#endif 45#endif
41 46
42#endif /* __KERNEL__ */ 47#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index bebe31c2e907..2102b214a87c 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -173,14 +173,6 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
173 */ 173 */
174extern int mmu_ci_restrictions; 174extern int mmu_ci_restrictions;
175 175
176#ifdef CONFIG_HUGETLB_PAGE
177/*
178 * The page size indexes of the huge pages for use by hugetlbfs
179 */
180extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
181
182#endif /* CONFIG_HUGETLB_PAGE */
183
184/* 176/*
185 * This function sets the AVPN and L fields of the HPTE appropriately 177 * This function sets the AVPN and L fields of the HPTE appropriately
186 * for the page size 178 * for the page size
@@ -253,10 +245,11 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
253 unsigned long vsid, pte_t *ptep, unsigned long trap, 245 unsigned long vsid, pte_t *ptep, unsigned long trap,
254 unsigned int local, int ssize); 246 unsigned int local, int ssize);
255struct mm_struct; 247struct mm_struct;
248unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
256extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); 249extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
257extern int hash_huge_page(struct mm_struct *mm, unsigned long access, 250int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
258 unsigned long ea, unsigned long vsid, int local, 251 pte_t *ptep, unsigned long trap, int local, int ssize,
259 unsigned long trap); 252 unsigned int shift, unsigned int mmu_psize);
260 253
261extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, 254extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
262 unsigned long pstart, unsigned long prot, 255 unsigned long pstart, unsigned long prot,
@@ -380,6 +373,38 @@ extern void slb_set_size(u16 size);
380 373
381#ifndef __ASSEMBLY__ 374#ifndef __ASSEMBLY__
382 375
376#ifdef CONFIG_PPC_SUBPAGE_PROT
377/*
378 * For the sub-page protection option, we extend the PGD with one of
379 * these. Basically we have a 3-level tree, with the top level being
380 * the protptrs array. To optimize speed and memory consumption when
381 * only addresses < 4GB are being protected, pointers to the first
382 * four pages of sub-page protection words are stored in the low_prot
383 * array.
384 * Each page of sub-page protection words protects 1GB (4 bytes
385 * protects 64k). For the 3-level tree, each page of pointers then
386 * protects 8TB.
387 */
388struct subpage_prot_table {
389 unsigned long maxaddr; /* only addresses < this are protected */
390 unsigned int **protptrs[2];
391 unsigned int *low_prot[4];
392};
393
394#define SBP_L1_BITS (PAGE_SHIFT - 2)
395#define SBP_L2_BITS (PAGE_SHIFT - 3)
396#define SBP_L1_COUNT (1 << SBP_L1_BITS)
397#define SBP_L2_COUNT (1 << SBP_L2_BITS)
398#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
399#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
400
401extern void subpage_prot_free(struct mm_struct *mm);
402extern void subpage_prot_init_new_context(struct mm_struct *mm);
403#else
404static inline void subpage_prot_free(struct mm_struct *mm) {}
405static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
406#endif /* CONFIG_PPC_SUBPAGE_PROT */
407
383typedef unsigned long mm_context_id_t; 408typedef unsigned long mm_context_id_t;
384 409
385typedef struct { 410typedef struct {
@@ -393,6 +418,9 @@ typedef struct {
393 u16 sllp; /* SLB page size encoding */ 418 u16 sllp; /* SLB page size encoding */
394#endif 419#endif
395 unsigned long vdso_base; 420 unsigned long vdso_base;
421#ifdef CONFIG_PPC_SUBPAGE_PROT
422 struct subpage_prot_table spt;
423#endif /* CONFIG_PPC_SUBPAGE_PROT */
396} mm_context_t; 424} mm_context_t;
397 425
398 426
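
The subpage_prot_table comment above states that one page of 4-byte protection words covers 1GB and one page of pointers then covers 8TB. Those figures follow from a 64K PAGE_SHIFT, which is what the comment's numbers imply; the arithmetic below reproduces them in a user-space sketch (assumed values, not from the patch):

#include <stdio.h>

int main(void)
{
	/* Assumed 64K pages: PAGE_SHIFT = 16, as the comment's numbers imply. */
	unsigned int page_shift = 16;
	unsigned long long sbp_l1_bits = page_shift - 2;  /* 4-byte words    */
	unsigned long long sbp_l2_bits = page_shift - 3;  /* 8-byte pointers */

	/* One page of protection words: each word covers one 64K page */
	unsigned long long per_word_page = (1ULL << sbp_l1_bits) << 16;
	/* One page of pointers to such word pages */
	unsigned long long per_ptr_page = (1ULL << sbp_l2_bits) * per_word_page;

	printf("one page of words covers    %llu GiB\n", per_word_page >> 30);
	printf("one page of pointers covers %llu TiB\n", per_ptr_page >> 40);
	return 0;
}
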
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b34e94d94435..26383e0778aa 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -23,6 +23,8 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
23extern void set_context(unsigned long id, pgd_t *pgd); 23extern void set_context(unsigned long id, pgd_t *pgd);
24 24
25#ifdef CONFIG_PPC_BOOK3S_64 25#ifdef CONFIG_PPC_BOOK3S_64
26extern int __init_new_context(void);
27extern void __destroy_context(int context_id);
26static inline void mmu_context_init(void) { } 28static inline void mmu_context_init(void) { }
27#else 29#else
28extern void mmu_context_init(void); 30extern void mmu_context_init(void);
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1b4f697abbdd..b664ce79a172 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -276,6 +276,53 @@ extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
276extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node); 276extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
277extern void mpc52xx_restart(char *cmd); 277extern void mpc52xx_restart(char *cmd);
278 278
279/* mpc52xx_gpt.c */
280struct mpc52xx_gpt_priv;
281extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
282extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
283 int continuous);
284extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
285extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
286
287/* mpc52xx_lpbfifo.c */
288#define MPC52XX_LPBFIFO_FLAG_READ (0)
289#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0)
290#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1)
291#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2)
292#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3)
293
294struct mpc52xx_lpbfifo_request {
295 struct list_head list;
296
297 /* localplus bus address */
298 unsigned int cs;
299 size_t offset;
300
301 /* Memory address */
302 void *data;
303 phys_addr_t data_phys;
304
305 /* Details of transfer */
306 size_t size;
307 size_t pos; /* current position of transfer */
308 int flags;
309
310 /* What to do when finished */
311 void (*callback)(struct mpc52xx_lpbfifo_request *);
312
313 void *priv; /* Driver private data */
314
315 /* statistics */
316 int irq_count;
317 int irq_ticks;
318 u8 last_byte;
319 int buffer_not_done_cnt;
320};
321
322extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
323extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
324extern void mpc52xx_lpbfifo_poll(void);
325
279/* mpc52xx_pic.c */ 326/* mpc52xx_pic.c */
280extern void mpc52xx_init_irq(void); 327extern void mpc52xx_init_irq(void);
281extern unsigned int mpc52xx_get_irq(void); 328extern unsigned int mpc52xx_get_irq(void);
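
A client of the new LocalPlus FIFO interface fills in an mpc52xx_lpbfifo_request and hands it to mpc52xx_lpbfifo_submit(), getting its callback invoked on completion. The sketch below is a simplified stand-in (the struct is trimmed, and the submit call is only mimicked by invoking the callback), meant to show the shape of the flow rather than the real driver API in action:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for mpc52xx_lpbfifo_request, keeping only the
 * fields a client would normally fill in before submitting. */
struct lpbfifo_request_demo {
	unsigned int cs;        /* LocalPlus chip select */
	size_t offset;          /* offset within that chip select */
	void *data;             /* memory buffer */
	size_t size;
	int flags;
	void (*callback)(struct lpbfifo_request_demo *);
};

#define MPC52XX_LPBFIFO_FLAG_WRITE (1 << 0)   /* copied from the hunk above */

static void done(struct lpbfifo_request_demo *req)
{
	printf("transferred %zu bytes to chip select %u\n", req->size, req->cs);
}

int main(void)
{
	static char buf[512];
	struct lpbfifo_request_demo req = {
		.cs = 1, .offset = 0x100, .data = buf, .size = sizeof(buf),
		.flags = MPC52XX_LPBFIFO_FLAG_WRITE, .callback = done,
	};

	/* In the kernel this would be mpc52xx_lpbfifo_submit(&req); here we
	 * simply invoke the completion callback to show the flow. */
	req.callback(&req);
	return 0;
}
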
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 6c587eddee59..850b72f27445 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -73,7 +73,6 @@ extern int nvram_write_error_log(char * buff, int length,
73extern int nvram_read_error_log(char * buff, int length, 73extern int nvram_read_error_log(char * buff, int length,
74 unsigned int * err_type, unsigned int *err_seq); 74 unsigned int * err_type, unsigned int *err_seq);
75extern int nvram_clear_error_log(void); 75extern int nvram_clear_error_log(void);
76extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
77 76
78extern int pSeries_nvram_init(void); 77extern int pSeries_nvram_init(void);
79 78
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index e482e5352e69..d4b4bfa26fb3 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,6 +17,7 @@
17#ifdef CONFIG_PPC_PSERIES 17#ifdef CONFIG_PPC_PSERIES
18extern int pSeries_reconfig_notifier_register(struct notifier_block *); 18extern int pSeries_reconfig_notifier_register(struct notifier_block *);
19extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); 19extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
20extern struct blocking_notifier_head pSeries_reconfig_chain;
20#else /* !CONFIG_PPC_PSERIES */ 21#else /* !CONFIG_PPC_PSERIES */
21static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) 22static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
22{ 23{
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 7d8514ceceae..5e9b4ef71415 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -129,6 +129,15 @@ struct paca_struct {
129 u64 system_time; /* accumulated system TB ticks */ 129 u64 system_time; /* accumulated system TB ticks */
130 u64 startpurr; /* PURR/TB value snapshot */ 130 u64 startpurr; /* PURR/TB value snapshot */
131 u64 startspurr; /* SPURR value snapshot */ 131 u64 startspurr; /* SPURR value snapshot */
132
133#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
134 struct {
135 u64 esid;
136 u64 vsid;
137 } kvm_slb[64]; /* guest SLB */
138 u8 kvm_slb_max; /* highest used guest slb entry */
139 u8 kvm_in_guest; /* are we inside the guest? */
140#endif
132}; 141};
133 142
134extern struct paca_struct paca[]; 143extern struct paca_struct paca[];
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index ff24254990e1..e96d52a516ba 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -229,6 +229,20 @@ typedef unsigned long pgprot_t;
229 229
230#endif 230#endif
231 231
232typedef struct { signed long pd; } hugepd_t;
233#define HUGEPD_SHIFT_MASK 0x3f
234
235#ifdef CONFIG_HUGETLB_PAGE
236static inline int hugepd_ok(hugepd_t hpd)
237{
238 return (hpd.pd > 0);
239}
240
241#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
242#else /* CONFIG_HUGETLB_PAGE */
243#define is_hugepd(pdep) 0
244#endif /* CONFIG_HUGETLB_PAGE */
245
232struct page; 246struct page;
233extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); 247extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
234extern void copy_user_page(void *to, void *from, unsigned long vaddr, 248extern void copy_user_page(void *to, void *from, unsigned long vaddr,
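
hugepd_t packs a huge-page directory entry into a single signed long, and hugepd_ok() treats any positive value as a huge-page directory; a normal entry holds a kernel virtual address whose top bit is set, so it tests negative. The sketch below is a hypothetical illustration using only the typedef, mask and test from the hunk above; the real encoding is set up in the powerpc mm code:

#include <stdio.h>

/* Copied from the page.h lines above */
typedef struct { signed long pd; } hugepd_t;
#define HUGEPD_SHIFT_MASK 0x3f

static int hugepd_ok(hugepd_t hpd)
{
	return hpd.pd > 0;
}

int main(void)
{
	/* Hypothetical encoding: a table address with the huge-page shift
	 * packed into the low bits; the real layout lives in the mm code. */
	unsigned long table = 0x12340000UL;
	unsigned int shift = 24;                  /* e.g. a 16MB page */
	hugepd_t hpd = { .pd = (signed long)(table | (shift & HUGEPD_SHIFT_MASK)) };

	if (hugepd_ok(hpd))
		printf("hugepd: table at 0x%lx, shift %lu\n",
		       (unsigned long)hpd.pd & ~(unsigned long)HUGEPD_SHIFT_MASK,
		       (unsigned long)hpd.pd & HUGEPD_SHIFT_MASK);
	return 0;
}
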
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 3f17b83f55a1..bfc4e027e2ad 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -90,7 +90,7 @@ extern unsigned int HPAGE_SHIFT;
90#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 90#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
91#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 91#define HPAGE_MASK (~(HPAGE_SIZE - 1))
92#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 92#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
93#define HUGE_MAX_HSTATE 3 93#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
94 94
95#endif /* __ASSEMBLY__ */ 95#endif /* __ASSEMBLY__ */
96 96
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index c9500d666a1d..580cf73b96e8 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,7 +3,8 @@
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5 5
6#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */ 6/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
7#define MAX_PGTABLE_INDEX_SIZE 0
7 8
8extern void __bad_pte(pmd_t *pmd); 9extern void __bad_pte(pmd_t *pmd);
9 10
@@ -36,11 +37,10 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
36extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 37extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
37extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); 38extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
38 39
39static inline void pgtable_free(pgtable_free_t pgf) 40static inline void pgtable_free(void *table, unsigned index_size)
40{ 41{
41 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); 42 BUG_ON(index_size); /* 32-bit doesn't use this */
42 43 free_page((unsigned long)table);
43 free_page((unsigned long)p);
44} 44}
45 45
46#define check_pgt_cache() do { } while (0) 46#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index e6f069c4f713..605f5c5398d1 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,27 +11,34 @@
11#include <linux/cpumask.h> 11#include <linux/cpumask.h>
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13 13
14#ifndef CONFIG_PPC_SUBPAGE_PROT 14/*
15static inline void subpage_prot_free(pgd_t *pgd) {} 15 * Functions that deal with pagetables that could be at any level of
16#endif 16 * the table need to be passed an "index_size" so they know how to
17 * handle allocation. For PTE pages (which are linked to a struct
18 * page for now, and drawn from the main get_free_pages() pool), the
19 * allocation size will be (2^index_size * sizeof(pointer)) and
20 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
21 *
22 * The maximum index size needs to be big enough to allow any
23 * pagetable sizes we need, but small enough to fit in the low bits of
24 * any page table pointer. In other words all pagetables, even tiny
25 * ones, must be aligned to allow at least enough low 0 bits to
26 * contain this value. This value is also used as a mask, so it must
27 * be one less than a power of two.
28 */
29#define MAX_PGTABLE_INDEX_SIZE 0xf
17 30
18extern struct kmem_cache *pgtable_cache[]; 31extern struct kmem_cache *pgtable_cache[];
19 32#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
20#define PGD_CACHE_NUM 0
21#define PUD_CACHE_NUM 1
22#define PMD_CACHE_NUM 1
23#define HUGEPTE_CACHE_NUM 2
24#define PTE_NONCACHE_NUM 7 /* from GFP rather than kmem_cache */
25 33
26static inline pgd_t *pgd_alloc(struct mm_struct *mm) 34static inline pgd_t *pgd_alloc(struct mm_struct *mm)
27{ 35{
28 return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL); 36 return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
29} 37}
30 38
31static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 39static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32{ 40{
33 subpage_prot_free(pgd); 41 kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
34 kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
35} 42}
36 43
37#ifndef CONFIG_PPC_64K_PAGES 44#ifndef CONFIG_PPC_64K_PAGES
@@ -40,13 +47,13 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
40 47
41static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 48static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
42{ 49{
43 return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM], 50 return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
44 GFP_KERNEL|__GFP_REPEAT); 51 GFP_KERNEL|__GFP_REPEAT);
45} 52}
46 53
47static inline void pud_free(struct mm_struct *mm, pud_t *pud) 54static inline void pud_free(struct mm_struct *mm, pud_t *pud)
48{ 55{
49 kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud); 56 kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
50} 57}
51 58
52static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 59static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -78,13 +85,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
78 85
79static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 86static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
80{ 87{
81 return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM], 88 return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
82 GFP_KERNEL|__GFP_REPEAT); 89 GFP_KERNEL|__GFP_REPEAT);
83} 90}
84 91
85static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 92static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
86{ 93{
87 kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd); 94 kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
88} 95}
89 96
90static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 97static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,24 +114,22 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
107 return page; 114 return page;
108} 115}
109 116
110static inline void pgtable_free(pgtable_free_t pgf) 117static inline void pgtable_free(void *table, unsigned index_size)
111{ 118{
112 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); 119 if (!index_size)
113 int cachenum = pgf.val & PGF_CACHENUM_MASK; 120 free_page((unsigned long)table);
114 121 else {
115 if (cachenum == PTE_NONCACHE_NUM) 122 BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
116 free_page((unsigned long)p); 123 kmem_cache_free(PGT_CACHE(index_size), table);
117 else 124 }
118 kmem_cache_free(pgtable_cache[cachenum], p);
119} 125}
120 126
121#define __pmd_free_tlb(tlb, pmd,addr) \ 127#define __pmd_free_tlb(tlb, pmd, addr) \
122 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 128 pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
123 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
124#ifndef CONFIG_PPC_64K_PAGES 129#ifndef CONFIG_PPC_64K_PAGES
125#define __pud_free_tlb(tlb, pud, addr) \ 130#define __pud_free_tlb(tlb, pud, addr) \
126 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ 131 pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
127 PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) 132
128#endif /* CONFIG_PPC_64K_PAGES */ 133#endif /* CONFIG_PPC_64K_PAGES */
129 134
130#define check_pgt_cache() do { } while (0) 135#define check_pgt_cache() do { } while (0)
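
The new scheme replaces the per-level cache numbers with a single index_size that, per the comment above, must fit in (and double as a mask for) the low bits of an aligned table pointer. The sketch below is an assumed illustration of that packing; only the MAX_PGTABLE_INDEX_SIZE value is taken from the hunk, and the table address and index size are made up:

#include <stdio.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf   /* from the pgalloc-64.h lines above */

int main(void)
{
	unsigned long table = 0x00f00000UL;   /* hypothetical aligned table */
	unsigned int index_size = 9;          /* e.g. 2^9 pointers per table */

	/* Pack the size into the low bits, then recover both halves. */
	unsigned long packed = table | index_size;
	unsigned long addr = packed & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE;
	unsigned int shift = packed & MAX_PGTABLE_INDEX_SIZE;

	printf("table 0x%lx, index_size %u, allocation %lu bytes\n",
	       addr, shift, (1UL << shift) * sizeof(void *));
	return 0;
}
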
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index f2e812de7c3c..abe8532bd14e 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -24,25 +24,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
24 __free_page(ptepage); 24 __free_page(ptepage);
25} 25}
26 26
27typedef struct pgtable_free {
28 unsigned long val;
29} pgtable_free_t;
30
31/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
32 * and small enough to fit in the low bits of any naturally aligned page
33 * table cache entry. Arbitrarily set to 0x1f, that should give us some
34 * room to grow
35 */
36#define PGF_CACHENUM_MASK 0x1f
37
38static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
39 unsigned long mask)
40{
41 BUG_ON(cachenum > PGF_CACHENUM_MASK);
42
43 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
44}
45
46#ifdef CONFIG_PPC64 27#ifdef CONFIG_PPC64
47#include <asm/pgalloc-64.h> 28#include <asm/pgalloc-64.h>
48#else 29#else
@@ -50,12 +31,12 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
50#endif 31#endif
51 32
52#ifdef CONFIG_SMP 33#ifdef CONFIG_SMP
53extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); 34extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
54extern void pte_free_finish(void); 35extern void pte_free_finish(void);
55#else /* CONFIG_SMP */ 36#else /* CONFIG_SMP */
56static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) 37static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
57{ 38{
58 pgtable_free(pgf); 39 pgtable_free(table, shift);
59} 40}
60static inline void pte_free_finish(void) { } 41static inline void pte_free_finish(void) { }
61#endif /* !CONFIG_SMP */ 42#endif /* !CONFIG_SMP */
@@ -63,12 +44,9 @@ static inline void pte_free_finish(void) { }
63static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, 44static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
64 unsigned long address) 45 unsigned long address)
65{ 46{
66 pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
67 PTE_NONCACHE_NUM,
68 PTE_TABLE_SIZE-1);
69 tlb_flush_pgtable(tlb, address); 47 tlb_flush_pgtable(tlb, address);
70 pgtable_page_dtor(ptepage); 48 pgtable_page_dtor(ptepage);
71 pgtable_free_tlb(tlb, pgf); 49 pgtable_free_tlb(tlb, page_address(ptepage), 0);
72} 50}
73 51
74#endif /* __KERNEL__ */ 52#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 806abe7a3fa5..49865045d56f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -354,6 +354,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
354#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) 354#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
355#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) 355#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
356 356
357void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
357void pgtable_cache_init(void); 358void pgtable_cache_init(void);
358 359
359/* 360/*
@@ -378,7 +379,18 @@ void pgtable_cache_init(void);
378 return pt; 379 return pt;
379} 380}
380 381
381pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long address); 382#ifdef CONFIG_HUGETLB_PAGE
383pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
384 unsigned *shift);
385#else
386static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
387 unsigned *shift)
388{
389 if (shift)
390 *shift = 0;
391 return find_linux_pte(pgdir, ea);
392}
393#endif /* !CONFIG_HUGETLB_PAGE */
382 394
383#endif /* __ASSEMBLY__ */ 395#endif /* __ASSEMBLY__ */
384 396
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2a5da069714e..21207e54825b 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -211,6 +211,9 @@ extern void paging_init(void);
211 */ 211 */
212extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); 212extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
213 213
214extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
215 unsigned long end, int write, struct page **pages, int *nr);
216
214#endif /* __ASSEMBLY__ */ 217#endif /* __ASSEMBLY__ */
215 218
216#endif /* __KERNEL__ */ 219#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index dd5ea95fe61e..d44826e4ff97 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -33,21 +33,21 @@
33#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ 33#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
34#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */ 34#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
35#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */ 35#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
36#define _PAGE_DIRTY 0x0100 /* C: page changed */
36 37
37/* These five software bits must be masked out when the entry is loaded 38/* These 4 software bits must be masked out when the entry is loaded
38 * into the TLB. 39 * into the TLB, 1 SW bit left(0x0080).
39 */ 40 */
40#define _PAGE_GUARDED 0x0010 /* software: guarded access */ 41#define _PAGE_GUARDED 0x0010 /* software: guarded access */
41#define _PAGE_DIRTY 0x0020 /* software: page changed */ 42#define _PAGE_ACCESSED 0x0020 /* software: page referenced */
42#define _PAGE_RW 0x0040 /* software: user write access allowed */ 43#define _PAGE_WRITETHRU 0x0040 /* software: caching is write through */
43#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
44 44
45/* Setting any bits in the nibble with the follow two controls will 45/* Setting any bits in the nibble with the follow two controls will
46 * require a TLB exception handler change. It is assumed unused bits 46 * require a TLB exception handler change. It is assumed unused bits
47 * are always zero. 47 * are always zero.
48 */ 48 */
49#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */ 49#define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */
50#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */ 50#define _PAGE_USER 0x0800 /* msb PP bits */
51 51
52#define _PMD_PRESENT 0x0001 52#define _PMD_PRESENT 0x0001
53#define _PMD_BAD 0x0ff0 53#define _PMD_BAD 0x0ff0
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 82b72207c51c..c4490f9c67c4 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -76,41 +76,4 @@
76 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ 76 remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
77 __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) 77 __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
78 78
79
80#ifdef CONFIG_PPC_SUBPAGE_PROT
81/*
82 * For the sub-page protection option, we extend the PGD with one of
83 * these. Basically we have a 3-level tree, with the top level being
84 * the protptrs array. To optimize speed and memory consumption when
85 * only addresses < 4GB are being protected, pointers to the first
86 * four pages of sub-page protection words are stored in the low_prot
87 * array.
88 * Each page of sub-page protection words protects 1GB (4 bytes
89 * protects 64k). For the 3-level tree, each page of pointers then
90 * protects 8TB.
91 */
92struct subpage_prot_table {
93 unsigned long maxaddr; /* only addresses < this are protected */
94 unsigned int **protptrs[2];
95 unsigned int *low_prot[4];
96};
97
98#undef PGD_TABLE_SIZE
99#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
100 sizeof(struct subpage_prot_table))
101
102#define SBP_L1_BITS (PAGE_SHIFT - 2)
103#define SBP_L2_BITS (PAGE_SHIFT - 3)
104#define SBP_L1_COUNT (1 << SBP_L1_BITS)
105#define SBP_L2_COUNT (1 << SBP_L2_BITS)
106#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
107#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
108
109extern void subpage_prot_free(pgd_t *pgd);
110
111static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
112{
113 return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
114}
115#endif /* CONFIG_PPC_SUBPAGE_PROT */
116#endif /* __ASSEMBLY__ */ 79#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index f388f0ab193f..0947b36e534c 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -87,7 +87,7 @@ extern spinlock_t cmxgcr_lock;
87 87
88/* Export QE common operations */ 88/* Export QE common operations */
89#ifdef CONFIG_QUICC_ENGINE 89#ifdef CONFIG_QUICC_ENGINE
90extern void __init qe_reset(void); 90extern void qe_reset(void);
91#else 91#else
92static inline void qe_reset(void) {} 92static inline void qe_reset(void) {}
93#endif 93#endif
@@ -145,8 +145,17 @@ static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
145static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} 145static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
146#endif /* CONFIG_QE_GPIO */ 146#endif /* CONFIG_QE_GPIO */
147 147
148/* QE internal API */ 148#ifdef CONFIG_QUICC_ENGINE
149int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); 149int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
150#else
151static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
152 u32 cmd_input)
153{
154 return -ENOSYS;
155}
156#endif /* CONFIG_QUICC_ENGINE */
157
158/* QE internal API */
150enum qe_clock qe_clock_source(const char *source); 159enum qe_clock qe_clock_source(const char *source);
151unsigned int qe_get_brg_clk(void); 160unsigned int qe_get_brg_clk(void);
152int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); 161int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier);
@@ -154,7 +163,28 @@ int qe_get_snum(void);
154void qe_put_snum(u8 snum); 163void qe_put_snum(u8 snum);
155unsigned int qe_get_num_of_risc(void); 164unsigned int qe_get_num_of_risc(void);
156unsigned int qe_get_num_of_snums(void); 165unsigned int qe_get_num_of_snums(void);
157int qe_alive_during_sleep(void); 166
167static inline int qe_alive_during_sleep(void)
168{
169 /*
170 * MPC8568E reference manual says:
171 *
172 * "...power down sequence waits for all I/O interfaces to become idle.
173 * In some applications this may happen eventually without actively
174 * shutting down interfaces, but most likely, software will have to
175 * take steps to shut down the eTSEC, QUICC Engine Block, and PCI
176 * interfaces before issuing the command (either the write to the core
177 * MSR[WE] as described above or writing to POWMGTCSR) to put the
178 * device into sleep state."
179 *
180 * MPC8569E reference manual has a similar paragraph.
181 */
182#ifdef CONFIG_PPC_85xx
183 return 0;
184#else
185 return 1;
186#endif
187}
158 188
159/* we actually use cpm_muram implementation, define this for convenience */ 189/* we actually use cpm_muram implementation, define this for convenience */
160#define qe_muram_init cpm_muram_init 190#define qe_muram_init cpm_muram_init
@@ -210,8 +240,15 @@ struct qe_firmware_info {
210 u64 extended_modes; /* Extended modes */ 240 u64 extended_modes; /* Extended modes */
211}; 241};
212 242
243#ifdef CONFIG_QUICC_ENGINE
213/* Upload a firmware to the QE */ 244/* Upload a firmware to the QE */
214int qe_upload_firmware(const struct qe_firmware *firmware); 245int qe_upload_firmware(const struct qe_firmware *firmware);
246#else
247static inline int qe_upload_firmware(const struct qe_firmware *firmware)
248{
249 return -ENOSYS;
250}
251#endif /* CONFIG_QUICC_ENGINE */
215 252
216/* Obtain information on the uploaded firmware */ 253/* Obtain information on the uploaded firmware */
217struct qe_firmware_info *qe_get_firmware_info(void); 254struct qe_firmware_info *qe_get_firmware_info(void);
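
A hypothetical caller-side sketch (not from this patch) of what the new stubs and the inlined qe_alive_during_sleep() provide: the same driver source now builds whether or not CONFIG_QUICC_ENGINE is set, and on 85xx parts the suspend path learns that QE state will not survive sleep. The function name and message are illustrative only.

#include <linux/kernel.h>
#include <asm/qe.h>

static int example_qe_suspend(void)
{
        /* qe_alive_during_sleep() is a static inline, so this builds on any config;
         * the !CONFIG_QUICC_ENGINE stubs above keep other QE calls compiling too. */
        if (!qe_alive_during_sleep())
                pr_info("QE state is lost across sleep; re-initialize on resume\n");
        return 0;
}
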
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c7d671a7d9a1..07d2d19ab5e9 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -145,7 +145,7 @@ SYSCALL_SPU(setfsuid)
145SYSCALL_SPU(setfsgid) 145SYSCALL_SPU(setfsgid)
146SYSCALL_SPU(llseek) 146SYSCALL_SPU(llseek)
147COMPAT_SYS_SPU(getdents) 147COMPAT_SYS_SPU(getdents)
148SYSX_SPU(sys_select,ppc32_select,ppc_select) 148SYSX_SPU(sys_select,ppc32_select,sys_select)
149SYSCALL_SPU(flock) 149SYSCALL_SPU(flock)
150SYSCALL_SPU(msync) 150SYSCALL_SPU(msync)
151COMPAT_SYS_SPU(readv) 151COMPAT_SYS_SPU(readv)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b23664a0b86c..c002b0410219 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -42,10 +42,11 @@ obj-$(CONFIG_ALTIVEC) += vecemu.o
42obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 42obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
43obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o 43obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
44obj-$(CONFIG_PPC_CLOCK) += clock.o 44obj-$(CONFIG_PPC_CLOCK) += clock.o
45procfs-$(CONFIG_PPC64) := proc_ppc64.o 45procfs-y := proc_powerpc.o
46obj-$(CONFIG_PROC_FS) += $(procfs-y) 46obj-$(CONFIG_PROC_FS) += $(procfs-y)
47rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o 47rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
48obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y) 48obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
49obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
49obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 50obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
50obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 51obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
51obj-$(CONFIG_LPARCFG) += lparcfg.o 52obj-$(CONFIG_LPARCFG) += lparcfg.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0812b0f414bb..a6c2b63227b3 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -190,6 +190,11 @@ int main(void)
190 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 190 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
191 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); 191 DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
192 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 192 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
193#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
194 DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
195 DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
196 DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
197#endif
193#endif /* CONFIG_PPC64 */ 198#endif /* CONFIG_PPC64 */
194 199
195 /* RTAS */ 200 /* RTAS */
@@ -398,14 +403,24 @@ int main(void)
398 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 403 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
399 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); 404 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
400 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); 405 DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
406
407 /* book3s_64 */
408#ifdef CONFIG_PPC64
409 DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
410 DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
411 DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
412 DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
413 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
414 DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
415 DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
416 DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
417 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
418#endif
401#endif 419#endif
402#ifdef CONFIG_44x 420#ifdef CONFIG_44x
403 DEFINE(PGD_T_LOG2, PGD_T_LOG2); 421 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
404 DEFINE(PTE_T_LOG2, PTE_T_LOG2); 422 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
405#endif 423#endif
406#ifdef CONFIG_FSL_BOOKE
407 DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
408#endif
409 424
410#ifdef CONFIG_KVM_EXIT_TIMING 425#ifdef CONFIG_KVM_EXIT_TIMING
411 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, 426 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
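
For context, these DEFINE() lines only matter because of how asm-offsets.c is post-processed; the standard kbuild idiom is reproduced below purely as an illustration. Each DEFINE() emits a "->SYM value" marker into generated assembly, and a build step turns those markers into #define lines in the generated asm-offsets.h header, which is what lets the new KVM entry code reference PACA_KVM_IN_GUEST, VCPU_SHADOW_MSR and friends by name from assembly.

/* Standard kbuild asm-offsets idiom (illustrative, not part of this patch). */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() \
        asm volatile("\n->" : : )
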
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 0a8439aafdd1..6f4613dd05ef 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -373,7 +373,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
373 hard_irq_disable(); 373 hard_irq_disable();
374 374
375 for_each_irq(i) { 375 for_each_irq(i) {
376 struct irq_desc *desc = irq_desc + i; 376 struct irq_desc *desc = irq_to_desc(i);
377 377
378 if (desc->status & IRQ_INPROGRESS) 378 if (desc->status & IRQ_INPROGRESS)
379 desc->chip->eoi(i); 379 desc->chip->eoi(i);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e96cbbd9b449..59c928564a03 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -21,7 +21,6 @@
21#include <asm/dma.h> 21#include <asm/dma.h>
22#include <asm/abs_addr.h> 22#include <asm/abs_addr.h>
23 23
24int swiotlb __read_mostly;
25unsigned int ppc_swiotlb_enable; 24unsigned int ppc_swiotlb_enable;
26 25
27/* 26/*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c7eb4e0eb86c..e3be98ffe2a7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -41,6 +41,7 @@ __start_interrupts:
41 . = 0x200 41 . = 0x200
42_machine_check_pSeries: 42_machine_check_pSeries:
43 HMT_MEDIUM 43 HMT_MEDIUM
44 DO_KVM 0x200
44 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ 45 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
45 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 46 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
46 47
@@ -48,6 +49,7 @@ _machine_check_pSeries:
48 .globl data_access_pSeries 49 .globl data_access_pSeries
49data_access_pSeries: 50data_access_pSeries:
50 HMT_MEDIUM 51 HMT_MEDIUM
52 DO_KVM 0x300
51 mtspr SPRN_SPRG_SCRATCH0,r13 53 mtspr SPRN_SPRG_SCRATCH0,r13
52BEGIN_FTR_SECTION 54BEGIN_FTR_SECTION
53 mfspr r13,SPRN_SPRG_PACA 55 mfspr r13,SPRN_SPRG_PACA
@@ -77,6 +79,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
77 .globl data_access_slb_pSeries 79 .globl data_access_slb_pSeries
78data_access_slb_pSeries: 80data_access_slb_pSeries:
79 HMT_MEDIUM 81 HMT_MEDIUM
82 DO_KVM 0x380
80 mtspr SPRN_SPRG_SCRATCH0,r13 83 mtspr SPRN_SPRG_SCRATCH0,r13
81 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ 84 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
82 std r3,PACA_EXSLB+EX_R3(r13) 85 std r3,PACA_EXSLB+EX_R3(r13)
@@ -115,6 +118,7 @@ data_access_slb_pSeries:
115 .globl instruction_access_slb_pSeries 118 .globl instruction_access_slb_pSeries
116instruction_access_slb_pSeries: 119instruction_access_slb_pSeries:
117 HMT_MEDIUM 120 HMT_MEDIUM
121 DO_KVM 0x480
118 mtspr SPRN_SPRG_SCRATCH0,r13 122 mtspr SPRN_SPRG_SCRATCH0,r13
119 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ 123 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
120 std r3,PACA_EXSLB+EX_R3(r13) 124 std r3,PACA_EXSLB+EX_R3(r13)
@@ -154,6 +158,7 @@ instruction_access_slb_pSeries:
154 .globl system_call_pSeries 158 .globl system_call_pSeries
155system_call_pSeries: 159system_call_pSeries:
156 HMT_MEDIUM 160 HMT_MEDIUM
161 DO_KVM 0xc00
157BEGIN_FTR_SECTION 162BEGIN_FTR_SECTION
158 cmpdi r0,0x1ebe 163 cmpdi r0,0x1ebe
159 beq- 1f 164 beq- 1f
@@ -187,14 +192,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
187 */ 192 */
188performance_monitor_pSeries_1: 193performance_monitor_pSeries_1:
189 . = 0xf00 194 . = 0xf00
195 DO_KVM 0xf00
190 b performance_monitor_pSeries 196 b performance_monitor_pSeries
191 197
192altivec_unavailable_pSeries_1: 198altivec_unavailable_pSeries_1:
193 . = 0xf20 199 . = 0xf20
200 DO_KVM 0xf20
194 b altivec_unavailable_pSeries 201 b altivec_unavailable_pSeries
195 202
196vsx_unavailable_pSeries_1: 203vsx_unavailable_pSeries_1:
197 . = 0xf40 204 . = 0xf40
205 DO_KVM 0xf40
198 b vsx_unavailable_pSeries 206 b vsx_unavailable_pSeries
199 207
200#ifdef CONFIG_CBE_RAS 208#ifdef CONFIG_CBE_RAS
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c38afdb45d7b..925807488022 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,6 +37,7 @@
37#include <asm/firmware.h> 37#include <asm/firmware.h>
38#include <asm/page_64.h> 38#include <asm/page_64.h>
39#include <asm/irqflags.h> 39#include <asm/irqflags.h>
40#include <asm/kvm_book3s_64_asm.h>
40 41
41/* The physical memory is layed out such that the secondary processor 42/* The physical memory is layed out such that the secondary processor
42 * spin code sits at 0x0000...0x00ff. On server, the vectors follow 43 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -165,6 +166,12 @@ exception_marker:
165#include "exceptions-64s.S" 166#include "exceptions-64s.S"
166#endif 167#endif
167 168
169/* KVM trampoline code needs to be close to the interrupt handlers */
170
171#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
172#include "../kvm/book3s_64_rmhandlers.S"
173#endif
174
168_GLOBAL(generic_secondary_thread_init) 175_GLOBAL(generic_secondary_thread_init)
169 mr r24,r3 176 mr r24,r3
170 177
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6ded19d01891..678f98cd5e64 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -206,6 +206,8 @@ MachineCheck:
206 EXCEPTION_PROLOG 206 EXCEPTION_PROLOG
207 mfspr r4,SPRN_DAR 207 mfspr r4,SPRN_DAR
208 stw r4,_DAR(r11) 208 stw r4,_DAR(r11)
209 li r5,0x00f0
210 mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
209 mfspr r5,SPRN_DSISR 211 mfspr r5,SPRN_DSISR
210 stw r5,_DSISR(r11) 212 stw r5,_DSISR(r11)
211 addi r3,r1,STACK_FRAME_OVERHEAD 213 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -222,6 +224,8 @@ DataAccess:
222 stw r10,_DSISR(r11) 224 stw r10,_DSISR(r11)
223 mr r5,r10 225 mr r5,r10
224 mfspr r4,SPRN_DAR 226 mfspr r4,SPRN_DAR
227 li r10,0x00f0
228 mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
225 EXC_XFER_EE_LITE(0x300, handle_page_fault) 229 EXC_XFER_EE_LITE(0x300, handle_page_fault)
226 230
227/* Instruction access exception. 231/* Instruction access exception.
@@ -244,6 +248,8 @@ Alignment:
244 EXCEPTION_PROLOG 248 EXCEPTION_PROLOG
245 mfspr r4,SPRN_DAR 249 mfspr r4,SPRN_DAR
246 stw r4,_DAR(r11) 250 stw r4,_DAR(r11)
251 li r5,0x00f0
252 mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
247 mfspr r5,SPRN_DSISR 253 mfspr r5,SPRN_DSISR
248 stw r5,_DSISR(r11) 254 stw r5,_DSISR(r11)
249 addi r3,r1,STACK_FRAME_OVERHEAD 255 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -333,26 +339,20 @@ InstructionTLBMiss:
333 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ 339 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
334 lwz r10, 0(r11) /* Get the pte */ 340 lwz r10, 0(r11) /* Get the pte */
335 341
336#ifdef CONFIG_SWAP 342 andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
337 /* do not set the _PAGE_ACCESSED bit of a non-present page */ 343 cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
338 andi. r11, r10, _PAGE_PRESENT 344 bne- cr0, 2f
339 beq 4f 345
340 ori r10, r10, _PAGE_ACCESSED 346 /* Clear PP lsb, 0x400 */
341 mfspr r11, SPRN_MD_TWC /* get the pte address again */ 347 rlwinm r10, r10, 0, 22, 20
342 stw r10, 0(r11)
3434:
344#else
345 ori r10, r10, _PAGE_ACCESSED
346 stw r10, 0(r11)
347#endif
348 348
349 /* The Linux PTE won't go exactly into the MMU TLB. 349 /* The Linux PTE won't go exactly into the MMU TLB.
350 * Software indicator bits 21, 22 and 28 must be clear. 350 * Software indicator bits 22 and 28 must be clear.
351 * Software indicator bits 24, 25, 26, and 27 must be 351 * Software indicator bits 24, 25, 26, and 27 must be
352 * set. All other Linux PTE bits control the behavior 352 * set. All other Linux PTE bits control the behavior
353 * of the MMU. 353 * of the MMU.
354 */ 354 */
3552: li r11, 0x00f0 355 li r11, 0x00f0
356 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ 356 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
357 DO_8xx_CPU6(0x2d80, r3) 357 DO_8xx_CPU6(0x2d80, r3)
358 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ 358 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
@@ -365,6 +365,22 @@ InstructionTLBMiss:
365 lwz r3, 8(r0) 365 lwz r3, 8(r0)
366#endif 366#endif
367 rfi 367 rfi
3682:
369 mfspr r11, SPRN_SRR1
370 /* clear all error bits as TLB Miss
371 * sets a few unconditionally
372 */
373 rlwinm r11, r11, 0, 0xffff
374 mtspr SPRN_SRR1, r11
375
376 mfspr r10, SPRN_M_TW /* Restore registers */
377 lwz r11, 0(r0)
378 mtcr r11
379 lwz r11, 4(r0)
380#ifdef CONFIG_8xx_CPU6
381 lwz r3, 8(r0)
382#endif
383 b InstructionAccess
368 384
369 . = 0x1200 385 . = 0x1200
370DataStoreTLBMiss: 386DataStoreTLBMiss:
@@ -406,29 +422,45 @@ DataStoreTLBMiss:
406 * above. 422 * above.
407 */ 423 */
408 rlwimi r11, r10, 0, 27, 27 424 rlwimi r11, r10, 0, 27, 27
425 /* Insert the WriteThru flag into the TWC from the Linux PTE.
426 * It is bit 25 in the Linux PTE and bit 30 in the TWC
427 */
428 rlwimi r11, r10, 32-5, 30, 30
409 DO_8xx_CPU6(0x3b80, r3) 429 DO_8xx_CPU6(0x3b80, r3)
410 mtspr SPRN_MD_TWC, r11 430 mtspr SPRN_MD_TWC, r11
411 431
412#ifdef CONFIG_SWAP 432 /* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
413 /* do not set the _PAGE_ACCESSED bit of a non-present page */ 433 * We also need to know if the insn is a load/store, so:
414 andi. r11, r10, _PAGE_PRESENT 434 * Clear _PAGE_PRESENT and load that which will
415 beq 4f 435 * trap into DTLB Error with store bit set accordingly.
416 ori r10, r10, _PAGE_ACCESSED 436 */
4174: 437 /* PRESENT=0x1, ACCESSED=0x20
418 /* and update pte in table */ 438 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
419#else 439 * r10 = (r10 & ~PRESENT) | r11;
420 ori r10, r10, _PAGE_ACCESSED 440 */
421#endif 441 rlwinm r11, r10, 32-5, _PAGE_PRESENT
422 mfspr r11, SPRN_MD_TWC /* get the pte address again */ 442 and r11, r11, r10
423 stw r10, 0(r11) 443 rlwimi r10, r11, 0, _PAGE_PRESENT
444
445 /* Honour kernel RO, User NA */
446 /* 0x200 == Extended encoding, bit 22 */
447 /* r11 = (r10 & _PAGE_USER) >> 2 */
448 rlwinm r11, r10, 32-2, 0x200
449 or r10, r11, r10
450 /* r11 = (r10 & _PAGE_RW) >> 1 */
451 rlwinm r11, r10, 32-1, 0x200
452 or r10, r11, r10
453 /* invert RW and 0x200 bits */
454 xori r10, r10, _PAGE_RW | 0x200
424 455
425 /* The Linux PTE won't go exactly into the MMU TLB. 456 /* The Linux PTE won't go exactly into the MMU TLB.
426 * Software indicator bits 21, 22 and 28 must be clear. 457 * Software indicator bits 22 and 28 must be clear.
427 * Software indicator bits 24, 25, 26, and 27 must be 458 * Software indicator bits 24, 25, 26, and 27 must be
428 * set. All other Linux PTE bits control the behavior 459 * set. All other Linux PTE bits control the behavior
429 * of the MMU. 460 * of the MMU.
430 */ 461 */
4312: li r11, 0x00f0 4622: li r11, 0x00f0
463 mtspr SPRN_DAR,r11 /* Tag DAR */
432 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ 464 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
433 DO_8xx_CPU6(0x3d80, r3) 465 DO_8xx_CPU6(0x3d80, r3)
434 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ 466 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
@@ -469,97 +501,10 @@ DataTLBError:
469 stw r10, 0(r0) 501 stw r10, 0(r0)
470 stw r11, 4(r0) 502 stw r11, 4(r0)
471 503
472 /* First, make sure this was a store operation.
473 */
474 mfspr r10, SPRN_DSISR
475 andis. r11, r10, 0x0200 /* If set, indicates store op */
476 beq 2f
477
478 /* The EA of a data TLB miss is automatically stored in the MD_EPN
479 * register. The EA of a data TLB error is automatically stored in
480 * the DAR, but not the MD_EPN register. We must copy the 20 most
481 * significant bits of the EA from the DAR to MD_EPN before we
482 * start walking the page tables. We also need to copy the CASID
483 * value from the M_CASID register.
484 * Addendum: The EA of a data TLB error is _supposed_ to be stored
485 * in DAR, but it seems that this doesn't happen in some cases, such
486 * as when the error is due to a dcbi instruction to a page with a
487 * TLB that doesn't have the changed bit set. In such cases, there
488 * does not appear to be any way to recover the EA of the error
489 * since it is neither in DAR nor MD_EPN. As a workaround, the
490 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
491 * are initialized in mapin_ram(). This will avoid the problem,
492 * assuming we only use the dcbi instruction on kernel addresses.
493 */
494 mfspr r10, SPRN_DAR 504 mfspr r10, SPRN_DAR
495 rlwinm r11, r10, 0, 0, 19 505 cmpwi cr0, r10, 0x00f0
496 ori r11, r11, MD_EVALID 506 beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
497 mfspr r10, SPRN_M_CASID 507DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR */
498 rlwimi r11, r10, 0, 28, 31
499 DO_8xx_CPU6(0x3780, r3)
500 mtspr SPRN_MD_EPN, r11
501
502 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
503
504 /* If we are faulting a kernel address, we have to use the
505 * kernel page tables.
506 */
507 andi. r11, r10, 0x0800
508 beq 3f
509 lis r11, swapper_pg_dir@h
510 ori r11, r11, swapper_pg_dir@l
511 rlwimi r10, r11, 0, 2, 19
5123:
513 lwz r11, 0(r10) /* Get the level 1 entry */
514 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
515 beq 2f /* If zero, bail */
516
517 /* We have a pte table, so fetch the pte from the table.
518 */
519 ori r11, r11, 1 /* Set valid bit in physical L2 page */
520 DO_8xx_CPU6(0x3b80, r3)
521 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
522 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
523 lwz r10, 0(r11) /* Get the pte */
524
525 andi. r11, r10, _PAGE_RW /* Is it writeable? */
526 beq 2f /* Bail out if not */
527
528 /* Update 'changed', among others.
529 */
530#ifdef CONFIG_SWAP
531 ori r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
532 /* do not set the _PAGE_ACCESSED bit of a non-present page */
533 andi. r11, r10, _PAGE_PRESENT
534 beq 4f
535 ori r10, r10, _PAGE_ACCESSED
5364:
537#else
538 ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
539#endif
540 mfspr r11, SPRN_MD_TWC /* Get pte address again */
541 stw r10, 0(r11) /* and update pte in table */
542
543 /* The Linux PTE won't go exactly into the MMU TLB.
544 * Software indicator bits 21, 22 and 28 must be clear.
545 * Software indicator bits 24, 25, 26, and 27 must be
546 * set. All other Linux PTE bits control the behavior
547 * of the MMU.
548 */
549 li r11, 0x00f0
550 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
551 DO_8xx_CPU6(0x3d80, r3)
552 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
553
554 mfspr r10, SPRN_M_TW /* Restore registers */
555 lwz r11, 0(r0)
556 mtcr r11
557 lwz r11, 4(r0)
558#ifdef CONFIG_8xx_CPU6
559 lwz r3, 8(r0)
560#endif
561 rfi
5622:
563 mfspr r10, SPRN_M_TW /* Restore registers */ 508 mfspr r10, SPRN_M_TW /* Restore registers */
564 lwz r11, 0(r0) 509 lwz r11, 0(r0)
565 mtcr r11 510 mtcr r11
@@ -588,6 +533,140 @@ DataTLBError:
588 533
589 . = 0x2000 534 . = 0x2000
590 535
536/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
537 * by decoding the registers used by the dcbx instruction and adding them.
538 * DAR is set to the calculated address and r10 also holds the EA on exit.
539 */
540 /* define if you don't want to use self modifying code */
541#define NO_SELF_MODIFYING_CODE
542FixupDAR:/* Entry point for dcbx workaround. */
543 /* fetch instruction from memory. */
544 mfspr r10, SPRN_SRR0
545 DO_8xx_CPU6(0x3780, r3)
546 mtspr SPRN_MD_EPN, r10
547 mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */
548 cmplwi cr0, r11, 0x0800
549 blt- 3f /* Branch if user space */
550 lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
551 ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
552 rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */
5533: lwz r11, 0(r11) /* Get the level 1 entry */
554 DO_8xx_CPU6(0x3b80, r3)
555 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
556 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
557 lwz r11, 0(r11) /* Get the pte */
558 /* concat physical page address(r11) and page offset(r10) */
559 rlwimi r11, r10, 0, 20, 31
560 lwz r11,0(r11)
561/* Check if it really is a dcbx instruction. */
562/* dcbt and dcbtst does not generate DTLB Misses/Errors,
563 * no need to include them here */
564 srwi r10, r11, 26 /* check if major OP code is 31 */
565 cmpwi cr0, r10, 31
566 bne- 141f
567 rlwinm r10, r11, 0, 21, 30
568 cmpwi cr0, r10, 2028 /* Is dcbz? */
569 beq+ 142f
570 cmpwi cr0, r10, 940 /* Is dcbi? */
571 beq+ 142f
572 cmpwi cr0, r10, 108 /* Is dcbst? */
573 beq+ 144f /* Fix up store bit! */
574 cmpwi cr0, r10, 172 /* Is dcbf? */
575 beq+ 142f
576 cmpwi cr0, r10, 1964 /* Is icbi? */
577 beq+ 142f
578141: mfspr r10, SPRN_DAR /* r10 must hold DAR at exit */
579 b DARFixed /* Nope, go back to normal TLB processing */
580
581144: mfspr r10, SPRN_DSISR
582 rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
583 mtspr SPRN_DSISR, r10
584142: /* continue, it was a dcbx, dcbi instruction. */
585#ifdef CONFIG_8xx_CPU6
586 lwz r3, 8(r0) /* restore r3 from memory */
587#endif
588#ifndef NO_SELF_MODIFYING_CODE
589 andis. r10,r11,0x1f /* test if reg RA is r0 */
590 li r10,modified_instr@l
591 dcbtst r0,r10 /* touch for store */
592 rlwinm r11,r11,0,0,20 /* Zero lower 10 bits */
593 oris r11,r11,640 /* Transform instr. to a "add r10,RA,RB" */
594 ori r11,r11,532
595 stw r11,0(r10) /* store add/and instruction */
596 dcbf 0,r10 /* flush new instr. to memory. */
597 icbi 0,r10 /* invalidate instr. cache line */
598 lwz r11, 4(r0) /* restore r11 from memory */
599 mfspr r10, SPRN_M_TW /* restore r10 from M_TW */
600 isync /* Wait until new instr is loaded from memory */
601modified_instr:
602 .space 4 /* this is where the add instr. is stored */
603 bne+ 143f
604 subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */
605143: mtdar r10 /* store faulting EA in DAR */
606 b DARFixed /* Go back to normal TLB handling */
607#else
608 mfctr r10
609 mtdar r10 /* save ctr reg in DAR */
610 rlwinm r10, r11, 24, 24, 28 /* offset into jump table for reg RB */
611 addi r10, r10, 150f@l /* add start of table */
612 mtctr r10 /* load ctr with jump address */
613 xor r10, r10, r10 /* sum starts at zero */
614 bctr /* jump into table */
615150:
616 add r10, r10, r0 ;b 151f
617 add r10, r10, r1 ;b 151f
618 add r10, r10, r2 ;b 151f
619 add r10, r10, r3 ;b 151f
620 add r10, r10, r4 ;b 151f
621 add r10, r10, r5 ;b 151f
622 add r10, r10, r6 ;b 151f
623 add r10, r10, r7 ;b 151f
624 add r10, r10, r8 ;b 151f
625 add r10, r10, r9 ;b 151f
626 mtctr r11 ;b 154f /* r10 needs special handling */
627 mtctr r11 ;b 153f /* r11 needs special handling */
628 add r10, r10, r12 ;b 151f
629 add r10, r10, r13 ;b 151f
630 add r10, r10, r14 ;b 151f
631 add r10, r10, r15 ;b 151f
632 add r10, r10, r16 ;b 151f
633 add r10, r10, r17 ;b 151f
634 add r10, r10, r18 ;b 151f
635 add r10, r10, r19 ;b 151f
636 add r10, r10, r20 ;b 151f
637 add r10, r10, r21 ;b 151f
638 add r10, r10, r22 ;b 151f
639 add r10, r10, r23 ;b 151f
640 add r10, r10, r24 ;b 151f
641 add r10, r10, r25 ;b 151f
642 add r10, r10, r26 ;b 151f
643 add r10, r10, r27 ;b 151f
644 add r10, r10, r28 ;b 151f
645 add r10, r10, r29 ;b 151f
646 add r10, r10, r30 ;b 151f
647 add r10, r10, r31
648151:
649 rlwinm. r11,r11,19,24,28 /* offset into jump table for reg RA */
650 beq 152f /* if reg RA is zero, don't add it */
651 addi r11, r11, 150b@l /* add start of table */
652 mtctr r11 /* load ctr with jump address */
653 rlwinm r11,r11,0,16,10 /* make sure we don't execute this more than once */
654 bctr /* jump into table */
655152:
656 mfdar r11
657 mtctr r11 /* restore ctr reg from DAR */
658 mtdar r10 /* save fault EA to DAR */
659 b DARFixed /* Go back to normal TLB handling */
660
661 /* special handling for r10,r11 since these are modified already */
662153: lwz r11, 4(r0) /* load r11 from memory */
663 b 155f
664154: mfspr r11, SPRN_M_TW /* load r10 from M_TW */
665155: add r10, r10, r11 /* add it */
666 mfctr r11 /* restore r11 */
667 b 151b
668#endif
669
591 .globl giveup_fpu 670 .globl giveup_fpu
592giveup_fpu: 671giveup_fpu:
593 blr 672 blr
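
In plain C, the address computation that the FixupDAR path above performs looks roughly like the sketch below. The gpr[] array and the zero return are conventions of this sketch only; the real handler never leaves assembly and additionally clears the DSISR store bit for dcbst.

/* Sketch: for a faulting dcbz/dcbi/dcbst/dcbf/icbi, EA = (RA ? GPR[RA] : 0) + GPR[RB]. */
static unsigned long dcbx_ea(unsigned int insn, const unsigned long *gpr)
{
        unsigned int xo, ra, rb;

        if ((insn >> 26) != 31)                 /* cache ops are X-form, major opcode 31 */
                return 0;
        xo = (insn >> 1) & 0x3ff;               /* extended opcode, bits 21-30           */
        if (xo != 1014 && xo != 470 && xo != 54 && xo != 86 && xo != 982)
                return 0;                       /* dcbz, dcbi, dcbst, dcbf, icbi         */
        ra = (insn >> 16) & 0x1f;               /* RA register field                     */
        rb = (insn >> 11) & 0x1f;               /* RB register field                     */
        return (ra ? gpr[ra] : 0) + gpr[rb];    /* what the add jump table sums up       */
}
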
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 975788ca05d2..7f4bd7f3b6af 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -944,28 +944,6 @@ _GLOBAL(__setup_e500mc_ivors)
944 blr 944 blr
945 945
946/* 946/*
947 * extern void loadcam_entry(unsigned int index)
948 *
949 * Load TLBCAM[index] entry in to the L2 CAM MMU
950 */
951_GLOBAL(loadcam_entry)
952 lis r4,TLBCAM@ha
953 addi r4,r4,TLBCAM@l
954 mulli r5,r3,TLBCAM_SIZE
955 add r3,r5,r4
956 lwz r4,0(r3)
957 mtspr SPRN_MAS0,r4
958 lwz r4,4(r3)
959 mtspr SPRN_MAS1,r4
960 lwz r4,8(r3)
961 mtspr SPRN_MAS2,r4
962 lwz r4,12(r3)
963 mtspr SPRN_MAS3,r4
964 tlbwe
965 isync
966 blr
967
968/*
969 * extern void giveup_altivec(struct task_struct *prev) 947 * extern void giveup_altivec(struct task_struct *prev)
970 * 948 *
971 * The e500 core does not have an AltiVec unit. 949 * The e500 core does not have an AltiVec unit.
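
A C-level rendering of what the removed loadcam_entry did, for readers who do not want to decode the MAS register loads by hand. The struct layout and names below are assumptions of this sketch; the assembly only shows four consecutive 32-bit words per TLBCAM entry.

#include <asm/reg.h>

struct tlbcam_sketch {                  /* assumed layout: MAS0..MAS3 back to back */
        unsigned int mas0, mas1, mas2, mas3;
};
extern struct tlbcam_sketch TLBCAM[];

static void loadcam_entry_sketch(unsigned int index)
{
        mtspr(SPRN_MAS0, TLBCAM[index].mas0);
        mtspr(SPRN_MAS1, TLBCAM[index].mas1);
        mtspr(SPRN_MAS2, TLBCAM[index].mas2);
        mtspr(SPRN_MAS3, TLBCAM[index].mas3);
        asm volatile("tlbwe; isync" : : : "memory");
}
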
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 1882bf419fa6..8dc7547c2377 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
161 dest++; 161 dest++;
162 n--; 162 n--;
163 } 163 }
164 while(n > 4) { 164 while(n >= 4) {
165 *((u32 *)dest) = *((volatile u32 *)vsrc); 165 *((u32 *)dest) = *((volatile u32 *)vsrc);
166 eieio(); 166 eieio();
167 vsrc += 4; 167 vsrc += 4;
@@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
190 vdest++; 190 vdest++;
191 n--; 191 n--;
192 } 192 }
193 while(n > 4) { 193 while(n >= 4) {
194 *((volatile u32 *)vdest) = *((volatile u32 *)src); 194 *((volatile u32 *)vdest) = *((volatile u32 *)src);
195 src += 4; 195 src += 4;
196 vdest += 4; 196 vdest += 4;
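
The one-character loop changes above ("> 4" to ">= 4") let the final aligned word go through the 32-bit path instead of falling to the byte-copy tail. A trimmed sketch of the loop shape (the eieio() barriers and the unaligned head/tail handling of the real functions are omitted; the helper name is illustrative):

#include <linux/types.h>

static void copy_words_from_io_sketch(void *dest, const volatile void *vsrc,
                                      unsigned long n)
{
        u32 *d = dest;
        const volatile u32 *s = vsrc;

        while (n >= 4) {        /* ">=": the last full word is copied as a word */
                *d++ = *s++;
                n -= 4;
        }
        /* _memcpy_fromio()/_memcpy_toio() then finish any 1-3 byte tail */
}
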
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 02a334662cc0..f6dca4f4b295 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -87,7 +87,10 @@ extern int tau_interrupts(int);
87#endif /* CONFIG_PPC32 */ 87#endif /* CONFIG_PPC32 */
88 88
89#ifdef CONFIG_PPC64 89#ifdef CONFIG_PPC64
90
91#ifndef CONFIG_SPARSE_IRQ
90EXPORT_SYMBOL(irq_desc); 92EXPORT_SYMBOL(irq_desc);
93#endif
91 94
92int distribute_irqs = 1; 95int distribute_irqs = 1;
93 96
@@ -189,33 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
189 for_each_online_cpu(j) 192 for_each_online_cpu(j)
190 seq_printf(p, "CPU%d ", j); 193 seq_printf(p, "CPU%d ", j);
191 seq_putc(p, '\n'); 194 seq_putc(p, '\n');
192 } 195 } else if (i == nr_irqs) {
193
194 if (i < NR_IRQS) {
195 desc = get_irq_desc(i);
196 spin_lock_irqsave(&desc->lock, flags);
197 action = desc->action;
198 if (!action || !action->handler)
199 goto skip;
200 seq_printf(p, "%3d: ", i);
201#ifdef CONFIG_SMP
202 for_each_online_cpu(j)
203 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
204#else
205 seq_printf(p, "%10u ", kstat_irqs(i));
206#endif /* CONFIG_SMP */
207 if (desc->chip)
208 seq_printf(p, " %s ", desc->chip->typename);
209 else
210 seq_puts(p, " None ");
211 seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
212 seq_printf(p, " %s", action->name);
213 for (action = action->next; action; action = action->next)
214 seq_printf(p, ", %s", action->name);
215 seq_putc(p, '\n');
216skip:
217 spin_unlock_irqrestore(&desc->lock, flags);
218 } else if (i == NR_IRQS) {
219#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) 196#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
220 if (tau_initialized){ 197 if (tau_initialized){
221 seq_puts(p, "TAU: "); 198 seq_puts(p, "TAU: ");
@@ -225,30 +202,68 @@ skip:
225 } 202 }
226#endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/ 203#endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
227 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts); 204 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
205
206 return 0;
228 } 207 }
208
209 desc = irq_to_desc(i);
210 if (!desc)
211 return 0;
212
213 spin_lock_irqsave(&desc->lock, flags);
214
215 action = desc->action;
216 if (!action || !action->handler)
217 goto skip;
218
219 seq_printf(p, "%3d: ", i);
220#ifdef CONFIG_SMP
221 for_each_online_cpu(j)
222 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
223#else
224 seq_printf(p, "%10u ", kstat_irqs(i));
225#endif /* CONFIG_SMP */
226
227 if (desc->chip)
228 seq_printf(p, " %s ", desc->chip->name);
229 else
230 seq_puts(p, " None ");
231
232 seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
233 seq_printf(p, " %s", action->name);
234
235 for (action = action->next; action; action = action->next)
236 seq_printf(p, ", %s", action->name);
237 seq_putc(p, '\n');
238
239skip:
240 spin_unlock_irqrestore(&desc->lock, flags);
241
229 return 0; 242 return 0;
230} 243}
231 244
232#ifdef CONFIG_HOTPLUG_CPU 245#ifdef CONFIG_HOTPLUG_CPU
233void fixup_irqs(cpumask_t map) 246void fixup_irqs(cpumask_t map)
234{ 247{
248 struct irq_desc *desc;
235 unsigned int irq; 249 unsigned int irq;
236 static int warned; 250 static int warned;
237 251
238 for_each_irq(irq) { 252 for_each_irq(irq) {
239 cpumask_t mask; 253 cpumask_t mask;
240 254
241 if (irq_desc[irq].status & IRQ_PER_CPU) 255 desc = irq_to_desc(irq);
256 if (desc && desc->status & IRQ_PER_CPU)
242 continue; 257 continue;
243 258
244 cpumask_and(&mask, irq_desc[irq].affinity, &map); 259 cpumask_and(&mask, desc->affinity, &map);
245 if (any_online_cpu(mask) == NR_CPUS) { 260 if (any_online_cpu(mask) == NR_CPUS) {
246 printk("Breaking affinity for irq %i\n", irq); 261 printk("Breaking affinity for irq %i\n", irq);
247 mask = map; 262 mask = map;
248 } 263 }
249 if (irq_desc[irq].chip->set_affinity) 264 if (desc->chip->set_affinity)
250 irq_desc[irq].chip->set_affinity(irq, &mask); 265 desc->chip->set_affinity(irq, &mask);
251 else if (irq_desc[irq].action && !(warned++)) 266 else if (desc->action && !(warned++))
252 printk("Cannot set affinity for irq %i\n", irq); 267 printk("Cannot set affinity for irq %i\n", irq);
253 } 268 }
254 269
@@ -275,7 +290,7 @@ static inline void handle_one_irq(unsigned int irq)
275 return; 290 return;
276 } 291 }
277 292
278 desc = irq_desc + irq; 293 desc = irq_to_desc(irq);
279 saved_sp_limit = current->thread.ksp_limit; 294 saved_sp_limit = current->thread.ksp_limit;
280 295
281 irqtp->task = curtp->task; 296 irqtp->task = curtp->task;
@@ -541,7 +556,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
541 smp_wmb(); 556 smp_wmb();
542 557
543 /* Clear norequest flags */ 558 /* Clear norequest flags */
544 get_irq_desc(i)->status &= ~IRQ_NOREQUEST; 559 irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
545 560
546 /* Legacy flags are left to default at this point, 561 /* Legacy flags are left to default at this point,
547 * one can then use irq_create_mapping() to 562 * one can then use irq_create_mapping() to
@@ -607,8 +622,16 @@ void irq_set_virq_count(unsigned int count)
607static int irq_setup_virq(struct irq_host *host, unsigned int virq, 622static int irq_setup_virq(struct irq_host *host, unsigned int virq,
608 irq_hw_number_t hwirq) 623 irq_hw_number_t hwirq)
609{ 624{
625 struct irq_desc *desc;
626
627 desc = irq_to_desc_alloc_node(virq, 0);
628 if (!desc) {
629 pr_debug("irq: -> allocating desc failed\n");
630 goto error;
631 }
632
610 /* Clear IRQ_NOREQUEST flag */ 633 /* Clear IRQ_NOREQUEST flag */
611 get_irq_desc(virq)->status &= ~IRQ_NOREQUEST; 634 desc->status &= ~IRQ_NOREQUEST;
612 635
613 /* map it */ 636 /* map it */
614 smp_wmb(); 637 smp_wmb();
@@ -617,11 +640,14 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
617 640
618 if (host->ops->map(host, virq, hwirq)) { 641 if (host->ops->map(host, virq, hwirq)) {
619 pr_debug("irq: -> mapping failed, freeing\n"); 642 pr_debug("irq: -> mapping failed, freeing\n");
620 irq_free_virt(virq, 1); 643 goto error;
621 return -1;
622 } 644 }
623 645
624 return 0; 646 return 0;
647
648error:
649 irq_free_virt(virq, 1);
650 return -1;
625} 651}
626 652
627unsigned int irq_create_direct_mapping(struct irq_host *host) 653unsigned int irq_create_direct_mapping(struct irq_host *host)
@@ -705,7 +731,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
705EXPORT_SYMBOL_GPL(irq_create_mapping); 731EXPORT_SYMBOL_GPL(irq_create_mapping);
706 732
707unsigned int irq_create_of_mapping(struct device_node *controller, 733unsigned int irq_create_of_mapping(struct device_node *controller,
708 u32 *intspec, unsigned int intsize) 734 const u32 *intspec, unsigned int intsize)
709{ 735{
710 struct irq_host *host; 736 struct irq_host *host;
711 irq_hw_number_t hwirq; 737 irq_hw_number_t hwirq;
@@ -738,7 +764,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
738 764
739 /* Set type if specified and different than the current one */ 765 /* Set type if specified and different than the current one */
740 if (type != IRQ_TYPE_NONE && 766 if (type != IRQ_TYPE_NONE &&
741 type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK)) 767 type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
742 set_irq_type(virq, type); 768 set_irq_type(virq, type);
743 return virq; 769 return virq;
744} 770}
@@ -810,7 +836,7 @@ void irq_dispose_mapping(unsigned int virq)
810 irq_map[virq].hwirq = host->inval_irq; 836 irq_map[virq].hwirq = host->inval_irq;
811 837
812 /* Set some flags */ 838 /* Set some flags */
813 get_irq_desc(virq)->status |= IRQ_NOREQUEST; 839 irq_to_desc(virq)->status |= IRQ_NOREQUEST;
814 840
815 /* Free it */ 841 /* Free it */
816 irq_free_virt(virq, 1); 842 irq_free_virt(virq, 1);
@@ -1002,12 +1028,24 @@ void irq_free_virt(unsigned int virq, unsigned int count)
1002 spin_unlock_irqrestore(&irq_big_lock, flags); 1028 spin_unlock_irqrestore(&irq_big_lock, flags);
1003} 1029}
1004 1030
1005void irq_early_init(void) 1031int arch_early_irq_init(void)
1006{ 1032{
1007 unsigned int i; 1033 struct irq_desc *desc;
1034 int i;
1035
1036 for (i = 0; i < NR_IRQS; i++) {
1037 desc = irq_to_desc(i);
1038 if (desc)
1039 desc->status |= IRQ_NOREQUEST;
1040 }
1008 1041
1009 for (i = 0; i < NR_IRQS; i++) 1042 return 0;
1010 get_irq_desc(i)->status |= IRQ_NOREQUEST; 1043}
1044
1045int arch_init_chip_data(struct irq_desc *desc, int node)
1046{
1047 desc->status |= IRQ_NOREQUEST;
1048 return 0;
1011} 1049}
1012 1050
1013/* We need to create the radix trees late */ 1051/* We need to create the radix trees late */
@@ -1069,16 +1107,19 @@ static int virq_debug_show(struct seq_file *m, void *private)
1069 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", 1107 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
1070 "chip name", "host name"); 1108 "chip name", "host name");
1071 1109
1072 for (i = 1; i < NR_IRQS; i++) { 1110 for (i = 1; i < nr_irqs; i++) {
1073 desc = get_irq_desc(i); 1111 desc = irq_to_desc(i);
1112 if (!desc)
1113 continue;
1114
1074 spin_lock_irqsave(&desc->lock, flags); 1115 spin_lock_irqsave(&desc->lock, flags);
1075 1116
1076 if (desc->action && desc->action->handler) { 1117 if (desc->action && desc->action->handler) {
1077 seq_printf(m, "%5d ", i); 1118 seq_printf(m, "%5d ", i);
1078 seq_printf(m, "0x%05lx ", virq_to_hw(i)); 1119 seq_printf(m, "0x%05lx ", virq_to_hw(i));
1079 1120
1080 if (desc->chip && desc->chip->typename) 1121 if (desc->chip && desc->chip->name)
1081 p = desc->chip->typename; 1122 p = desc->chip->name;
1082 else 1123 else
1083 p = none; 1124 p = none;
1084 seq_printf(m, "%-15s ", p); 1125 seq_printf(m, "%-15s ", p);
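
The recurring pattern in this irq.c conversion, shown once as a standalone sketch: with CONFIG_SPARSE_IRQ the flat descriptor array is gone, so every walk goes through irq_to_desc(), must tolerate a NULL result, and bounds itself with the runtime nr_irqs rather than the compile-time NR_IRQS. The function name below is illustrative.

#include <linux/irq.h>

static void example_walk_irqs(void)
{
        unsigned int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_desc *desc = irq_to_desc(i);

                if (!desc)      /* descriptor not allocated under CONFIG_SPARSE_IRQ */
                        continue;
                /* inspect desc->status, desc->action, desc->chip->name here */
        }
}
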
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index ed0ac4e4b8d8..79a00bb9c64c 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -781,9 +781,9 @@ static int __init lparcfg_init(void)
781 !firmware_has_feature(FW_FEATURE_ISERIES)) 781 !firmware_has_feature(FW_FEATURE_ISERIES))
782 mode |= S_IWUSR; 782 mode |= S_IWUSR;
783 783
784 ent = proc_create("ppc64/lparcfg", mode, NULL, &lparcfg_fops); 784 ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
785 if (!ent) { 785 if (!ent) {
786 printk(KERN_ERR "Failed to create ppc64/lparcfg\n"); 786 printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
787 return -EIO; 787 return -EIO;
788 } 788 }
789 789
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index da9c0c4c10f3..8649f536f8df 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -502,15 +502,7 @@ _GLOBAL(clear_pages)
502 li r0,PAGE_SIZE/L1_CACHE_BYTES 502 li r0,PAGE_SIZE/L1_CACHE_BYTES
503 slw r0,r0,r4 503 slw r0,r0,r4
504 mtctr r0 504 mtctr r0
505#ifdef CONFIG_8xx
506 li r4, 0
5071: stw r4, 0(r3)
508 stw r4, 4(r3)
509 stw r4, 8(r3)
510 stw r4, 12(r3)
511#else
5121: dcbz 0,r3 5051: dcbz 0,r3
513#endif
514 addi r3,r3,L1_CACHE_BYTES 506 addi r3,r3,L1_CACHE_BYTES
515 bdnz 1b 507 bdnz 1b
516 blr 508 blr
@@ -535,15 +527,6 @@ _GLOBAL(copy_page)
535 addi r3,r3,-4 527 addi r3,r3,-4
536 addi r4,r4,-4 528 addi r4,r4,-4
537 529
538#ifdef CONFIG_8xx
539 /* don't use prefetch on 8xx */
540 li r0,4096/L1_CACHE_BYTES
541 mtctr r0
5421: COPY_16_BYTES
543 bdnz 1b
544 blr
545
546#else /* not 8xx, we can prefetch */
547 li r5,4 530 li r5,4
548 531
549#if MAX_COPY_PREFETCH > 1 532#if MAX_COPY_PREFETCH > 1
@@ -584,7 +567,6 @@ _GLOBAL(copy_page)
584 li r0,MAX_COPY_PREFETCH 567 li r0,MAX_COPY_PREFETCH
585 li r11,4 568 li r11,4
586 b 2b 569 b 2b
587#endif /* CONFIG_8xx */
588 570
589/* 571/*
590 * void atomic_clear_mask(atomic_t mask, atomic_t *addr) 572 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 0ed31f220482..ad461e735aec 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -139,8 +139,8 @@ out:
139 139
140} 140}
141 141
142static int dev_nvram_ioctl(struct inode *inode, struct file *file, 142static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
143 unsigned int cmd, unsigned long arg) 143 unsigned long arg)
144{ 144{
145 switch(cmd) { 145 switch(cmd) {
146#ifdef CONFIG_PPC_PMAC 146#ifdef CONFIG_PPC_PMAC
@@ -169,11 +169,11 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file,
169} 169}
170 170
171const struct file_operations nvram_fops = { 171const struct file_operations nvram_fops = {
172 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
173 .llseek = dev_nvram_llseek, 173 .llseek = dev_nvram_llseek,
174 .read = dev_nvram_read, 174 .read = dev_nvram_read,
175 .write = dev_nvram_write, 175 .write = dev_nvram_write,
176 .ioctl = dev_nvram_ioctl, 176 .unlocked_ioctl = dev_nvram_ioctl,
177}; 177};
178 178
179static struct miscdevice nvram_dev = { 179static struct miscdevice nvram_dev = {
@@ -184,7 +184,7 @@ static struct miscdevice nvram_dev = {
184 184
185 185
186#ifdef DEBUG_NVRAM 186#ifdef DEBUG_NVRAM
187static void nvram_print_partitions(char * label) 187static void __init nvram_print_partitions(char * label)
188{ 188{
189 struct list_head * p; 189 struct list_head * p;
190 struct nvram_partition * tmp_part; 190 struct nvram_partition * tmp_part;
@@ -202,7 +202,7 @@ static void nvram_print_partitions(char * label)
202#endif 202#endif
203 203
204 204
205static int nvram_write_header(struct nvram_partition * part) 205static int __init nvram_write_header(struct nvram_partition * part)
206{ 206{
207 loff_t tmp_index; 207 loff_t tmp_index;
208 int rc; 208 int rc;
@@ -214,7 +214,7 @@ static int nvram_write_header(struct nvram_partition * part)
214} 214}
215 215
216 216
217static unsigned char nvram_checksum(struct nvram_header *p) 217static unsigned char __init nvram_checksum(struct nvram_header *p)
218{ 218{
219 unsigned int c_sum, c_sum2; 219 unsigned int c_sum, c_sum2;
220 unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */ 220 unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */
@@ -228,32 +228,7 @@ static unsigned char nvram_checksum(struct nvram_header *p)
228 return c_sum; 228 return c_sum;
229} 229}
230 230
231 231static int __init nvram_remove_os_partition(void)
232/*
233 * Find an nvram partition, sig can be 0 for any
234 * partition or name can be NULL for any name, else
235 * tries to match both
236 */
237struct nvram_partition *nvram_find_partition(int sig, const char *name)
238{
239 struct nvram_partition * part;
240 struct list_head * p;
241
242 list_for_each(p, &nvram_part->partition) {
243 part = list_entry(p, struct nvram_partition, partition);
244
245 if (sig && part->header.signature != sig)
246 continue;
247 if (name && 0 != strncmp(name, part->header.name, 12))
248 continue;
249 return part;
250 }
251 return NULL;
252}
253EXPORT_SYMBOL(nvram_find_partition);
254
255
256static int nvram_remove_os_partition(void)
257{ 232{
258 struct list_head *i; 233 struct list_head *i;
259 struct list_head *j; 234 struct list_head *j;
@@ -319,7 +294,7 @@ static int nvram_remove_os_partition(void)
319 * Will create a partition starting at the first free 294 * Will create a partition starting at the first free
320 * space found if space has enough room. 295 * space found if space has enough room.
321 */ 296 */
322static int nvram_create_os_partition(void) 297static int __init nvram_create_os_partition(void)
323{ 298{
324 struct nvram_partition *part; 299 struct nvram_partition *part;
325 struct nvram_partition *new_part; 300 struct nvram_partition *new_part;
@@ -422,7 +397,7 @@ static int nvram_create_os_partition(void)
422 * 5.) If the max chunk cannot be allocated then try finding a chunk 397 * 5.) If the max chunk cannot be allocated then try finding a chunk
423 * that will satisfy the minimum needed (NVRAM_MIN_REQ). 398 * that will satisfy the minimum needed (NVRAM_MIN_REQ).
424 */ 399 */
425static int nvram_setup_partition(void) 400static int __init nvram_setup_partition(void)
426{ 401{
427 struct list_head * p; 402 struct list_head * p;
428 struct nvram_partition * part; 403 struct nvram_partition * part;
@@ -480,7 +455,7 @@ static int nvram_setup_partition(void)
480} 455}
481 456
482 457
483static int nvram_scan_partitions(void) 458static int __init nvram_scan_partitions(void)
484{ 459{
485 loff_t cur_index = 0; 460 loff_t cur_index = 0;
486 struct nvram_header phead; 461 struct nvram_header phead;
@@ -706,6 +681,9 @@ int nvram_clear_error_log(void)
706 int clear_word = ERR_FLAG_ALREADY_LOGGED; 681 int clear_word = ERR_FLAG_ALREADY_LOGGED;
707 int rc; 682 int rc;
708 683
684 if (nvram_error_log_index == -1)
685 return -1;
686
709 tmp_index = nvram_error_log_index; 687 tmp_index = nvram_error_log_index;
710 688
711 rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); 689 rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
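
The .ioctl to .unlocked_ioctl switch above also changes the handler contract: the new hook returns long, no longer receives the inode, and is called without the Big Kernel Lock, so any serialization has to be explicit. A generic before/after sketch with hypothetical names:

#include <linux/fs.h>
#include <linux/errno.h>

/* Old style (BKL-serialized):
 *   static int foo_ioctl(struct inode *inode, struct file *file,
 *                        unsigned int cmd, unsigned long arg);
 * New style, as dev_nvram_ioctl() now uses: */
static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        default:
                return -ENOTTY;
        }
}
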
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 0a03cf70d247..936f04dbfc6f 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -119,13 +119,6 @@ static void perf_callchain_kernel(struct pt_regs *regs,
119} 119}
120 120
121#ifdef CONFIG_PPC64 121#ifdef CONFIG_PPC64
122
123#ifdef CONFIG_HUGETLB_PAGE
124#define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize])
125#else
126#define is_huge_psize(pagesize) 0
127#endif
128
129/* 122/*
130 * On 64-bit we don't want to invoke hash_page on user addresses from 123 * On 64-bit we don't want to invoke hash_page on user addresses from
131 * interrupt context, so if the access faults, we read the page tables 124 * interrupt context, so if the access faults, we read the page tables
@@ -135,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
135{ 128{
136 pgd_t *pgdir; 129 pgd_t *pgdir;
137 pte_t *ptep, pte; 130 pte_t *ptep, pte;
138 int pagesize; 131 unsigned shift;
139 unsigned long addr = (unsigned long) ptr; 132 unsigned long addr = (unsigned long) ptr;
140 unsigned long offset; 133 unsigned long offset;
141 unsigned long pfn; 134 unsigned long pfn;
@@ -145,17 +138,14 @@ static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
145 if (!pgdir) 138 if (!pgdir)
146 return -EFAULT; 139 return -EFAULT;
147 140
148 pagesize = get_slice_psize(current->mm, addr); 141 ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
142 if (!shift)
143 shift = PAGE_SHIFT;
149 144
150 /* align address to page boundary */ 145 /* align address to page boundary */
151 offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1); 146 offset = addr & ((1UL << shift) - 1);
152 addr -= offset; 147 addr -= offset;
153 148
154 if (is_huge_psize(pagesize))
155 ptep = huge_pte_offset(current->mm, addr);
156 else
157 ptep = find_linux_pte(pgdir, addr);
158
159 if (ptep == NULL) 149 if (ptep == NULL)
160 return -EFAULT; 150 return -EFAULT;
161 pte = *ptep; 151 pte = *ptep;
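
The replacement helper collapses the old two-way lookup: find_linux_pte_or_hugepte() handles regular and huge mappings alike and reports the page-size shift, with 0 meaning "regular page size". Condensed from the hunk above into a small sketch (the function name is illustrative):

static unsigned long example_page_base(pgd_t *pgdir, unsigned long addr)
{
        unsigned shift;
        pte_t *ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);

        if (!ptep)
                return 0;                       /* no mapping                  */
        if (!shift)
                shift = PAGE_SHIFT;             /* 0 => regular page size      */
        return addr & ~((1UL << shift) - 1);    /* page-aligned base address   */
}
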
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c8b27bb4dbde..425451453e96 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -96,8 +96,6 @@ EXPORT_SYMBOL(copy_4K_page);
96EXPORT_SYMBOL(isa_io_base); 96EXPORT_SYMBOL(isa_io_base);
97EXPORT_SYMBOL(isa_mem_base); 97EXPORT_SYMBOL(isa_mem_base);
98EXPORT_SYMBOL(pci_dram_offset); 98EXPORT_SYMBOL(pci_dram_offset);
99EXPORT_SYMBOL(pci_alloc_consistent);
100EXPORT_SYMBOL(pci_free_consistent);
101#endif /* CONFIG_PCI */ 99#endif /* CONFIG_PCI */
102 100
103EXPORT_SYMBOL(start_thread); 101EXPORT_SYMBOL(start_thread);
@@ -162,7 +160,6 @@ EXPORT_SYMBOL(screen_info);
162 160
163#ifdef CONFIG_PPC32 161#ifdef CONFIG_PPC32
164EXPORT_SYMBOL(timer_interrupt); 162EXPORT_SYMBOL(timer_interrupt);
165EXPORT_SYMBOL(irq_desc);
166EXPORT_SYMBOL(tb_ticks_per_jiffy); 163EXPORT_SYMBOL(tb_ticks_per_jiffy);
167EXPORT_SYMBOL(cacheable_memcpy); 164EXPORT_SYMBOL(cacheable_memcpy);
168EXPORT_SYMBOL(cacheable_memzero); 165EXPORT_SYMBOL(cacheable_memzero);
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_powerpc.c
index c647ddef40dc..1ed3b8d7981e 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -28,55 +28,7 @@
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/prom.h> 29#include <asm/prom.h>
30 30
31static loff_t page_map_seek( struct file *file, loff_t off, int whence); 31#ifdef CONFIG_PPC64
32static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
33 loff_t *ppos);
34static int page_map_mmap( struct file *file, struct vm_area_struct *vma );
35
36static const struct file_operations page_map_fops = {
37 .llseek = page_map_seek,
38 .read = page_map_read,
39 .mmap = page_map_mmap
40};
41
42/*
43 * Create the ppc64 and ppc64/rtas directories early. This allows us to
44 * assume that they have been previously created in drivers.
45 */
46static int __init proc_ppc64_create(void)
47{
48 struct proc_dir_entry *root;
49
50 root = proc_mkdir("ppc64", NULL);
51 if (!root)
52 return 1;
53
54 if (!of_find_node_by_path("/rtas"))
55 return 0;
56
57 if (!proc_mkdir("rtas", root))
58 return 1;
59
60 if (!proc_symlink("rtas", NULL, "ppc64/rtas"))
61 return 1;
62
63 return 0;
64}
65core_initcall(proc_ppc64_create);
66
67static int __init proc_ppc64_init(void)
68{
69 struct proc_dir_entry *pde;
70
71 pde = proc_create_data("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL,
72 &page_map_fops, vdso_data);
73 if (!pde)
74 return 1;
75 pde->size = PAGE_SIZE;
76
77 return 0;
78}
79__initcall(proc_ppc64_init);
80 32
81static loff_t page_map_seek( struct file *file, loff_t off, int whence) 33static loff_t page_map_seek( struct file *file, loff_t off, int whence)
82{ 34{
@@ -120,3 +72,55 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
120 return 0; 72 return 0;
121} 73}
122 74
75static const struct file_operations page_map_fops = {
76 .llseek = page_map_seek,
77 .read = page_map_read,
78 .mmap = page_map_mmap
79};
80
81
82static int __init proc_ppc64_init(void)
83{
84 struct proc_dir_entry *pde;
85
86 pde = proc_create_data("powerpc/systemcfg", S_IFREG|S_IRUGO, NULL,
87 &page_map_fops, vdso_data);
88 if (!pde)
89 return 1;
90 pde->size = PAGE_SIZE;
91
92 return 0;
93}
94__initcall(proc_ppc64_init);
95
96#endif /* CONFIG_PPC64 */
97
98/*
99 * Create the ppc64 and ppc64/rtas directories early. This allows us to
100 * assume that they have been previously created in drivers.
101 */
102static int __init proc_ppc64_create(void)
103{
104 struct proc_dir_entry *root;
105
106 root = proc_mkdir("powerpc", NULL);
107 if (!root)
108 return 1;
109
110#ifdef CONFIG_PPC64
111 if (!proc_symlink("ppc64", NULL, "powerpc"))
112 pr_err("Failed to create link /proc/ppc64 -> /proc/powerpc\n");
113#endif
114
115 if (!of_find_node_by_path("/rtas"))
116 return 0;
117
118 if (!proc_mkdir("rtas", root))
119 return 1;
120
121 if (!proc_symlink("rtas", NULL, "powerpc/rtas"))
122 return 1;
123
124 return 0;
125}
126core_initcall(proc_ppc64_create);
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 13011a96a977..a85117d5c9a4 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -6,7 +6,7 @@
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 * 8 *
9 * /proc/ppc64/rtas/firmware_flash interface 9 * /proc/powerpc/rtas/firmware_flash interface
10 * 10 *
11 * This file implements a firmware_flash interface to pump a firmware 11 * This file implements a firmware_flash interface to pump a firmware
12 * image into the kernel. At reboot time rtas_restart() will see the 12 * image into the kernel. At reboot time rtas_restart() will see the
@@ -740,7 +740,7 @@ static int __init rtas_flash_init(void)
740 return 1; 740 return 1;
741 } 741 }
742 742
743 firmware_flash_pde = create_flash_pde("ppc64/rtas/" 743 firmware_flash_pde = create_flash_pde("powerpc/rtas/"
744 FIRMWARE_FLASH_NAME, 744 FIRMWARE_FLASH_NAME,
745 &rtas_flash_operations); 745 &rtas_flash_operations);
746 if (firmware_flash_pde == NULL) { 746 if (firmware_flash_pde == NULL) {
@@ -754,7 +754,7 @@ static int __init rtas_flash_init(void)
754 if (rc != 0) 754 if (rc != 0)
755 goto cleanup; 755 goto cleanup;
756 756
757 firmware_update_pde = create_flash_pde("ppc64/rtas/" 757 firmware_update_pde = create_flash_pde("powerpc/rtas/"
758 FIRMWARE_UPDATE_NAME, 758 FIRMWARE_UPDATE_NAME,
759 &rtas_flash_operations); 759 &rtas_flash_operations);
760 if (firmware_update_pde == NULL) { 760 if (firmware_update_pde == NULL) {
@@ -768,7 +768,7 @@ static int __init rtas_flash_init(void)
768 if (rc != 0) 768 if (rc != 0)
769 goto cleanup; 769 goto cleanup;
770 770
771 validate_pde = create_flash_pde("ppc64/rtas/" VALIDATE_FLASH_NAME, 771 validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME,
772 &validate_flash_operations); 772 &validate_flash_operations);
773 if (validate_pde == NULL) { 773 if (validate_pde == NULL) {
774 rc = -ENOMEM; 774 rc = -ENOMEM;
@@ -781,7 +781,7 @@ static int __init rtas_flash_init(void)
781 if (rc != 0) 781 if (rc != 0)
782 goto cleanup; 782 goto cleanup;
783 783
784 manage_pde = create_flash_pde("ppc64/rtas/" MANAGE_FLASH_NAME, 784 manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME,
785 &manage_flash_operations); 785 &manage_flash_operations);
786 if (manage_pde == NULL) { 786 if (manage_pde == NULL) {
787 rc = -ENOMEM; 787 rc = -ENOMEM;
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/kernel/rtasd.c
index b3cbac855924..2e4832ab2108 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -39,6 +39,7 @@ static unsigned long rtas_log_start;
 static unsigned long rtas_log_size;
 
 static int surveillance_timeout = -1;
+
 static unsigned int rtas_error_log_max;
 static unsigned int rtas_error_log_buffer_max;
 
@@ -213,9 +214,11 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
 		return;
 	}
 
+#ifdef CONFIG_PPC64
 	/* Write error to NVRAM */
 	if (logging_enabled && !(err_type & ERR_FLAG_BOOT))
 		nvram_write_error_log(buf, len, err_type, error_log_cnt);
+#endif /* CONFIG_PPC64 */
 
 	/*
 	 * rtas errors can occur during boot, and we do want to capture
@@ -264,7 +267,6 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
 
 }
 
-
 static int rtas_log_open(struct inode * inode, struct file * file)
 {
 	return 0;
@@ -300,6 +302,7 @@ static ssize_t rtas_log_read(struct file * file, char __user * buf,
 		return -ENOMEM;
 
 	spin_lock_irqsave(&rtasd_log_lock, s);
+
 	/* if it's 0, then we know we got the last one (the one in NVRAM) */
 	while (rtas_log_size == 0) {
 		if (file->f_flags & O_NONBLOCK) {
@@ -313,7 +316,9 @@ static ssize_t rtas_log_read(struct file * file, char __user * buf,
 			error = -ENODATA;
 			goto out;
 		}
+#ifdef CONFIG_PPC64
 		nvram_clear_error_log();
+#endif /* CONFIG_PPC64 */
 
 		spin_unlock_irqrestore(&rtasd_log_lock, s);
 		error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
@@ -427,14 +432,11 @@ static void rtas_event_scan(struct work_struct *w)
 	put_online_cpus();
 }
 
-static void start_event_scan(void)
+#ifdef CONFIG_PPC64
+static void retreive_nvram_error_log(void)
 {
-	unsigned int err_type;
-	int rc;
-
-	printk(KERN_DEBUG "RTAS daemon started\n");
-	pr_debug("rtasd: will sleep for %d milliseconds\n",
-		 (30000 / rtas_event_scan_rate));
+	unsigned int err_type ;
+	int rc ;
 
 	/* See if we have any error stored in NVRAM */
 	memset(logdata, 0, rtas_error_log_max);
@@ -442,12 +444,26 @@ static void start_event_scan(void)
 				&err_type, &error_log_cnt);
 	/* We can use rtas_log_buf now */
 	logging_enabled = 1;
-
 	if (!rc) {
 		if (err_type != ERR_FLAG_ALREADY_LOGGED) {
 			pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0);
 		}
 	}
+}
+#else /* CONFIG_PPC64 */
+static void retreive_nvram_error_log(void)
+{
+}
+#endif /* CONFIG_PPC64 */
+
+static void start_event_scan(void)
+{
+	printk(KERN_DEBUG "RTAS daemon started\n");
+	pr_debug("rtasd: will sleep for %d milliseconds\n",
+		 (30000 / rtas_event_scan_rate));
+
+	/* Retreive errors from nvram if any */
+	retreive_nvram_error_log();
 
 	schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
 				  event_scan_delay);
@@ -457,13 +473,13 @@ static int __init rtas_init(void)
 {
 	struct proc_dir_entry *entry;
 
-	if (!machine_is(pseries))
+	if (!machine_is(pseries) && !machine_is(chrp))
 		return 0;
 
 	/* No RTAS */
 	event_scan = rtas_token("event-scan");
 	if (event_scan == RTAS_UNKNOWN_SERVICE) {
-		printk(KERN_DEBUG "rtasd: no event-scan on system\n");
+		printk(KERN_INFO "rtasd: No event-scan on system\n");
 		return -ENODEV;
 	}
 
@@ -483,7 +499,7 @@ static int __init rtas_init(void)
 		return -ENOMEM;
 	}
 
-	entry = proc_create("ppc64/rtas/error_log", S_IRUSR, NULL,
+	entry = proc_create("powerpc/rtas/error_log", S_IRUSR, NULL,
 			    &proc_rtas_log_operations);
 	if (!entry)
 		printk(KERN_ERR "Failed to create error_log proc entry\n");
@@ -492,11 +508,16 @@ static int __init rtas_init(void)
 
 	return 0;
 }
+__initcall(rtas_init);
 
 static int __init surveillance_setup(char *str)
 {
 	int i;
 
+	/* We only do surveillance on pseries */
+	if (!machine_is(pseries))
+		return 0;
+
 	if (get_option(&str,&i)) {
 		if (i >= 0 && i <= 255)
 			surveillance_timeout = i;
@@ -504,6 +525,7 @@ static int __init surveillance_setup(char *str)
 
 	return 1;
 }
+__setup("surveillance=", surveillance_setup);
 
 static int __init rtasmsgs_setup(char *str)
 {
@@ -514,6 +536,4 @@ static int __init rtasmsgs_setup(char *str)
 
 	return 1;
 }
-__initcall(rtas_init);
-__setup("surveillance=", surveillance_setup);
 __setup("rtasmsgs=", rtasmsgs_setup);
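The rtasd changes above follow a common kernel pattern: the NVRAM-backed pieces are compiled only under CONFIG_PPC64, while other configurations get an empty stub, so start_event_scan() can call retreive_nvram_error_log() without any #ifdef at the call site. A minimal standalone sketch of that pattern, using a made-up HAVE_NVRAM switch purely for illustration:

#include <stdio.h>

/* Build with -DHAVE_NVRAM for the "real" implementation; without it
 * the empty stub is used and the caller stays exactly the same. */
#ifdef HAVE_NVRAM
static void retrieve_nvram_error_log(void)
{
	printf("reading saved error log from NVRAM\n");
}
#else
static void retrieve_nvram_error_log(void)
{
	/* nothing to do when NVRAM logging is not built in */
}
#endif

static void start_event_scan(void)
{
	/* unconditional call; the #ifdef lives at the definition site */
	retrieve_nvram_error_log();
	printf("event scan started\n");
}

int main(void)
{
	start_event_scan();
	return 0;
}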
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index df2c9e932b37..6568406b2a30 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -356,11 +356,6 @@ void __init setup_system(void)
 	 */
 	initialize_cache_info();
 
-	/*
-	 * Initialize irq remapping subsystem
-	 */
-	irq_early_init();
-
 #ifdef CONFIG_PPC_RTAS
 	/*
 	 * Initialize RTAS if available
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9b86a74d2815..97196eefef3e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -218,6 +218,9 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 
 static void stop_this_cpu(void *dummy)
 {
+	/* Remove this CPU */
+	set_cpu_online(smp_processor_id(), false);
+
 	local_irq_disable();
 	while (1)
 		;
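stop_this_cpu() now clears its own bit in the online map before disabling interrupts and spinning, so the rest of the kernel immediately stops treating the CPU as online. A toy standalone sketch of the underlying bitmap operation; the fixed-width mask and helper below are illustrative and not the kernel's cpumask API:

#include <stdio.h>

static unsigned long cpu_online_mask = 0xff;	/* pretend 8 CPUs, all online */

/* Set or clear one CPU's bit in the online mask. */
static void set_cpu_online(int cpu, int online)
{
	if (online)
		cpu_online_mask |= 1UL << cpu;
	else
		cpu_online_mask &= ~(1UL << cpu);
}

int main(void)
{
	set_cpu_online(3, 0);	/* what stop_this_cpu() does for itself */
	printf("online mask is now 0x%lx\n", cpu_online_mask);
	return 0;
}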
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 956ab33fd73f..e235e52dc4fe 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu)
 
 	cacheinfo_cpu_offline(cpu);
 }
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ssize_t arch_cpu_probe(const char *buf, size_t count)
+{
+	if (ppc_md.cpu_probe)
+		return ppc_md.cpu_probe(buf, count);
+
+	return -EINVAL;
+}
+
+ssize_t arch_cpu_release(const char *buf, size_t count)
+{
+	if (ppc_md.cpu_release)
+		return ppc_md.cpu_release(buf, count);
+
+	return -EINVAL;
+}
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
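The new arch_cpu_probe()/arch_cpu_release() helpers simply forward to optional machine-description callbacks and fall back to -EINVAL when the platform provides none. A standalone sketch of that optional-callback dispatch; the struct and function names below (machine_ops, dlpar_probe) are invented for illustration and only mirror the shape of the ppc_md hooks:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Optional per-platform hooks; a platform may leave them NULL. */
struct machine_ops {
	long (*cpu_probe)(const char *buf, size_t count);
	long (*cpu_release)(const char *buf, size_t count);
};

static long dlpar_probe(const char *buf, size_t count)
{
	printf("probing CPU described by \"%.*s\"\n", (int)count, buf);
	return (long)count;
}

static struct machine_ops md = { .cpu_probe = dlpar_probe };

static long arch_cpu_probe(const char *buf, size_t count)
{
	if (md.cpu_probe)
		return md.cpu_probe(buf, count);
	return -EINVAL;		/* no platform support */
}

int main(void)
{
	const char *arg = "2";

	printf("probe -> %ld\n", arch_cpu_probe(arg, strlen(arg)));
	md.cpu_probe = NULL;	/* platform without the hook */
	printf("probe -> %ld\n", arch_cpu_probe(arg, strlen(arg)));
	return 0;
}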
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 674800b242d6..9ba2cc88591d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -269,6 +269,7 @@ void account_system_vtime(struct task_struct *tsk)
 	per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 /*
  * Transfer the user and system times accumulated in the paca
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9d1f9354d6ca..804f0f30f227 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -198,28 +198,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 	info.si_code = code;
 	info.si_addr = (void __user *) addr;
 	force_sig_info(signr, &info, current);
-
-	/*
-	 * Init gets no signals that it doesn't have a handler for.
-	 * That's all very well, but if it has caused a synchronous
-	 * exception and we ignore the resulting signal, it will just
-	 * generate the same exception over and over again and we get
-	 * nowhere. Better to kill it and let the kernel panic.
-	 */
-	if (is_global_init(current)) {
-		__sighandler_t handler;
-
-		spin_lock_irq(&current->sighand->siglock);
-		handler = current->sighand->action[signr-1].sa.sa_handler;
-		spin_unlock_irq(&current->sighand->siglock);
-		if (handler == SIG_DFL) {
-			/* init has generated a synchronous exception
-			   and it doesn't have a handler for the signal */
-			printk(KERN_CRIT "init has generated signal %d "
-			       "but has no handler for it\n", signr);
-			do_exit(signr);
-		}
-	}
 }
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 67b6916f0e94..fe460482fa68 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec)
  * all 1's
  */
 	mfspr	r4,SPRN_VRSAVE
-	cmpdi	0,r4,0
+	cmpwi	0,r4,0
 	bne+	1f
 	li	r4,-1
 	mtspr	SPRN_VRSAVE,r4
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index c29926846613..07703f72330e 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,23 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 
+config KVM_BOOK3S_64_HANDLER
+	bool
+
+config KVM_BOOK3S_64
+	tristate "KVM support for PowerPC book3s_64 processors"
+	depends on EXPERIMENTAL && PPC64
+	select KVM
+	select KVM_BOOK3S_64_HANDLER
+	---help---
+	  Support running unmodified book3s_64 and book3s_32 guest kernels
+	  in virtual machines on book3s_64 host processors.
+
+	  This module provides access to the hardware capabilities through
+	  a character device node named /dev/kvm.
+
+	  If unsure, say N.
+
 config KVM_440
 	bool "KVM support for PowerPC 440 processors"
 	depends on EXPERIMENTAL && 44x
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 37655fe19f2f..56484d652377 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -12,26 +12,45 @@ CFLAGS_44x_tlb.o := -I.
 CFLAGS_e500_tlb.o := -I.
 CFLAGS_emulate.o := -I.
 
-kvm-objs := $(common-objs-y) powerpc.o emulate.o
+common-objs-y += powerpc.o emulate.o
 obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
 kvm-440-objs := \
+	$(common-objs-y) \
 	booke.o \
 	booke_emulate.o \
 	booke_interrupts.o \
 	44x.o \
 	44x_tlb.o \
 	44x_emulate.o
-obj-$(CONFIG_KVM_440) += kvm-440.o
+kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs)
 
 kvm-e500-objs := \
+	$(common-objs-y) \
 	booke.o \
 	booke_emulate.o \
 	booke_interrupts.o \
 	e500.o \
 	e500_tlb.o \
 	e500_emulate.o
-obj-$(CONFIG_KVM_E500) += kvm-e500.o
+kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
+
+kvm-book3s_64-objs := \
+	$(common-objs-y) \
+	book3s.o \
+	book3s_64_emulate.o \
+	book3s_64_interrupts.o \
+	book3s_64_mmu_host.o \
+	book3s_64_mmu.o \
+	book3s_32_mmu.o
+kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+
+kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
+
+obj-$(CONFIG_KVM_440) += kvm.o
+obj-$(CONFIG_KVM_E500) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
+
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
new file mode 100644
index 000000000000..3e294bd9b8c6
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.c
@@ -0,0 +1,974 @@
1/*
2 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 * Kevin Wolf <mail@kevin-wolf.de>
7 *
8 * Description:
9 * This file is derived from arch/powerpc/kvm/44x.c,
10 * by Hollis Blanchard <hollisb@us.ibm.com>.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License, version 2, as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/kvm_host.h>
18#include <linux/err.h>
19
20#include <asm/reg.h>
21#include <asm/cputable.h>
22#include <asm/cacheflush.h>
23#include <asm/tlbflush.h>
24#include <asm/uaccess.h>
25#include <asm/io.h>
26#include <asm/kvm_ppc.h>
27#include <asm/kvm_book3s.h>
28#include <asm/mmu_context.h>
29#include <linux/sched.h>
30#include <linux/vmalloc.h>
31
32#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
33
34/* #define EXIT_DEBUG */
35/* #define EXIT_DEBUG_SIMPLE */
36
37/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
38 * When set, we retrigger a DEC interrupt after that if DEC <= 0.
39 * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
40
41/* #define AGGRESSIVE_DEC */
42
43struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "exits", VCPU_STAT(sum_exits) },
45 { "mmio", VCPU_STAT(mmio_exits) },
46 { "sig", VCPU_STAT(signal_exits) },
47 { "sysc", VCPU_STAT(syscall_exits) },
48 { "inst_emu", VCPU_STAT(emulated_inst_exits) },
49 { "dec", VCPU_STAT(dec_exits) },
50 { "ext_intr", VCPU_STAT(ext_intr_exits) },
51 { "queue_intr", VCPU_STAT(queue_intr) },
52 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
53 { "pf_storage", VCPU_STAT(pf_storage) },
54 { "sp_storage", VCPU_STAT(sp_storage) },
55 { "pf_instruc", VCPU_STAT(pf_instruc) },
56 { "sp_instruc", VCPU_STAT(sp_instruc) },
57 { "ld", VCPU_STAT(ld) },
58 { "ld_slow", VCPU_STAT(ld_slow) },
59 { "st", VCPU_STAT(st) },
60 { "st_slow", VCPU_STAT(st_slow) },
61 { NULL }
62};
63
64void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
65{
66}
67
68void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
69{
70}
71
72void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
73{
74 memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
75 get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
76}
77
78void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
79{
80 memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
81 to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
82}
83
84#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
85static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
86{
87 u64 jd = mftb() - vcpu->arch.dec_jiffies;
88 return vcpu->arch.dec - jd;
89}
90#endif
91
92void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
93{
94 ulong old_msr = vcpu->arch.msr;
95
96#ifdef EXIT_DEBUG
97 printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
98#endif
99 msr &= to_book3s(vcpu)->msr_mask;
100 vcpu->arch.msr = msr;
101 vcpu->arch.shadow_msr = msr | MSR_USER32;
102 vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
103 MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
104 MSR_FE1);
105
106 if (msr & (MSR_WE|MSR_POW)) {
107 if (!vcpu->arch.pending_exceptions) {
108 kvm_vcpu_block(vcpu);
109 vcpu->stat.halt_wakeup++;
110 }
111 }
112
113 if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
114 (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
115 kvmppc_mmu_flush_segments(vcpu);
116 kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
117 }
118}
119
120void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
121{
122 vcpu->arch.srr0 = vcpu->arch.pc;
123 vcpu->arch.srr1 = vcpu->arch.msr | flags;
124 vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
125 vcpu->arch.mmu.reset_msr(vcpu);
126}
127
128void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
129{
130 unsigned int prio;
131
132 vcpu->stat.queue_intr++;
133 switch (vec) {
134 case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
135 case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
136 case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
137 case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
138 case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
139 case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
140 case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
141 case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
142 case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
143 case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
144 case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
145 case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
146 case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
147 case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
148 case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
149 default: prio = BOOK3S_IRQPRIO_MAX; break;
150 }
151
152 set_bit(prio, &vcpu->arch.pending_exceptions);
153#ifdef EXIT_DEBUG
154 printk(KERN_INFO "Queueing interrupt %x\n", vec);
155#endif
156}
157
158
159void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
160{
161 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
162}
163
164void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
165{
166 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
167}
168
169int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
170{
171 return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
172}
173
174void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
175 struct kvm_interrupt *irq)
176{
177 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
178}
179
180int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
181{
182 int deliver = 1;
183 int vec = 0;
184
185 switch (priority) {
186 case BOOK3S_IRQPRIO_DECREMENTER:
187 deliver = vcpu->arch.msr & MSR_EE;
188 vec = BOOK3S_INTERRUPT_DECREMENTER;
189 break;
190 case BOOK3S_IRQPRIO_EXTERNAL:
191 deliver = vcpu->arch.msr & MSR_EE;
192 vec = BOOK3S_INTERRUPT_EXTERNAL;
193 break;
194 case BOOK3S_IRQPRIO_SYSTEM_RESET:
195 vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
196 break;
197 case BOOK3S_IRQPRIO_MACHINE_CHECK:
198 vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
199 break;
200 case BOOK3S_IRQPRIO_DATA_STORAGE:
201 vec = BOOK3S_INTERRUPT_DATA_STORAGE;
202 break;
203 case BOOK3S_IRQPRIO_INST_STORAGE:
204 vec = BOOK3S_INTERRUPT_INST_STORAGE;
205 break;
206 case BOOK3S_IRQPRIO_DATA_SEGMENT:
207 vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
208 break;
209 case BOOK3S_IRQPRIO_INST_SEGMENT:
210 vec = BOOK3S_INTERRUPT_INST_SEGMENT;
211 break;
212 case BOOK3S_IRQPRIO_ALIGNMENT:
213 vec = BOOK3S_INTERRUPT_ALIGNMENT;
214 break;
215 case BOOK3S_IRQPRIO_PROGRAM:
216 vec = BOOK3S_INTERRUPT_PROGRAM;
217 break;
218 case BOOK3S_IRQPRIO_VSX:
219 vec = BOOK3S_INTERRUPT_VSX;
220 break;
221 case BOOK3S_IRQPRIO_ALTIVEC:
222 vec = BOOK3S_INTERRUPT_ALTIVEC;
223 break;
224 case BOOK3S_IRQPRIO_FP_UNAVAIL:
225 vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
226 break;
227 case BOOK3S_IRQPRIO_SYSCALL:
228 vec = BOOK3S_INTERRUPT_SYSCALL;
229 break;
230 case BOOK3S_IRQPRIO_DEBUG:
231 vec = BOOK3S_INTERRUPT_TRACE;
232 break;
233 case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
234 vec = BOOK3S_INTERRUPT_PERFMON;
235 break;
236 default:
237 deliver = 0;
238 printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
239 break;
240 }
241
242#if 0
243 printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
244#endif
245
246 if (deliver)
247 kvmppc_inject_interrupt(vcpu, vec, 0ULL);
248
249 return deliver;
250}
251
252void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
253{
254 unsigned long *pending = &vcpu->arch.pending_exceptions;
255 unsigned int priority;
256
257 /* XXX be more clever here - no need to mftb() on every entry */
258 /* Issue DEC again if it's still active */
259#ifdef AGGRESSIVE_DEC
260 if (vcpu->arch.msr & MSR_EE)
261 if (kvmppc_get_dec(vcpu) & 0x80000000)
262 kvmppc_core_queue_dec(vcpu);
263#endif
264
265#ifdef EXIT_DEBUG
266 if (vcpu->arch.pending_exceptions)
267 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
268#endif
269 priority = __ffs(*pending);
270 while (priority <= (sizeof(unsigned int) * 8)) {
271 if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
272 clear_bit(priority, &vcpu->arch.pending_exceptions);
273 break;
274 }
275
276 priority = find_next_bit(pending,
277 BITS_PER_BYTE * sizeof(*pending),
278 priority + 1);
279 }
280}
281
282void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
283{
284 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
285 vcpu->arch.pvr = pvr;
286 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
287 kvmppc_mmu_book3s_64_init(vcpu);
288 to_book3s(vcpu)->hior = 0xfff00000;
289 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
290 } else {
291 kvmppc_mmu_book3s_32_init(vcpu);
292 to_book3s(vcpu)->hior = 0;
293 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
294 }
295
296 /* If we are in hypervisor level on 970, we can tell the CPU to
297 * treat DCBZ as 32 bytes store */
298 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
299 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
300 !strcmp(cur_cpu_spec->platform, "ppc970"))
301 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
302
303}
304
305/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
306 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
307 * emulate 32 bytes dcbz length.
308 *
309 * The Book3s_64 inventors also realized this case and implemented a special bit
310 * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
311 *
312 * My approach here is to patch the dcbz instruction on executing pages.
313 */
314static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
315{
316 bool touched = false;
317 hva_t hpage;
318 u32 *page;
319 int i;
320
321 hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
322 if (kvm_is_error_hva(hpage))
323 return;
324
325 hpage |= pte->raddr & ~PAGE_MASK;
326 hpage &= ~0xFFFULL;
327
328 page = vmalloc(HW_PAGE_SIZE);
329
330 if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
331 goto out;
332
333 for (i=0; i < HW_PAGE_SIZE / 4; i++)
334 if ((page[i] & 0xff0007ff) == INS_DCBZ) {
335 page[i] &= 0xfffffff7; // reserved instruction, so we trap
336 touched = true;
337 }
338
339 if (touched)
340 copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
341
342out:
343 vfree(page);
344}
345
346static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
347 struct kvmppc_pte *pte)
348{
349 int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
350 int r;
351
352 if (relocated) {
353 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
354 } else {
355 pte->eaddr = eaddr;
356 pte->raddr = eaddr & 0xffffffff;
357 pte->vpage = eaddr >> 12;
358 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
359 case 0:
360 pte->vpage |= VSID_REAL;
361 case MSR_DR:
362 pte->vpage |= VSID_REAL_DR;
363 case MSR_IR:
364 pte->vpage |= VSID_REAL_IR;
365 }
366 pte->may_read = true;
367 pte->may_write = true;
368 pte->may_execute = true;
369 r = 0;
370 }
371
372 return r;
373}
374
375static hva_t kvmppc_bad_hva(void)
376{
377 return PAGE_OFFSET;
378}
379
380static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
381 bool read)
382{
383 hva_t hpage;
384
385 if (read && !pte->may_read)
386 goto err;
387
388 if (!read && !pte->may_write)
389 goto err;
390
391 hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
392 if (kvm_is_error_hva(hpage))
393 goto err;
394
395 return hpage | (pte->raddr & ~PAGE_MASK);
396err:
397 return kvmppc_bad_hva();
398}
399
400int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
401{
402 struct kvmppc_pte pte;
403 hva_t hva = eaddr;
404
405 vcpu->stat.st++;
406
407 if (kvmppc_xlate(vcpu, eaddr, false, &pte))
408 goto err;
409
410 hva = kvmppc_pte_to_hva(vcpu, &pte, false);
411 if (kvm_is_error_hva(hva))
412 goto err;
413
414 if (copy_to_user((void __user *)hva, ptr, size)) {
415 printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
416 goto err;
417 }
418
419 return 0;
420
421err:
422 return -ENOENT;
423}
424
425int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
426 bool data)
427{
428 struct kvmppc_pte pte;
429 hva_t hva = eaddr;
430
431 vcpu->stat.ld++;
432
433 if (kvmppc_xlate(vcpu, eaddr, data, &pte))
434 goto err;
435
436 hva = kvmppc_pte_to_hva(vcpu, &pte, true);
437 if (kvm_is_error_hva(hva))
438 goto err;
439
440 if (copy_from_user(ptr, (void __user *)hva, size)) {
441 printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
442 goto err;
443 }
444
445 return 0;
446
447err:
448 return -ENOENT;
449}
450
451static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
452{
453 return kvm_is_visible_gfn(vcpu->kvm, gfn);
454}
455
456int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
457 ulong eaddr, int vec)
458{
459 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
460 int r = RESUME_GUEST;
461 int relocated;
462 int page_found = 0;
463 struct kvmppc_pte pte;
464 bool is_mmio = false;
465
466 if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
467 relocated = (vcpu->arch.msr & MSR_DR);
468 } else {
469 relocated = (vcpu->arch.msr & MSR_IR);
470 }
471
472 /* Resolve real address if translation turned on */
473 if (relocated) {
474 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
475 } else {
476 pte.may_execute = true;
477 pte.may_read = true;
478 pte.may_write = true;
479 pte.raddr = eaddr & 0xffffffff;
480 pte.eaddr = eaddr;
481 pte.vpage = eaddr >> 12;
482 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
483 case 0:
484 pte.vpage |= VSID_REAL;
485 case MSR_DR:
486 pte.vpage |= VSID_REAL_DR;
487 case MSR_IR:
488 pte.vpage |= VSID_REAL_IR;
489 }
490 }
491
492 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
493 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
494 /*
495 * If we do the dcbz hack, we have to NX on every execution,
496 * so we can patch the executing code. This renders our guest
497 * NX-less.
498 */
499 pte.may_execute = !data;
500 }
501
502 if (page_found == -ENOENT) {
503 /* Page not found in guest PTE entries */
504 vcpu->arch.dear = vcpu->arch.fault_dear;
505 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
506 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
507 kvmppc_book3s_queue_irqprio(vcpu, vec);
508 } else if (page_found == -EPERM) {
509 /* Storage protection */
510 vcpu->arch.dear = vcpu->arch.fault_dear;
511 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
512 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
513 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
514 kvmppc_book3s_queue_irqprio(vcpu, vec);
515 } else if (page_found == -EINVAL) {
516 /* Page not found in guest SLB */
517 vcpu->arch.dear = vcpu->arch.fault_dear;
518 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
519 } else if (!is_mmio &&
520 kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
521 /* The guest's PTE is not mapped yet. Map on the host */
522 kvmppc_mmu_map_page(vcpu, &pte);
523 if (data)
524 vcpu->stat.sp_storage++;
525 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
526 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
527 kvmppc_patch_dcbz(vcpu, &pte);
528 } else {
529 /* MMIO */
530 vcpu->stat.mmio_exits++;
531 vcpu->arch.paddr_accessed = pte.raddr;
532 r = kvmppc_emulate_mmio(run, vcpu);
533 if ( r == RESUME_HOST_NV )
534 r = RESUME_HOST;
535 if ( r == RESUME_GUEST_NV )
536 r = RESUME_GUEST;
537 }
538
539 return r;
540}
541
542int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
543 unsigned int exit_nr)
544{
545 int r = RESUME_HOST;
546
547 vcpu->stat.sum_exits++;
548
549 run->exit_reason = KVM_EXIT_UNKNOWN;
550 run->ready_for_interrupt_injection = 1;
551#ifdef EXIT_DEBUG
552 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
553 exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
554 kvmppc_get_dec(vcpu), vcpu->arch.msr);
555#elif defined (EXIT_DEBUG_SIMPLE)
556 if ((exit_nr != 0x900) && (exit_nr != 0x500))
557 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
558 exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
559 vcpu->arch.msr);
560#endif
561 kvm_resched(vcpu);
562 switch (exit_nr) {
563 case BOOK3S_INTERRUPT_INST_STORAGE:
564 vcpu->stat.pf_instruc++;
565 /* only care about PTEG not found errors, but leave NX alone */
566 if (vcpu->arch.shadow_msr & 0x40000000) {
567 r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
568 vcpu->stat.sp_instruc++;
569 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
570 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
571 /*
572 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
573 * so we can't use the NX bit inside the guest. Let's cross our fingers,
574 * that no guest that needs the dcbz hack does NX.
575 */
576 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
577 } else {
578 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
579 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
580 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
581 r = RESUME_GUEST;
582 }
583 break;
584 case BOOK3S_INTERRUPT_DATA_STORAGE:
585 vcpu->stat.pf_storage++;
586 /* The only case we need to handle is missing shadow PTEs */
587 if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
588 r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
589 } else {
590 vcpu->arch.dear = vcpu->arch.fault_dear;
591 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
592 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
593 kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
594 r = RESUME_GUEST;
595 }
596 break;
597 case BOOK3S_INTERRUPT_DATA_SEGMENT:
598 if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
599 vcpu->arch.dear = vcpu->arch.fault_dear;
600 kvmppc_book3s_queue_irqprio(vcpu,
601 BOOK3S_INTERRUPT_DATA_SEGMENT);
602 }
603 r = RESUME_GUEST;
604 break;
605 case BOOK3S_INTERRUPT_INST_SEGMENT:
606 if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
607 kvmppc_book3s_queue_irqprio(vcpu,
608 BOOK3S_INTERRUPT_INST_SEGMENT);
609 }
610 r = RESUME_GUEST;
611 break;
612 /* We're good on these - the host merely wanted to get our attention */
613 case BOOK3S_INTERRUPT_DECREMENTER:
614 vcpu->stat.dec_exits++;
615 r = RESUME_GUEST;
616 break;
617 case BOOK3S_INTERRUPT_EXTERNAL:
618 vcpu->stat.ext_intr_exits++;
619 r = RESUME_GUEST;
620 break;
621 case BOOK3S_INTERRUPT_PROGRAM:
622 {
623 enum emulation_result er;
624
625 if (vcpu->arch.msr & MSR_PR) {
626#ifdef EXIT_DEBUG
627 printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
628#endif
629 if ((vcpu->arch.last_inst & 0xff0007ff) !=
630 (INS_DCBZ & 0xfffffff7)) {
631 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
632 r = RESUME_GUEST;
633 break;
634 }
635 }
636
637 vcpu->stat.emulated_inst_exits++;
638 er = kvmppc_emulate_instruction(run, vcpu);
639 switch (er) {
640 case EMULATE_DONE:
641 r = RESUME_GUEST;
642 break;
643 case EMULATE_FAIL:
644 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
645 __func__, vcpu->arch.pc, vcpu->arch.last_inst);
646 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
647 r = RESUME_GUEST;
648 break;
649 default:
650 BUG();
651 }
652 break;
653 }
654 case BOOK3S_INTERRUPT_SYSCALL:
655#ifdef EXIT_DEBUG
656 printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
657#endif
658 vcpu->stat.syscall_exits++;
659 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
660 r = RESUME_GUEST;
661 break;
662 case BOOK3S_INTERRUPT_MACHINE_CHECK:
663 case BOOK3S_INTERRUPT_FP_UNAVAIL:
664 case BOOK3S_INTERRUPT_TRACE:
665 case BOOK3S_INTERRUPT_ALTIVEC:
666 case BOOK3S_INTERRUPT_VSX:
667 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
668 r = RESUME_GUEST;
669 break;
670 default:
671 /* Ugh - bork here! What did we get? */
672 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
673 r = RESUME_HOST;
674 BUG();
675 break;
676 }
677
678
679 if (!(r & RESUME_HOST)) {
680 /* To avoid clobbering exit_reason, only check for signals if
681 * we aren't already exiting to userspace for some other
682 * reason. */
683 if (signal_pending(current)) {
684#ifdef EXIT_DEBUG
685 printk(KERN_EMERG "KVM: Going back to host\n");
686#endif
687 vcpu->stat.signal_exits++;
688 run->exit_reason = KVM_EXIT_INTR;
689 r = -EINTR;
690 } else {
691 /* In case an interrupt came in that was triggered
692 * from userspace (like DEC), we need to check what
693 * to inject now! */
694 kvmppc_core_deliver_interrupts(vcpu);
695 }
696 }
697
698#ifdef EXIT_DEBUG
699 printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
700#endif
701
702 return r;
703}
704
705int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
706{
707 return 0;
708}
709
710int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
711{
712 int i;
713
714 regs->pc = vcpu->arch.pc;
715 regs->cr = vcpu->arch.cr;
716 regs->ctr = vcpu->arch.ctr;
717 regs->lr = vcpu->arch.lr;
718 regs->xer = vcpu->arch.xer;
719 regs->msr = vcpu->arch.msr;
720 regs->srr0 = vcpu->arch.srr0;
721 regs->srr1 = vcpu->arch.srr1;
722 regs->pid = vcpu->arch.pid;
723 regs->sprg0 = vcpu->arch.sprg0;
724 regs->sprg1 = vcpu->arch.sprg1;
725 regs->sprg2 = vcpu->arch.sprg2;
726 regs->sprg3 = vcpu->arch.sprg3;
727 regs->sprg5 = vcpu->arch.sprg4;
728 regs->sprg6 = vcpu->arch.sprg5;
729 regs->sprg7 = vcpu->arch.sprg6;
730
731 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
732 regs->gpr[i] = vcpu->arch.gpr[i];
733
734 return 0;
735}
736
737int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
738{
739 int i;
740
741 vcpu->arch.pc = regs->pc;
742 vcpu->arch.cr = regs->cr;
743 vcpu->arch.ctr = regs->ctr;
744 vcpu->arch.lr = regs->lr;
745 vcpu->arch.xer = regs->xer;
746 kvmppc_set_msr(vcpu, regs->msr);
747 vcpu->arch.srr0 = regs->srr0;
748 vcpu->arch.srr1 = regs->srr1;
749 vcpu->arch.sprg0 = regs->sprg0;
750 vcpu->arch.sprg1 = regs->sprg1;
751 vcpu->arch.sprg2 = regs->sprg2;
752 vcpu->arch.sprg3 = regs->sprg3;
753 vcpu->arch.sprg5 = regs->sprg4;
754 vcpu->arch.sprg6 = regs->sprg5;
755 vcpu->arch.sprg7 = regs->sprg6;
756
757 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
758 vcpu->arch.gpr[i] = regs->gpr[i];
759
760 return 0;
761}
762
763int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
764 struct kvm_sregs *sregs)
765{
766 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
767 int i;
768
769 sregs->pvr = vcpu->arch.pvr;
770
771 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
772 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
773 for (i = 0; i < 64; i++) {
774 sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
775 sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
776 }
777 } else {
778 for (i = 0; i < 16; i++) {
779 sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
780 sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
781 }
782 for (i = 0; i < 8; i++) {
783 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
784 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
785 }
786 }
787 return 0;
788}
789
790int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
791 struct kvm_sregs *sregs)
792{
793 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
794 int i;
795
796 kvmppc_set_pvr(vcpu, sregs->pvr);
797
798 vcpu3s->sdr1 = sregs->u.s.sdr1;
799 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
800 for (i = 0; i < 64; i++) {
801 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
802 sregs->u.s.ppc64.slb[i].slbe);
803 }
804 } else {
805 for (i = 0; i < 16; i++) {
806 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
807 }
808 for (i = 0; i < 8; i++) {
809 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
810 (u32)sregs->u.s.ppc32.ibat[i]);
811 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
812 (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
813 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
814 (u32)sregs->u.s.ppc32.dbat[i]);
815 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
816 (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
817 }
818 }
819
820 /* Flush the MMU after messing with the segments */
821 kvmppc_mmu_pte_flush(vcpu, 0, 0);
822 return 0;
823}
824
825int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
826{
827 return -ENOTSUPP;
828}
829
830int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
831{
832 return -ENOTSUPP;
833}
834
835int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
836 struct kvm_translation *tr)
837{
838 return 0;
839}
840
841/*
842 * Get (and clear) the dirty memory log for a memory slot.
843 */
844int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
845 struct kvm_dirty_log *log)
846{
847 struct kvm_memory_slot *memslot;
848 struct kvm_vcpu *vcpu;
849 ulong ga, ga_end;
850 int is_dirty = 0;
851 int r, n;
852
853 down_write(&kvm->slots_lock);
854
855 r = kvm_get_dirty_log(kvm, log, &is_dirty);
856 if (r)
857 goto out;
858
859 /* If nothing is dirty, don't bother messing with page tables. */
860 if (is_dirty) {
861 memslot = &kvm->memslots[log->slot];
862
863 ga = memslot->base_gfn << PAGE_SHIFT;
864 ga_end = ga + (memslot->npages << PAGE_SHIFT);
865
866 kvm_for_each_vcpu(n, vcpu, kvm)
867 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
868
869 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
870 memset(memslot->dirty_bitmap, 0, n);
871 }
872
873 r = 0;
874out:
875 up_write(&kvm->slots_lock);
876 return r;
877}
878
879int kvmppc_core_check_processor_compat(void)
880{
881 return 0;
882}
883
884struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
885{
886 struct kvmppc_vcpu_book3s *vcpu_book3s;
887 struct kvm_vcpu *vcpu;
888 int err;
889
890 vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
891 get_order(sizeof(struct kvmppc_vcpu_book3s)));
892 if (!vcpu_book3s) {
893 err = -ENOMEM;
894 goto out;
895 }
896
897 vcpu = &vcpu_book3s->vcpu;
898 err = kvm_vcpu_init(vcpu, kvm, id);
899 if (err)
900 goto free_vcpu;
901
902 vcpu->arch.host_retip = kvm_return_point;
903 vcpu->arch.host_msr = mfmsr();
904 /* default to book3s_64 (970fx) */
905 vcpu->arch.pvr = 0x3C0301;
906 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
907 vcpu_book3s->slb_nr = 64;
908
909 /* remember where some real-mode handlers are */
910 vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
911 vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
912 vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
913
914 vcpu->arch.shadow_msr = MSR_USER64;
915
916 err = __init_new_context();
917 if (err < 0)
918 goto free_vcpu;
919 vcpu_book3s->context_id = err;
920
921 vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
922 vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
923 vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
924
925 return vcpu;
926
927free_vcpu:
928 free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
929out:
930 return ERR_PTR(err);
931}
932
933void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
934{
935 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
936
937 __destroy_context(vcpu_book3s->context_id);
938 kvm_vcpu_uninit(vcpu);
939 free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
940}
941
942extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
943int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
944{
945 int ret;
946
947 /* No need to go into the guest when all we do is going out */
948 if (signal_pending(current)) {
949 kvm_run->exit_reason = KVM_EXIT_INTR;
950 return -EINTR;
951 }
952
953 /* XXX we get called with irq disabled - change that! */
954 local_irq_enable();
955
956 ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
957
958 local_irq_disable();
959
960 return ret;
961}
962
963static int kvmppc_book3s_init(void)
964{
965 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
966}
967
968static void kvmppc_book3s_exit(void)
969{
970 kvm_exit();
971}
972
973module_init(kvmppc_book3s_init);
974module_exit(kvmppc_book3s_exit);
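kvmppc_core_deliver_interrupts() above walks the pending-exception bitmap from the lowest set bit upward and stops at the first priority it is actually allowed to deliver. A standalone sketch of that walk; try_deliver() is a simplified stand-in for kvmppc_book3s_irqprio_deliver() and the example priorities are arbitrary:

#include <stdio.h>

#define PRIO_BITS	(sizeof(unsigned long) * 8)

/* Stand-in for kvmppc_book3s_irqprio_deliver(): pretend priorities
 * below 2 are held back, the way DEC/external wait for MSR_EE. */
static int try_deliver(unsigned int prio)
{
	return prio >= 2;
}

/* Simplified find_next_bit(): first set bit at or after 'from'. */
static unsigned int next_bit(unsigned long pending, unsigned int from)
{
	unsigned int i;

	for (i = from; i < PRIO_BITS; i++)
		if (pending & (1UL << i))
			return i;
	return PRIO_BITS;
}

int main(void)
{
	unsigned long pending = (1UL << 1) | (1UL << 3) | (1UL << 5);
	unsigned int prio = next_bit(pending, 0);

	while (prio < PRIO_BITS) {
		if (try_deliver(prio)) {
			pending &= ~(1UL << prio);	/* clear_bit() */
			printf("delivered priority %u\n", prio);
			break;
		}
		prio = next_bit(pending, prio + 1);
	}
	printf("still pending: 0x%lx\n", pending);
	return 0;
}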
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
new file mode 100644
index 000000000000..faf99f20d993
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -0,0 +1,372 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/highmem.h>
25
26#include <asm/tlbflush.h>
27#include <asm/kvm_ppc.h>
28#include <asm/kvm_book3s.h>
29
30/* #define DEBUG_MMU */
31/* #define DEBUG_MMU_PTE */
32/* #define DEBUG_MMU_PTE_IP 0xfff14c40 */
33
34#ifdef DEBUG_MMU
35#define dprintk(X...) printk(KERN_INFO X)
36#else
37#define dprintk(X...) do { } while(0)
38#endif
39
40#ifdef DEBUG_PTE
41#define dprintk_pte(X...) printk(KERN_INFO X)
42#else
43#define dprintk_pte(X...) do { } while(0)
44#endif
45
46#define PTEG_FLAG_ACCESSED 0x00000100
47#define PTEG_FLAG_DIRTY 0x00000080
48
49static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
50{
51#ifdef DEBUG_MMU_PTE_IP
52 return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
53#else
54 return true;
55#endif
56}
57
58static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
59 struct kvmppc_pte *pte, bool data);
60
61static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
62{
63 return &vcpu_book3s->sr[(eaddr >> 28) & 0xf];
64}
65
66static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
67 bool data)
68{
69 struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
70 struct kvmppc_pte pte;
71
72 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
73 return pte.vpage;
74
75 return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
76}
77
78static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
79{
80 kvmppc_set_msr(vcpu, 0);
81}
82
83static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
84 struct kvmppc_sr *sre, gva_t eaddr,
85 bool primary)
86{
87 u32 page, hash, pteg, htabmask;
88 hva_t r;
89
90 page = (eaddr & 0x0FFFFFFF) >> 12;
91 htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;
92
93 hash = ((sre->vsid ^ page) << 6);
94 if (!primary)
95 hash = ~hash;
96 hash &= htabmask;
97
98 pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;
99
100 dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
101 vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg,
102 sre->vsid);
103
104 r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
105 if (kvm_is_error_hva(r))
106 return r;
107 return r | (pteg & ~PAGE_MASK);
108}
109
110static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr,
111 bool primary)
112{
113 return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) |
114 (primary ? 0 : 0x40) | 0x80000000;
115}
116
117static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
118 struct kvmppc_pte *pte, bool data)
119{
120 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
121 struct kvmppc_bat *bat;
122 int i;
123
124 for (i = 0; i < 8; i++) {
125 if (data)
126 bat = &vcpu_book3s->dbat[i];
127 else
128 bat = &vcpu_book3s->ibat[i];
129
130 if (vcpu->arch.msr & MSR_PR) {
131 if (!bat->vp)
132 continue;
133 } else {
134 if (!bat->vs)
135 continue;
136 }
137
138 if (check_debug_ip(vcpu))
139 {
140 dprintk_pte("%cBAT %02d: 0x%lx - 0x%x (0x%x)\n",
141 data ? 'd' : 'i', i, eaddr, bat->bepi,
142 bat->bepi_mask);
143 }
144 if ((eaddr & bat->bepi_mask) == bat->bepi) {
145 pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
146 pte->vpage = (eaddr >> 12) | VSID_BAT;
147 pte->may_read = bat->pp;
148 pte->may_write = bat->pp > 1;
149 pte->may_execute = true;
150 if (!pte->may_read) {
151 printk(KERN_INFO "BAT is not readable!\n");
152 continue;
153 }
154 if (!pte->may_write) {
155 /* let's treat r/o BATs as not-readable for now */
156 dprintk_pte("BAT is read-only!\n");
157 continue;
158 }
159
160 return 0;
161 }
162 }
163
164 return -ENOENT;
165}
166
167static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
168 struct kvmppc_pte *pte, bool data,
169 bool primary)
170{
171 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
172 struct kvmppc_sr *sre;
173 hva_t ptegp;
174 u32 pteg[16];
175 u64 ptem = 0;
176 int i;
177 int found = 0;
178
179 sre = find_sr(vcpu_book3s, eaddr);
180
181 dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
182 sre->vsid, sre->raw);
183
184 pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
185
186 ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
187 if (kvm_is_error_hva(ptegp)) {
188 printk(KERN_INFO "KVM: Invalid PTEG!\n");
189 goto no_page_found;
190 }
191
192 ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary);
193
194 if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
195 printk(KERN_ERR "KVM: Can't copy data from 0x%lx!\n", ptegp);
196 goto no_page_found;
197 }
198
199 for (i=0; i<16; i+=2) {
200 if (ptem == pteg[i]) {
201 u8 pp;
202
203 pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
204 pp = pteg[i+1] & 3;
205
206 if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
207 (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
208 pp |= 4;
209
210 pte->may_write = false;
211 pte->may_read = false;
212 pte->may_execute = true;
213 switch (pp) {
214 case 0:
215 case 1:
216 case 2:
217 case 6:
218 pte->may_write = true;
219 case 3:
220 case 5:
221 case 7:
222 pte->may_read = true;
223 break;
224 }
225
226 if ( !pte->may_read )
227 continue;
228
229 dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
230 pteg[i], pteg[i+1], pp);
231 found = 1;
232 break;
233 }
234 }
235
236 /* Update PTE C and A bits, so the guest's swapper knows we used the
237 page */
238 if (found) {
239 u32 oldpte = pteg[i+1];
240
241 if (pte->may_read)
242 pteg[i+1] |= PTEG_FLAG_ACCESSED;
243 if (pte->may_write)
244 pteg[i+1] |= PTEG_FLAG_DIRTY;
245 else
246 dprintk_pte("KVM: Mapping read-only page!\n");
247
248 /* Write back into the PTEG */
249 if (pteg[i+1] != oldpte)
250 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
251
252 return 0;
253 }
254
255no_page_found:
256
257 if (check_debug_ip(vcpu)) {
258 dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n",
259 to_book3s(vcpu)->sdr1, ptegp);
260 for (i=0; i<16; i+=2) {
261 dprintk_pte(" %02d: 0x%x - 0x%x (0x%llx)\n",
262 i, pteg[i], pteg[i+1], ptem);
263 }
264 }
265
266 return -ENOENT;
267}
268
269static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
270 struct kvmppc_pte *pte, bool data)
271{
272 int r;
273
274 pte->eaddr = eaddr;
275 r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
276 if (r < 0)
277 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
278 if (r < 0)
279 r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
280
281 return r;
282}
283
284
285static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
286{
287 return to_book3s(vcpu)->sr[srnum].raw;
288}
289
290static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
291 ulong value)
292{
293 struct kvmppc_sr *sre;
294
295 sre = &to_book3s(vcpu)->sr[srnum];
296
297 /* Flush any left-over shadows from the previous SR */
298
299 /* XXX Not necessary? */
300 /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */
301
302 /* And then put in the new SR */
303 sre->raw = value;
304 sre->vsid = (value & 0x0fffffff);
305 sre->Ks = (value & 0x40000000) ? true : false;
306 sre->Kp = (value & 0x20000000) ? true : false;
307 sre->nx = (value & 0x10000000) ? true : false;
308
309 /* Map the new segment */
310 kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
311}
312
313static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
314{
315 kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
316}
317
318static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
319 u64 *vsid)
320{
321 /* In case we only have one of MSR_IR or MSR_DR set, let's put
322 that in the real-mode context (and hope RM doesn't access
323 high memory) */
324 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
325 case 0:
326 *vsid = (VSID_REAL >> 16) | esid;
327 break;
328 case MSR_IR:
329 *vsid = (VSID_REAL_IR >> 16) | esid;
330 break;
331 case MSR_DR:
332 *vsid = (VSID_REAL_DR >> 16) | esid;
333 break;
334 case MSR_DR|MSR_IR:
335 {
336 ulong ea;
337 ea = esid << SID_SHIFT;
338 *vsid = find_sr(to_book3s(vcpu), ea)->vsid;
339 break;
340 }
341 default:
342 BUG();
343 }
344
345 return 0;
346}
347
348static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu)
349{
350 return true;
351}
352
353
354void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
355{
356 struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
357
358 mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
359 mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
360 mmu->xlate = kvmppc_mmu_book3s_32_xlate;
361 mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;
362 mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
363 mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
364 mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
365 mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;
366
367 mmu->slbmte = NULL;
368 mmu->slbmfee = NULL;
369 mmu->slbmfev = NULL;
370 mmu->slbie = NULL;
371 mmu->slbia = NULL;
372}
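kvmppc_mmu_book3s_32_get_pteg() above implements the classic 32-bit hashed page table lookup: hash the segment VSID with the page index, invert the hash for the secondary PTEG, mask with the size bits from SDR1 and OR in the hash table base. A standalone sketch of just that arithmetic, reusing the same constants; the example SDR1/VSID/address values are arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* PTEG address for a 32-bit hashed page table, mirroring the
 * arithmetic in kvmppc_mmu_book3s_32_get_pteg() above. */
static uint32_t pteg_addr(uint32_t sdr1, uint32_t vsid, uint32_t eaddr,
			  int primary)
{
	uint32_t page = (eaddr & 0x0fffffff) >> 12;
	uint32_t htabmask = ((sdr1 & 0x1ff) << 16) | 0xffc0;
	uint32_t hash = (vsid ^ page) << 6;

	if (!primary)
		hash = ~hash;
	hash &= htabmask;

	return (sdr1 & 0xffff0000) | hash;
}

int main(void)
{
	uint32_t sdr1 = 0x00300003, vsid = 0x123, eaddr = 0x0fe00000;

	printf("primary PTEG   at 0x%08" PRIx32 "\n",
	       pteg_addr(sdr1, vsid, eaddr, 1));
	printf("secondary PTEG at 0x%08" PRIx32 "\n",
	       pteg_addr(sdr1, vsid, eaddr, 0));
	return 0;
}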
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
new file mode 100644
index 000000000000..1027eac6d474
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -0,0 +1,345 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/kvm_ppc.h>
21#include <asm/disassemble.h>
22#include <asm/kvm_book3s.h>
23#include <asm/reg.h>
24
25#define OP_19_XOP_RFID 18
26#define OP_19_XOP_RFI 50
27
28#define OP_31_XOP_MFMSR 83
29#define OP_31_XOP_MTMSR 146
30#define OP_31_XOP_MTMSRD 178
31#define OP_31_XOP_MTSRIN 242
32#define OP_31_XOP_TLBIEL 274
33#define OP_31_XOP_TLBIE 306
34#define OP_31_XOP_SLBMTE 402
35#define OP_31_XOP_SLBIE 434
36#define OP_31_XOP_SLBIA 498
37#define OP_31_XOP_MFSRIN 659
38#define OP_31_XOP_SLBMFEV 851
39#define OP_31_XOP_EIOIO 854
40#define OP_31_XOP_SLBMFEE 915
41
42/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
43#define OP_31_XOP_DCBZ 1010
44
45int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
46 unsigned int inst, int *advance)
47{
48 int emulated = EMULATE_DONE;
49
50 switch (get_op(inst)) {
51 case 19:
52 switch (get_xop(inst)) {
53 case OP_19_XOP_RFID:
54 case OP_19_XOP_RFI:
55 vcpu->arch.pc = vcpu->arch.srr0;
56 kvmppc_set_msr(vcpu, vcpu->arch.srr1);
57 *advance = 0;
58 break;
59
60 default:
61 emulated = EMULATE_FAIL;
62 break;
63 }
64 break;
65 case 31:
66 switch (get_xop(inst)) {
67 case OP_31_XOP_MFMSR:
68 vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
69 break;
70 case OP_31_XOP_MTMSRD:
71 {
72 ulong rs = vcpu->arch.gpr[get_rs(inst)];
73 if (inst & 0x10000) {
74 vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
75 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
76 } else
77 kvmppc_set_msr(vcpu, rs);
78 break;
79 }
80 case OP_31_XOP_MTMSR:
81 kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
82 break;
83 case OP_31_XOP_MFSRIN:
84 {
85 int srnum;
86
87 srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
88 if (vcpu->arch.mmu.mfsrin) {
89 u32 sr;
90 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
91 vcpu->arch.gpr[get_rt(inst)] = sr;
92 }
93 break;
94 }
95 case OP_31_XOP_MTSRIN:
96 vcpu->arch.mmu.mtsrin(vcpu,
97 (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
98 vcpu->arch.gpr[get_rs(inst)]);
99 break;
100 case OP_31_XOP_TLBIE:
101 case OP_31_XOP_TLBIEL:
102 {
103 bool large = (inst & 0x00200000) ? true : false;
104 ulong addr = vcpu->arch.gpr[get_rb(inst)];
105 vcpu->arch.mmu.tlbie(vcpu, addr, large);
106 break;
107 }
108 case OP_31_XOP_EIOIO:
109 break;
110 case OP_31_XOP_SLBMTE:
111 if (!vcpu->arch.mmu.slbmte)
112 return EMULATE_FAIL;
113
114 vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
115 vcpu->arch.gpr[get_rb(inst)]);
116 break;
117 case OP_31_XOP_SLBIE:
118 if (!vcpu->arch.mmu.slbie)
119 return EMULATE_FAIL;
120
121 vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
122 break;
123 case OP_31_XOP_SLBIA:
124 if (!vcpu->arch.mmu.slbia)
125 return EMULATE_FAIL;
126
127 vcpu->arch.mmu.slbia(vcpu);
128 break;
129 case OP_31_XOP_SLBMFEE:
130 if (!vcpu->arch.mmu.slbmfee) {
131 emulated = EMULATE_FAIL;
132 } else {
133 ulong t, rb;
134
135 rb = vcpu->arch.gpr[get_rb(inst)];
136 t = vcpu->arch.mmu.slbmfee(vcpu, rb);
137 vcpu->arch.gpr[get_rt(inst)] = t;
138 }
139 break;
140 case OP_31_XOP_SLBMFEV:
141 if (!vcpu->arch.mmu.slbmfev) {
142 emulated = EMULATE_FAIL;
143 } else {
144 ulong t, rb;
145
146 rb = vcpu->arch.gpr[get_rb(inst)];
147 t = vcpu->arch.mmu.slbmfev(vcpu, rb);
148 vcpu->arch.gpr[get_rt(inst)] = t;
149 }
150 break;
151 case OP_31_XOP_DCBZ:
152 {
153 ulong rb = vcpu->arch.gpr[get_rb(inst)];
154 ulong ra = 0;
155 ulong addr;
156 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
157
158 if (get_ra(inst))
159 ra = vcpu->arch.gpr[get_ra(inst)];
160
161 addr = (ra + rb) & ~31ULL;
162 if (!(vcpu->arch.msr & MSR_SF))
163 addr &= 0xffffffff;
164
165 if (kvmppc_st(vcpu, addr, 32, zeros)) {
166 vcpu->arch.dear = addr;
167 vcpu->arch.fault_dear = addr;
168 to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
169 DSISR_ISSTORE;
170 kvmppc_book3s_queue_irqprio(vcpu,
171 BOOK3S_INTERRUPT_DATA_STORAGE);
172 kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
173 }
174
175 break;
176 }
177 default:
178 emulated = EMULATE_FAIL;
179 }
180 break;
181 default:
182 emulated = EMULATE_FAIL;
183 }
184
185 return emulated;
186}
187
188void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
189 u32 val)
190{
191 if (upper) {
192 /* Upper BAT */
193 u32 bl = (val >> 2) & 0x7ff;
194 bat->bepi_mask = (~bl << 17);
195 bat->bepi = val & 0xfffe0000;
196 bat->vs = (val & 2) ? 1 : 0;
197 bat->vp = (val & 1) ? 1 : 0;
198 bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
199 } else {
200 /* Lower BAT */
201 bat->brpn = val & 0xfffe0000;
202 bat->wimg = (val >> 3) & 0xf;
203 bat->pp = val & 3;
204 bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
205 }
206}
207
208static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
209{
210 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
211 struct kvmppc_bat *bat;
212
213 switch (sprn) {
214 case SPRN_IBAT0U ... SPRN_IBAT3L:
215 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
216 break;
217 case SPRN_IBAT4U ... SPRN_IBAT7L:
218 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2];
219 break;
220 case SPRN_DBAT0U ... SPRN_DBAT3L:
221 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
222 break;
223 case SPRN_DBAT4U ... SPRN_DBAT7L:
224 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2];
225 break;
226 default:
227 BUG();
228 }
229
230 kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
231}
232
233int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
234{
235 int emulated = EMULATE_DONE;
236
237 switch (sprn) {
238 case SPRN_SDR1:
239 to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
240 break;
241 case SPRN_DSISR:
242 to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
243 break;
244 case SPRN_DAR:
245 vcpu->arch.dear = vcpu->arch.gpr[rs];
246 break;
247 case SPRN_HIOR:
248 to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
249 break;
250 case SPRN_IBAT0U ... SPRN_IBAT3L:
251 case SPRN_IBAT4U ... SPRN_IBAT7L:
252 case SPRN_DBAT0U ... SPRN_DBAT3L:
253 case SPRN_DBAT4U ... SPRN_DBAT7L:
254 kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
255 /* BAT writes happen so rarely that we're ok to flush
256 * everything here */
257 kvmppc_mmu_pte_flush(vcpu, 0, 0);
258 break;
259 case SPRN_HID0:
260 to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
261 break;
262 case SPRN_HID1:
263 to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
264 break;
265 case SPRN_HID2:
266 to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
267 break;
268 case SPRN_HID4:
269 to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
270 break;
271 case SPRN_HID5:
272 to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
273 /* guest HID5 set can change is_dcbz32 */
274 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
275 (mfmsr() & MSR_HV))
276 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
277 break;
278 case SPRN_ICTC:
279 case SPRN_THRM1:
280 case SPRN_THRM2:
281 case SPRN_THRM3:
282 case SPRN_CTRLF:
283 case SPRN_CTRLT:
284 break;
285 default:
286 printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
287#ifndef DEBUG_SPR
288 emulated = EMULATE_FAIL;
289#endif
290 break;
291 }
292
293 return emulated;
294}
295
296int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
297{
298 int emulated = EMULATE_DONE;
299
300 switch (sprn) {
301 case SPRN_SDR1:
302 vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
303 break;
304 case SPRN_DSISR:
305 vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
306 break;
307 case SPRN_DAR:
308 vcpu->arch.gpr[rt] = vcpu->arch.dear;
309 break;
310 case SPRN_HIOR:
311 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
312 break;
313 case SPRN_HID0:
314 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
315 break;
316 case SPRN_HID1:
317 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
318 break;
319 case SPRN_HID2:
320 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
321 break;
322 case SPRN_HID4:
323 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
324 break;
325 case SPRN_HID5:
326 vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
327 break;
328 case SPRN_THRM1:
329 case SPRN_THRM2:
330 case SPRN_THRM3:
331 case SPRN_CTRLF:
332 case SPRN_CTRLT:
333 vcpu->arch.gpr[rt] = 0;
334 break;
335 default:
336 printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
337#ifndef DEBUG_SPR
338 emulated = EMULATE_FAIL;
339#endif
340 break;
341 }
342
343 return emulated;
344}
345
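The mtspr path above folds all sixteen IBAT/DBAT SPRs into an array index plus an upper/lower flag ((sprn - SPRN_IBAT0U) / 2 picks the BAT pair, sprn % 2 picks the half), and kvmppc_set_bat() then unpacks the raw register value into its fields. A minimal, stand-alone sketch of the upper-BAT decode follows; the field names mirror the code above, while the example value and the main() harness are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct bat_sketch {
	uint32_t bepi, bepi_mask;
	int vs, vp;
};

/* Same unpacking as the "Upper BAT" branch of kvmppc_set_bat() */
static void decode_upper_bat(struct bat_sketch *bat, uint32_t val)
{
	uint32_t bl = (val >> 2) & 0x7ff;	/* block length encoding */

	bat->bepi_mask = ~bl << 17;		/* which EA bits must match BEPI */
	bat->bepi = val & 0xfffe0000;		/* block effective page index */
	bat->vs = (val >> 1) & 1;		/* valid in supervisor state */
	bat->vp = val & 1;			/* valid in problem (user) state */
}

int main(void)
{
	struct bat_sketch bat;

	/* 256MB block at EA 0x10000000, supervisor-only */
	decode_upper_bat(&bat, 0x10001ffe);
	printf("bepi=%08x mask=%08x vs=%d vp=%d\n",
	       (unsigned)bat.bepi, (unsigned)bat.bepi_mask, bat.vs, bat.vp);
	return 0;
}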
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
new file mode 100644
index 000000000000..5b2db38ed86c
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -0,0 +1,24 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <linux/module.h>
21#include <asm/kvm_book3s.h>
22
23EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
24EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
new file mode 100644
index 000000000000..7b55d8094c8b
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -0,0 +1,392 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
23#include <asm/page.h>
24#include <asm/asm-offsets.h>
25#include <asm/exception-64s.h>
26
27#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
28#define ULONG_SIZE 8
29#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
30
31.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
32 ld \tmp_reg, (PACA_EXMC+\offset)(r13)
33 std \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
34.endm
35
36.macro DISABLE_INTERRUPTS
37 mfmsr r0
38 rldicl r0,r0,48,1
39 rotldi r0,r0,16
40 mtmsrd r0,1
41.endm
42
43/*****************************************************************************
44 * *
45 * Guest entry / exit code that is in kernel module memory (highmem) *
46 * *
47 ****************************************************************************/
48
49/* Registers:
50 * r3: kvm_run pointer
51 * r4: vcpu pointer
52 */
53_GLOBAL(__kvmppc_vcpu_entry)
54
55kvm_start_entry:
56 /* Write correct stack frame */
57 mflr r0
58 std r0,16(r1)
59
60 /* Save host state to the stack */
61 stdu r1, -SWITCH_FRAME_SIZE(r1)
62
63 /* Save r3 (kvm_run) and r4 (vcpu) */
64 SAVE_2GPRS(3, r1)
65
66 /* Save non-volatile registers (r14 - r31) */
67 SAVE_NVGPRS(r1)
68
69 /* Save LR */
70 mflr r14
71 std r14, _LINK(r1)
72
73/* XXX optimize non-volatile loading away */
74kvm_start_lightweight:
75
76 DISABLE_INTERRUPTS
77
78 /* Save R1/R2 in the PACA */
79 std r1, PACAR1(r13)
80 std r2, (PACA_EXMC+EX_SRR0)(r13)
81 ld r3, VCPU_HIGHMEM_HANDLER(r4)
82 std r3, PACASAVEDMSR(r13)
83
84 /* Load non-volatile guest state from the vcpu */
85 ld r14, VCPU_GPR(r14)(r4)
86 ld r15, VCPU_GPR(r15)(r4)
87 ld r16, VCPU_GPR(r16)(r4)
88 ld r17, VCPU_GPR(r17)(r4)
89 ld r18, VCPU_GPR(r18)(r4)
90 ld r19, VCPU_GPR(r19)(r4)
91 ld r20, VCPU_GPR(r20)(r4)
92 ld r21, VCPU_GPR(r21)(r4)
93 ld r22, VCPU_GPR(r22)(r4)
94 ld r23, VCPU_GPR(r23)(r4)
95 ld r24, VCPU_GPR(r24)(r4)
96 ld r25, VCPU_GPR(r25)(r4)
97 ld r26, VCPU_GPR(r26)(r4)
98 ld r27, VCPU_GPR(r27)(r4)
99 ld r28, VCPU_GPR(r28)(r4)
100 ld r29, VCPU_GPR(r29)(r4)
101 ld r30, VCPU_GPR(r30)(r4)
102 ld r31, VCPU_GPR(r31)(r4)
103
104 ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
105 ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
106
107 ld r3, VCPU_TRAMPOLINE_ENTER(r4)
108 mtsrr0 r3
109
110 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
111 mtsrr1 r3
112
113 /* Load guest state in the respective registers */
114 lwz r3, VCPU_CR(r4) /* r3 = vcpu->arch.cr */
115 stw r3, (PACA_EXMC + EX_CCR)(r13)
116
117 ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */
118 mtctr r3 /* CTR = r3 */
119
120 ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
121 mtlr r3 /* LR = r3 */
122
123 ld r3, VCPU_XER(r4) /* r3 = vcpu->arch.xer */
124 std r3, (PACA_EXMC + EX_R3)(r13)
125
126 /* Some guests may need to have dcbz set to 32 byte length.
127 *
128 * Usually we ensure that by patching the guest's instructions
129 * to trap on dcbz and emulate it in the hypervisor.
130 *
131 * If we can, we should tell the CPU to use 32 byte dcbz though,
132 * because that's a lot faster.
133 */
134
135 ld r3, VCPU_HFLAGS(r4)
136 rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
137 beq no_dcbz32_on
138
139 mfspr r3,SPRN_HID5
140 ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */
141 mtspr SPRN_HID5,r3
142
143no_dcbz32_on:
144 /* Load guest GPRs */
145
146 ld r3, VCPU_GPR(r9)(r4)
147 std r3, (PACA_EXMC + EX_R9)(r13)
148 ld r3, VCPU_GPR(r10)(r4)
149 std r3, (PACA_EXMC + EX_R10)(r13)
150 ld r3, VCPU_GPR(r11)(r4)
151 std r3, (PACA_EXMC + EX_R11)(r13)
152 ld r3, VCPU_GPR(r12)(r4)
153 std r3, (PACA_EXMC + EX_R12)(r13)
154 ld r3, VCPU_GPR(r13)(r4)
155 std r3, (PACA_EXMC + EX_R13)(r13)
156
157 ld r0, VCPU_GPR(r0)(r4)
158 ld r1, VCPU_GPR(r1)(r4)
159 ld r2, VCPU_GPR(r2)(r4)
160 ld r3, VCPU_GPR(r3)(r4)
161 ld r5, VCPU_GPR(r5)(r4)
162 ld r6, VCPU_GPR(r6)(r4)
163 ld r7, VCPU_GPR(r7)(r4)
164 ld r8, VCPU_GPR(r8)(r4)
165 ld r4, VCPU_GPR(r4)(r4)
166
167 /* This sets the Magic value for the trampoline */
168
169 li r11, 1
170 stb r11, PACA_KVM_IN_GUEST(r13)
171
172	/* Jump to SLB patching handler and into our guest */
173 RFI
174
175/*
176 * This is the handler in module memory. It gets jumped at from the
177 * lowmem trampoline code, so it's basically the guest exit code.
178 *
179 */
180
181.global kvmppc_handler_highmem
182kvmppc_handler_highmem:
183
184 /*
185 * Register usage at this point:
186 *
187 * R00 = guest R13
188 * R01 = host R1
189 * R02 = host R2
190 * R10 = guest PC
191 * R11 = guest MSR
192 * R12 = exit handler id
193 * R13 = PACA
194 * PACA.exmc.R9 = guest R1
195 * PACA.exmc.R10 = guest R10
196 * PACA.exmc.R11 = guest R11
197 * PACA.exmc.R12 = guest R12
198 * PACA.exmc.R13 = guest R2
199 * PACA.exmc.DAR = guest DAR
200 * PACA.exmc.DSISR = guest DSISR
201 * PACA.exmc.LR = guest instruction
202 * PACA.exmc.CCR = guest CR
203 * PACA.exmc.SRR0 = guest R0
204 *
205 */
206
207 std r3, (PACA_EXMC+EX_R3)(r13)
208
209 /* save the exit id in R3 */
210 mr r3, r12
211
212 /* R12 = vcpu */
213 ld r12, GPR4(r1)
214
215 /* Now save the guest state */
216
217 std r0, VCPU_GPR(r13)(r12)
218 std r4, VCPU_GPR(r4)(r12)
219 std r5, VCPU_GPR(r5)(r12)
220 std r6, VCPU_GPR(r6)(r12)
221 std r7, VCPU_GPR(r7)(r12)
222 std r8, VCPU_GPR(r8)(r12)
223 std r9, VCPU_GPR(r9)(r12)
224
225 /* get registers from PACA */
226 mfpaca r5, r0, EX_SRR0, r12
227 mfpaca r5, r3, EX_R3, r12
228 mfpaca r5, r1, EX_R9, r12
229 mfpaca r5, r10, EX_R10, r12
230 mfpaca r5, r11, EX_R11, r12
231 mfpaca r5, r12, EX_R12, r12
232 mfpaca r5, r2, EX_R13, r12
233
234 lwz r5, (PACA_EXMC+EX_LR)(r13)
235 stw r5, VCPU_LAST_INST(r12)
236
237 lwz r5, (PACA_EXMC+EX_CCR)(r13)
238 stw r5, VCPU_CR(r12)
239
240 ld r5, VCPU_HFLAGS(r12)
241 rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
242 beq no_dcbz32_off
243
244 mfspr r5,SPRN_HID5
245 rldimi r5,r5,6,56
246 mtspr SPRN_HID5,r5
247
248no_dcbz32_off:
249
250 /* XXX maybe skip on lightweight? */
251 std r14, VCPU_GPR(r14)(r12)
252 std r15, VCPU_GPR(r15)(r12)
253 std r16, VCPU_GPR(r16)(r12)
254 std r17, VCPU_GPR(r17)(r12)
255 std r18, VCPU_GPR(r18)(r12)
256 std r19, VCPU_GPR(r19)(r12)
257 std r20, VCPU_GPR(r20)(r12)
258 std r21, VCPU_GPR(r21)(r12)
259 std r22, VCPU_GPR(r22)(r12)
260 std r23, VCPU_GPR(r23)(r12)
261 std r24, VCPU_GPR(r24)(r12)
262 std r25, VCPU_GPR(r25)(r12)
263 std r26, VCPU_GPR(r26)(r12)
264 std r27, VCPU_GPR(r27)(r12)
265 std r28, VCPU_GPR(r28)(r12)
266 std r29, VCPU_GPR(r29)(r12)
267 std r30, VCPU_GPR(r30)(r12)
268 std r31, VCPU_GPR(r31)(r12)
269
270 /* Restore non-volatile host registers (r14 - r31) */
271 REST_NVGPRS(r1)
272
273 /* Save guest PC (R10) */
274 std r10, VCPU_PC(r12)
275
276 /* Save guest msr (R11) */
277 std r11, VCPU_SHADOW_MSR(r12)
278
279 /* Save guest CTR (in R12) */
280 mfctr r5
281 std r5, VCPU_CTR(r12)
282
283 /* Save guest LR */
284 mflr r5
285 std r5, VCPU_LR(r12)
286
287 /* Save guest XER */
288 mfxer r5
289 std r5, VCPU_XER(r12)
290
291 /* Save guest DAR */
292 ld r5, (PACA_EXMC+EX_DAR)(r13)
293 std r5, VCPU_FAULT_DEAR(r12)
294
295 /* Save guest DSISR */
296 lwz r5, (PACA_EXMC+EX_DSISR)(r13)
297 std r5, VCPU_FAULT_DSISR(r12)
298
299 /* Restore host msr -> SRR1 */
300 ld r7, VCPU_HOST_MSR(r12)
301 mtsrr1 r7
302
303 /* Restore host IP -> SRR0 */
304 ld r6, VCPU_HOST_RETIP(r12)
305 mtsrr0 r6
306
307 /*
308 * For some interrupts, we need to call the real Linux
309 * handler, so it can do work for us. This has to happen
310 * as if the interrupt arrived from the kernel though,
311 * so let's fake it here where most state is restored.
312 *
313 * Call Linux for hardware interrupts/decrementer
314 * r3 = address of interrupt handler (exit reason)
315 */
316
317 cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL
318 beq call_linux_handler
319 cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER
320 beq call_linux_handler
321
322	/* Back to Interruptible Mode! (goto kvm_return_point) */
323 RFI
324
325call_linux_handler:
326
327 /*
328 * If we land here we need to jump back to the handler we
329 * came from.
330 *
331 * We have a page that we can access from real mode, so let's
332 * jump back to that and use it as a trampoline to get back into the
333 * interrupt handler!
334 *
335 * R3 still contains the exit code,
336 * R6 VCPU_HOST_RETIP and
337 * R7 VCPU_HOST_MSR
338 */
339
340 mtlr r3
341
342 ld r5, VCPU_TRAMPOLINE_LOWMEM(r12)
343 mtsrr0 r5
344 LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
345 mtsrr1 r5
346
347 RFI
348
349.global kvm_return_point
350kvm_return_point:
351
352 /* Jump back to lightweight entry if we're supposed to */
353 /* go back into the guest */
354 mr r5, r3
355 /* Restore r3 (kvm_run) and r4 (vcpu) */
356 REST_2GPRS(3, r1)
357 bl KVMPPC_HANDLE_EXIT
358
359#if 0 /* XXX get lightweight exits back */
360 cmpwi r3, RESUME_GUEST
361 bne kvm_exit_heavyweight
362
363 /* put VCPU and KVM_RUN back into place and roll again! */
364 REST_2GPRS(3, r1)
365 b kvm_start_lightweight
366
367kvm_exit_heavyweight:
368 /* Restore non-volatile host registers */
369 ld r14, _LINK(r1)
370 mtlr r14
371 REST_NVGPRS(r1)
372
373 addi r1, r1, SWITCH_FRAME_SIZE
374#else
375 ld r4, _LINK(r1)
376 mtlr r4
377
378 cmpwi r3, RESUME_GUEST
379 bne kvm_exit_heavyweight
380
381 REST_2GPRS(3, r1)
382
383 addi r1, r1, SWITCH_FRAME_SIZE
384
385 b kvm_start_entry
386
387kvm_exit_heavyweight:
388
389 addi r1, r1, SWITCH_FRAME_SIZE
390#endif
391
392 blr
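DISABLE_INTERRUPTS above clears MSR[EE] without loading a mask constant: rldicl r0,r0,48,1 rotates the MSR so EE lands in the most-significant bit and the mask drops it, then rotldi r0,r0,16 completes a full 64-bit rotation, leaving all other bits untouched. Conceptually it is just msr &= ~MSR_EE. A stand-alone sketch of the same bit manipulation; the MSR value in main() is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

#define MSR_EE 0x8000ULL	/* external interrupt enable */

/* Equivalent of: rldicl r0,r0,48,1 ; rotldi r0,r0,16 */
static uint64_t disable_ee(uint64_t msr)
{
	uint64_t r = (msr << 48) | (msr >> 16);	/* rotate left by 48 */

	r &= ~(1ULL << 63);			/* mask off IBM bit 0 (= old MSR[EE]) */
	return (r << 16) | (r >> 48);		/* rotate left by 16 -> back in place */
}

int main(void)
{
	uint64_t msr = 0x800000000000d032ULL;

	printf("0x%016llx -> 0x%016llx\n",
	       (unsigned long long)msr, (unsigned long long)disable_ee(msr));
	return 0;
}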
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
new file mode 100644
index 000000000000..5598f88f142e
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -0,0 +1,478 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/highmem.h>
25
26#include <asm/tlbflush.h>
27#include <asm/kvm_ppc.h>
28#include <asm/kvm_book3s.h>
29
30/* #define DEBUG_MMU */
31
32#ifdef DEBUG_MMU
33#define dprintk(X...) printk(KERN_INFO X)
34#else
35#define dprintk(X...) do { } while(0)
36#endif
37
38static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
39{
40 kvmppc_set_msr(vcpu, MSR_SF);
41}
42
43static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
44 struct kvmppc_vcpu_book3s *vcpu_book3s,
45 gva_t eaddr)
46{
47 int i;
48 u64 esid = GET_ESID(eaddr);
49 u64 esid_1t = GET_ESID_1T(eaddr);
50
51 for (i = 0; i < vcpu_book3s->slb_nr; i++) {
52 u64 cmp_esid = esid;
53
54 if (!vcpu_book3s->slb[i].valid)
55 continue;
56
57 if (vcpu_book3s->slb[i].large)
58 cmp_esid = esid_1t;
59
60 if (vcpu_book3s->slb[i].esid == cmp_esid)
61 return &vcpu_book3s->slb[i];
62 }
63
64 dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
65 eaddr, esid, esid_1t);
66 for (i = 0; i < vcpu_book3s->slb_nr; i++) {
67 if (vcpu_book3s->slb[i].vsid)
68 dprintk(" %d: %c%c %llx %llx\n", i,
69 vcpu_book3s->slb[i].valid ? 'v' : ' ',
70 vcpu_book3s->slb[i].large ? 'l' : ' ',
71 vcpu_book3s->slb[i].esid,
72 vcpu_book3s->slb[i].vsid);
73 }
74
75 return NULL;
76}
77
78static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
79 bool data)
80{
81 struct kvmppc_slb *slb;
82
83 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
84 if (!slb)
85 return 0;
86
87 if (slb->large)
88 return (((u64)eaddr >> 12) & 0xfffffff) |
89 (((u64)slb->vsid) << 28);
90
91 return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
92}
93
94static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
95{
96 return slbe->large ? 24 : 12;
97}
98
99static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
100{
101 int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
102 return ((eaddr & 0xfffffff) >> p);
103}
104
105static hva_t kvmppc_mmu_book3s_64_get_pteg(
106 struct kvmppc_vcpu_book3s *vcpu_book3s,
107 struct kvmppc_slb *slbe, gva_t eaddr,
108 bool second)
109{
110 u64 hash, pteg, htabsize;
111 u32 page;
112 hva_t r;
113
114 page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
115 htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);
116
117 hash = slbe->vsid ^ page;
118 if (second)
119 hash = ~hash;
120 hash &= ((1ULL << 39ULL) - 1ULL);
121 hash &= htabsize;
122 hash <<= 7ULL;
123
124 pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
125 pteg |= hash;
126
127 dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
128 page, vcpu_book3s->sdr1, pteg, slbe->vsid);
129
130 r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
131 if (kvm_is_error_hva(r))
132 return r;
133 return r | (pteg & ~PAGE_MASK);
134}
135
136static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
137{
138 int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
139 u64 avpn;
140
141 avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
142 avpn |= slbe->vsid << (28 - p);
143
144 if (p < 24)
145 avpn >>= ((80 - p) - 56) - 8;
146 else
147 avpn <<= 8;
148
149 return avpn;
150}
151
152static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
153 struct kvmppc_pte *gpte, bool data)
154{
155 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
156 struct kvmppc_slb *slbe;
157 hva_t ptegp;
158 u64 pteg[16];
159 u64 avpn = 0;
160 int i;
161 u8 key = 0;
162 bool found = false;
163 bool perm_err = false;
164 int second = 0;
165
166 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
167 if (!slbe)
168 goto no_seg_found;
169
170do_second:
171 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
172 if (kvm_is_error_hva(ptegp))
173 goto no_page_found;
174
175 avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
176
177 if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
178 printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
179 goto no_page_found;
180 }
181
182 if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
183 key = 4;
184 else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
185 key = 4;
186
187 for (i=0; i<16; i+=2) {
188 u64 v = pteg[i];
189 u64 r = pteg[i+1];
190
191 /* Valid check */
192 if (!(v & HPTE_V_VALID))
193 continue;
194 /* Hash check */
195 if ((v & HPTE_V_SECONDARY) != second)
196 continue;
197
198 /* AVPN compare */
199 if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
200 u8 pp = (r & HPTE_R_PP) | key;
201 int eaddr_mask = 0xFFF;
202
203 gpte->eaddr = eaddr;
204 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
205 eaddr,
206 data);
207 if (slbe->large)
208 eaddr_mask = 0xFFFFFF;
209 gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
210 gpte->may_execute = ((r & HPTE_R_N) ? false : true);
211 gpte->may_read = false;
212 gpte->may_write = false;
213
214 switch (pp) {
215 case 0:
216 case 1:
217 case 2:
218 case 6:
219 gpte->may_write = true;
220 /* fall through */
221 case 3:
222 case 5:
223 case 7:
224 gpte->may_read = true;
225 break;
226 }
227
228 if (!gpte->may_read) {
229 perm_err = true;
230 continue;
231 }
232
233 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
234 "-> 0x%llx\n",
235 eaddr, avpn, gpte->vpage, gpte->raddr);
236 found = true;
237 break;
238 }
239 }
240
241 /* Update PTE R and C bits, so the guest's swapper knows we used the
242 * page */
243 if (found) {
244 u32 oldr = pteg[i+1];
245
246 if (gpte->may_read) {
247 /* Set the accessed flag */
248 pteg[i+1] |= HPTE_R_R;
249 }
250 if (gpte->may_write) {
251 /* Set the dirty flag */
252 pteg[i+1] |= HPTE_R_C;
253 } else {
254 dprintk("KVM: Mapping read-only page!\n");
255 }
256
257 /* Write back into the PTEG */
258 if (pteg[i+1] != oldr)
259 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
260
261 return 0;
262 } else {
263 dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
264 "ptegp=0x%lx)\n",
265 eaddr, to_book3s(vcpu)->sdr1, ptegp);
266 for (i = 0; i < 16; i += 2)
267 dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n",
268 i, pteg[i], pteg[i+1], avpn);
269
270 if (!second) {
271 second = HPTE_V_SECONDARY;
272 goto do_second;
273 }
274 }
275
276
277no_page_found:
278
279
280 if (perm_err)
281 return -EPERM;
282
283 return -ENOENT;
284
285no_seg_found:
286
287 dprintk("KVM MMU: Trigger segment fault\n");
288 return -EINVAL;
289}
290
291static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
292{
293 struct kvmppc_vcpu_book3s *vcpu_book3s;
294 u64 esid, esid_1t;
295 int slb_nr;
296 struct kvmppc_slb *slbe;
297
298 dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
299
300 vcpu_book3s = to_book3s(vcpu);
301
302 esid = GET_ESID(rb);
303 esid_1t = GET_ESID_1T(rb);
304 slb_nr = rb & 0xfff;
305
306 if (slb_nr > vcpu_book3s->slb_nr)
307 return;
308
309 slbe = &vcpu_book3s->slb[slb_nr];
310
311 slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
312 slbe->esid = slbe->large ? esid_1t : esid;
313 slbe->vsid = rs >> 12;
314 slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
315 slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
316 slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0;
317 slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
318 slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
319
320 slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
321 slbe->origv = rs;
322
323 /* Map the new segment */
324 kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
325}
326
327static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
328{
329 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
330 struct kvmppc_slb *slbe;
331
332 if (slb_nr > vcpu_book3s->slb_nr)
333 return 0;
334
335 slbe = &vcpu_book3s->slb[slb_nr];
336
337 return slbe->orige;
338}
339
340static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
341{
342 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
343 struct kvmppc_slb *slbe;
344
345 if (slb_nr > vcpu_book3s->slb_nr)
346 return 0;
347
348 slbe = &vcpu_book3s->slb[slb_nr];
349
350 return slbe->origv;
351}
352
353static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
354{
355 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
356 struct kvmppc_slb *slbe;
357
358 dprintk("KVM MMU: slbie(0x%llx)\n", ea);
359
360 slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
361
362 if (!slbe)
363 return;
364
365 dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
366
367 slbe->valid = false;
368
369 kvmppc_mmu_map_segment(vcpu, ea);
370}
371
372static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
373{
374 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
375 int i;
376
377 dprintk("KVM MMU: slbia()\n");
378
379 for (i = 1; i < vcpu_book3s->slb_nr; i++)
380 vcpu_book3s->slb[i].valid = false;
381
382 if (vcpu->arch.msr & MSR_IR) {
383 kvmppc_mmu_flush_segments(vcpu);
384 kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
385 }
386}
387
388static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
389 ulong value)
390{
391 u64 rb = 0, rs = 0;
392
393 /* ESID = srnum */
394 rb |= (srnum & 0xf) << 28;
395 /* Set the valid bit */
396 rb |= 1 << 27;
397 /* Index = ESID */
398 rb |= srnum;
399
400 /* VSID = VSID */
401 rs |= (value & 0xfffffff) << 12;
402 /* flags = flags */
403 rs |= ((value >> 27) & 0xf) << 9;
404
405 kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
406}
407
408static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
409 bool large)
410{
411 u64 mask = 0xFFFFFFFFFULL;
412
413 dprintk("KVM MMU: tlbie(0x%lx)\n", va);
414
415 if (large)
416 mask = 0xFFFFFF000ULL;
417 kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
418}
419
420static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
421 u64 *vsid)
422{
423 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
424 case 0:
425 *vsid = (VSID_REAL >> 16) | esid;
426 break;
427 case MSR_IR:
428 *vsid = (VSID_REAL_IR >> 16) | esid;
429 break;
430 case MSR_DR:
431 *vsid = (VSID_REAL_DR >> 16) | esid;
432 break;
433 case MSR_DR|MSR_IR:
434 {
435 ulong ea;
436 struct kvmppc_slb *slb;
437 ea = esid << SID_SHIFT;
438 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
439 if (slb)
440 *vsid = slb->vsid;
441 else
442 return -ENOENT;
443
444 break;
445 }
446 default:
447 BUG();
448 break;
449 }
450
451 return 0;
452}
453
454static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
455{
456 return (to_book3s(vcpu)->hid[5] & 0x80);
457}
458
459void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
460{
461 struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
462
463 mmu->mfsrin = NULL;
464 mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
465 mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
466 mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
467 mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
468 mmu->slbie = kvmppc_mmu_book3s_64_slbie;
469 mmu->slbia = kvmppc_mmu_book3s_64_slbia;
470 mmu->xlate = kvmppc_mmu_book3s_64_xlate;
471 mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
472 mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
473 mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
474 mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
475 mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
476
477 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
478}
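kvmppc_mmu_book3s_64_get_pteg() above mirrors the hardware hash unit: the primary hash is VSID xor page number, the secondary hash is its complement, the result is truncated to 39 bits, masked by HTABSIZE from SDR1, and scaled by the 128-byte PTEG size before being OR'd with HTABORG. A stand-alone sketch of that address calculation; the SDR1, VSID and page values are made up, and only the 4k-page case is shown.

#include <stdint.h>
#include <stdio.h>

/* PTEG address calculation as in kvmppc_mmu_book3s_64_get_pteg() (4k pages) */
static uint64_t pteg_addr(uint64_t sdr1, uint64_t vsid, uint64_t page, int second)
{
	uint64_t htab_mask = (1ULL << ((sdr1 & 0x1f) + 11)) - 1;  /* from HTABSIZE */
	uint64_t hash = vsid ^ page;				  /* primary hash */

	if (second)
		hash = ~hash;					  /* secondary hash */
	hash &= (1ULL << 39) - 1;
	hash &= htab_mask;

	return (sdr1 & 0xfffffffffffc0000ULL) | (hash << 7);	  /* HTABORG | offset */
}

int main(void)
{
	uint64_t sdr1 = 0x0000000010000004ULL;	/* HTABORG = 0x10000000, HTABSIZE = 4 */
	uint64_t vsid = 0x123456, page = 0x42;

	printf("primary   PTEG at 0x%llx\n",
	       (unsigned long long)pteg_addr(sdr1, vsid, page, 0));
	printf("secondary PTEG at 0x%llx\n",
	       (unsigned long long)pteg_addr(sdr1, vsid, page, 1));
	return 0;
}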
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
new file mode 100644
index 000000000000..f2899b297ffd
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -0,0 +1,408 @@
1/*
2 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 * Kevin Wolf <mail@kevin-wolf.de>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License, version 2, as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */
21
22#include <linux/kvm_host.h>
23
24#include <asm/kvm_ppc.h>
25#include <asm/kvm_book3s.h>
26#include <asm/mmu-hash64.h>
27#include <asm/machdep.h>
28#include <asm/mmu_context.h>
29#include <asm/hw_irq.h>
30
31#define PTE_SIZE 12
32#define VSID_ALL 0
33
34/* #define DEBUG_MMU */
35/* #define DEBUG_SLB */
36
37#ifdef DEBUG_MMU
38#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
39#else
40#define dprintk_mmu(a, ...) do { } while(0)
41#endif
42
43#ifdef DEBUG_SLB
44#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
45#else
46#define dprintk_slb(a, ...) do { } while(0)
47#endif
48
49static void invalidate_pte(struct hpte_cache *pte)
50{
51	dprintk_mmu("KVM: Flushing SPT: 0x%llx (0x%llx) -> 0x%llx\n",
52		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
53
54 ppc_md.hpte_invalidate(pte->slot, pte->host_va,
55 MMU_PAGE_4K, MMU_SEGSIZE_256M,
56 false);
57 pte->host_va = 0;
58 kvm_release_pfn_dirty(pte->pfn);
59}
60
61void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
62{
63 int i;
64
65 dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
66 vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
67 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
68
69 guest_ea &= ea_mask;
70 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
71 struct hpte_cache *pte;
72
73 pte = &vcpu->arch.hpte_cache[i];
74 if (!pte->host_va)
75 continue;
76
77 if ((pte->pte.eaddr & ea_mask) == guest_ea) {
78 invalidate_pte(pte);
79 }
80 }
81
82 /* Doing a complete flush -> start from scratch */
83 if (!ea_mask)
84 vcpu->arch.hpte_cache_offset = 0;
85}
86
87void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
88{
89 int i;
90
91 dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
92 vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
93 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
94
95 guest_vp &= vp_mask;
96 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
97 struct hpte_cache *pte;
98
99 pte = &vcpu->arch.hpte_cache[i];
100 if (!pte->host_va)
101 continue;
102
103 if ((pte->pte.vpage & vp_mask) == guest_vp) {
104 invalidate_pte(pte);
105 }
106 }
107}
108
109void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
110{
111 int i;
112
113 dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
114		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
115 BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
116
117 for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
118 struct hpte_cache *pte;
119
120 pte = &vcpu->arch.hpte_cache[i];
121 if (!pte->host_va)
122 continue;
123
124 if ((pte->pte.raddr >= pa_start) &&
125 (pte->pte.raddr < pa_end)) {
126 invalidate_pte(pte);
127 }
128 }
129}
130
131struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
132{
133 int i;
134 u64 guest_vp;
135
136 guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
137 for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
138 struct hpte_cache *pte;
139
140 pte = &vcpu->arch.hpte_cache[i];
141 if (!pte->host_va)
142 continue;
143
144 if (pte->pte.vpage == guest_vp)
145 return &pte->pte;
146 }
147
148 return NULL;
149}
150
151static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
152{
153 if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
154 kvmppc_mmu_pte_flush(vcpu, 0, 0);
155
156 return vcpu->arch.hpte_cache_offset++;
157}
158
159/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
160 * a hash, so we don't waste cycles on looping */
161static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
162{
163 return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
164 ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
165 ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
166 ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
167 ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
168 ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
169 ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
170 ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
171}
172
173
174static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
175{
176 struct kvmppc_sid_map *map;
177 u16 sid_map_mask;
178
179 if (vcpu->arch.msr & MSR_PR)
180 gvsid |= VSID_PR;
181
182 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
183 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
184 if (map->guest_vsid == gvsid) {
185 dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
186 gvsid, map->host_vsid);
187 return map;
188 }
189
190 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
191 if (map->guest_vsid == gvsid) {
192 dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
193 gvsid, map->host_vsid);
194 return map;
195 }
196
197 dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid);
198 return NULL;
199}
200
201int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
202{
203 pfn_t hpaddr;
204 ulong hash, hpteg, va;
205 u64 vsid;
206 int ret;
207 int rflags = 0x192;
208 int vflags = 0;
209 int attempt = 0;
210 struct kvmppc_sid_map *map;
211
212 /* Get host physical address for gpa */
213 hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
214 if (kvm_is_error_hva(hpaddr)) {
215		printk(KERN_INFO "Couldn't get guest page for ea 0x%llx!\n", orig_pte->eaddr);
216 return -EINVAL;
217 }
218 hpaddr <<= PAGE_SHIFT;
219#if PAGE_SHIFT == 12
220#elif PAGE_SHIFT == 16
221 hpaddr |= orig_pte->raddr & 0xf000;
222#else
223#error Unknown page size
224#endif
225
226 /* and write the mapping ea -> hpa into the pt */
227 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
228 map = find_sid_vsid(vcpu, vsid);
229 if (!map) {
230 kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
231 map = find_sid_vsid(vcpu, vsid);
232 }
233 BUG_ON(!map);
234
235 vsid = map->host_vsid;
236 va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
237
238 if (!orig_pte->may_write)
239 rflags |= HPTE_R_PP;
240 else
241 mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
242
243 if (!orig_pte->may_execute)
244 rflags |= HPTE_R_N;
245
246 hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
247
248map_again:
249 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
250
251 /* In case we tried normal mapping already, let's nuke old entries */
252 if (attempt > 1)
253 if (ppc_md.hpte_remove(hpteg) < 0)
254 return -1;
255
256 ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
257
258 if (ret < 0) {
259 /* If we couldn't map a primary PTE, try a secondary */
260#ifdef USE_SECONDARY
261 hash = ~hash;
262 attempt++;
263 if (attempt % 2)
264 vflags = HPTE_V_SECONDARY;
265 else
266 vflags = 0;
267#else
268 attempt = 2;
269#endif
270 goto map_again;
271 } else {
272 int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
273 struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
274
275 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n",
276 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
277 (rflags & HPTE_R_N) ? '-' : 'x',
278 orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
279
280 pte->slot = hpteg + (ret & 7);
281 pte->host_va = va;
282 pte->pte = *orig_pte;
283 pte->pfn = hpaddr >> PAGE_SHIFT;
284 }
285
286 return 0;
287}
288
289static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
290{
291 struct kvmppc_sid_map *map;
292 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
293 u16 sid_map_mask;
294 static int backwards_map = 0;
295
296 if (vcpu->arch.msr & MSR_PR)
297 gvsid |= VSID_PR;
298
299 /* We might get collisions that trap in preceding order, so let's
300 map them differently */
301
302 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
303 if (backwards_map)
304 sid_map_mask = SID_MAP_MASK - sid_map_mask;
305
306 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
307
308 /* Make sure we're taking the other map next time */
309 backwards_map = !backwards_map;
310
311 /* Uh-oh ... out of mappings. Let's flush! */
312 if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
313 vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
314 memset(vcpu_book3s->sid_map, 0,
315 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
316 kvmppc_mmu_pte_flush(vcpu, 0, 0);
317 kvmppc_mmu_flush_segments(vcpu);
318 }
319 map->host_vsid = vcpu_book3s->vsid_next++;
320
321 map->guest_vsid = gvsid;
322 map->valid = true;
323
324 return map;
325}
326
327static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
328{
329 int i;
330 int max_slb_size = 64;
331 int found_inval = -1;
332 int r;
333
334 if (!get_paca()->kvm_slb_max)
335 get_paca()->kvm_slb_max = 1;
336
337 /* Are we overwriting? */
338 for (i = 1; i < get_paca()->kvm_slb_max; i++) {
339 if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
340 found_inval = i;
341 else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
342 return i;
343 }
344
345 /* Found a spare entry that was invalidated before */
346 if (found_inval > 0)
347 return found_inval;
348
349 /* No spare invalid entry, so create one */
350
351 if (mmu_slb_size < 64)
352 max_slb_size = mmu_slb_size;
353
354 /* Overflowing -> purge */
355 if ((get_paca()->kvm_slb_max) == max_slb_size)
356 kvmppc_mmu_flush_segments(vcpu);
357
358 r = get_paca()->kvm_slb_max;
359 get_paca()->kvm_slb_max++;
360
361 return r;
362}
363
364int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
365{
366 u64 esid = eaddr >> SID_SHIFT;
367 u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
368 u64 slb_vsid = SLB_VSID_USER;
369 u64 gvsid;
370 int slb_index;
371 struct kvmppc_sid_map *map;
372
373 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
374
375 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
376 /* Invalidate an entry */
377 get_paca()->kvm_slb[slb_index].esid = 0;
378 return -ENOENT;
379 }
380
381 map = find_sid_vsid(vcpu, gvsid);
382 if (!map)
383 map = create_sid_map(vcpu, gvsid);
384
385 map->guest_esid = esid;
386
387 slb_vsid |= (map->host_vsid << 12);
388 slb_vsid &= ~SLB_VSID_KP;
389 slb_esid |= slb_index;
390
391 get_paca()->kvm_slb[slb_index].esid = slb_esid;
392 get_paca()->kvm_slb[slb_index].vsid = slb_vsid;
393
394 dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
395
396 return 0;
397}
398
399void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
400{
401 get_paca()->kvm_slb_max = 1;
402 get_paca()->kvm_slb[0].esid = 0;
403}
404
405void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
406{
407 kvmppc_mmu_pte_flush(vcpu, 0, 0);
408}
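find_sid_vsid() above looks a guest VSID up in a small direct-mapped table keyed by kvmppc_sid_hash(), an XOR fold of the VSID in SID_MAP_BITS-wide chunks, with the mirrored slot SID_MAP_MASK - hash acting as a second chance before create_sid_map() allocates a fresh host VSID. The loop below is the same fold written compactly; the 9-bit SID_MAP_BITS / SID_MAP_MASK values are assumptions for this sketch, the real constants come from the kernel headers.

#include <stdint.h>
#include <stdio.h>

#define SID_MAP_BITS	9			/* assumed table-index width */
#define SID_MAP_MASK	((1 << SID_MAP_BITS) - 1)

/* XOR-fold the guest VSID into a table index, like kvmppc_sid_hash() */
static uint16_t sid_hash(uint64_t gvsid)
{
	uint16_t hash = 0;
	int i;

	for (i = 0; i < 8; i++)
		hash ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return hash;
}

int main(void)
{
	uint64_t gvsid = 0x123456789ULL;

	printf("gvsid 0x%llx -> slot 0x%x (mirror 0x%x)\n",
	       (unsigned long long)gvsid, sid_hash(gvsid),
	       SID_MAP_MASK - sid_hash(gvsid));
	return 0;
}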
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
new file mode 100644
index 000000000000..fb7dd2e9ac88
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -0,0 +1,131 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
23#include <asm/page.h>
24#include <asm/asm-offsets.h>
25#include <asm/exception-64s.h>
26
27/*****************************************************************************
28 * *
29 * Real Mode handlers that need to be in low physical memory *
30 * *
31 ****************************************************************************/
32
33
34.macro INTERRUPT_TRAMPOLINE intno
35
36.global kvmppc_trampoline_\intno
37kvmppc_trampoline_\intno:
38
39 mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
40
41 /*
42 * First thing to do is to find out if we're coming
43 * from a KVM guest or a Linux process.
44 *
45 * To distinguish, we check a magic byte in the PACA
46 */
47 mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
48 std r12, (PACA_EXMC + EX_R12)(r13)
49 mfcr r12
50 stw r12, (PACA_EXMC + EX_CCR)(r13)
51 lbz r12, PACA_KVM_IN_GUEST(r13)
52 cmpwi r12, 0
53 bne ..kvmppc_handler_hasmagic_\intno
54 /* No KVM guest? Then jump back to the Linux handler! */
55 lwz r12, (PACA_EXMC + EX_CCR)(r13)
56 mtcr r12
57 ld r12, (PACA_EXMC + EX_R12)(r13)
58 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
59 b kvmppc_resume_\intno /* Get back original handler */
60
61 /* Now we know we're handling a KVM guest */
62..kvmppc_handler_hasmagic_\intno:
63 /* Unset guest state */
64 li r12, 0
65 stb r12, PACA_KVM_IN_GUEST(r13)
66
67 std r1, (PACA_EXMC+EX_R9)(r13)
68 std r10, (PACA_EXMC+EX_R10)(r13)
69 std r11, (PACA_EXMC+EX_R11)(r13)
70 std r2, (PACA_EXMC+EX_R13)(r13)
71
72 mfsrr0 r10
73 mfsrr1 r11
74
75 /* Restore R1/R2 so we can handle faults */
76 ld r1, PACAR1(r13)
77 ld r2, (PACA_EXMC+EX_SRR0)(r13)
78
79 /* Let's store which interrupt we're handling */
80 li r12, \intno
81
82 /* Jump into the SLB exit code that goes to the highmem handler */
83 b kvmppc_handler_trampoline_exit
84
85.endm
86
87INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
88INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
89INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
90INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
91INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
92INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
93INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
94INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
95INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
96INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
97INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DECREMENTER
98INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
99INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
100INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
101INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
102INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
103
104/*
105 * This trampoline brings us back to a real mode handler
106 *
107 * Input Registers:
108 *
109 * R6 = SRR0
110 * R7 = SRR1
111 * LR = real-mode IP
112 *
113 */
114.global kvmppc_handler_lowmem_trampoline
115kvmppc_handler_lowmem_trampoline:
116
117 mtsrr0 r6
118 mtsrr1 r7
119 blr
120kvmppc_handler_lowmem_trampoline_end:
121
122.global kvmppc_trampoline_lowmem
123kvmppc_trampoline_lowmem:
124 .long kvmppc_handler_lowmem_trampoline - _stext
125
126.global kvmppc_trampoline_enter
127kvmppc_trampoline_enter:
128 .long kvmppc_handler_trampoline_enter - _stext
129
130#include "book3s_64_slb.S"
131
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
new file mode 100644
index 000000000000..ecd237a03fd0
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -0,0 +1,262 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
21#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
22#define UNBOLT_SLB_ENTRY(num) \
23 ld r9, SHADOW_SLB_ESID(num)(r12); \
24 /* Invalid? Skip. */; \
25 rldicl. r0, r9, 37, 63; \
26 beq slb_entry_skip_ ## num; \
27 xoris r9, r9, SLB_ESID_V@h; \
28 std r9, SHADOW_SLB_ESID(num)(r12); \
29 slb_entry_skip_ ## num:
30
31#define REBOLT_SLB_ENTRY(num) \
32 ld r10, SHADOW_SLB_ESID(num)(r11); \
33 cmpdi r10, 0; \
34	beq slb_exit_skip_ ## num; \
35 oris r10, r10, SLB_ESID_V@h; \
36 ld r9, SHADOW_SLB_VSID(num)(r11); \
37 slbmte r9, r10; \
38 std r10, SHADOW_SLB_ESID(num)(r11); \
39slb_exit_skip_ ## num:
40
41/******************************************************************************
42 * *
43 * Entry code *
44 * *
45 *****************************************************************************/
46
47.global kvmppc_handler_trampoline_enter
48kvmppc_handler_trampoline_enter:
49
50 /* Required state:
51 *
52 * MSR = ~IR|DR
53 * R13 = PACA
54 * R9 = guest IP
55 * R10 = guest MSR
56 * R11 = free
57 * R12 = free
58 * PACA[PACA_EXMC + EX_R9] = guest R9
59 * PACA[PACA_EXMC + EX_R10] = guest R10
60 * PACA[PACA_EXMC + EX_R11] = guest R11
61 * PACA[PACA_EXMC + EX_R12] = guest R12
62 * PACA[PACA_EXMC + EX_R13] = guest R13
63 * PACA[PACA_EXMC + EX_CCR] = guest CR
64 * PACA[PACA_EXMC + EX_R3] = guest XER
65 */
66
67 mtsrr0 r9
68 mtsrr1 r10
69
70 mtspr SPRN_SPRG_SCRATCH0, r0
71
72 /* Remove LPAR shadow entries */
73
74#if SLB_NUM_BOLTED == 3
75
76 ld r12, PACA_SLBSHADOWPTR(r13)
77
78 /* Save off the first entry so we can slbie it later */
79 ld r10, SHADOW_SLB_ESID(0)(r12)
80 ld r11, SHADOW_SLB_VSID(0)(r12)
81
82 /* Remove bolted entries */
83 UNBOLT_SLB_ENTRY(0)
84 UNBOLT_SLB_ENTRY(1)
85 UNBOLT_SLB_ENTRY(2)
86
87#else
88#error unknown number of bolted entries
89#endif
90
91 /* Flush SLB */
92
93 slbia
94
95 /* r0 = esid & ESID_MASK */
96 rldicr r10, r10, 0, 35
97 /* r0 |= CLASS_BIT(VSID) */
98 rldic r12, r11, 56 - 36, 36
99 or r10, r10, r12
100 slbie r10
101
102 isync
103
104 /* Fill SLB with our shadow */
105
106 lbz r12, PACA_KVM_SLB_MAX(r13)
107 mulli r12, r12, 16
108 addi r12, r12, PACA_KVM_SLB
109 add r12, r12, r13
110
111 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
112 li r11, PACA_KVM_SLB
113 add r11, r11, r13
114
115slb_loop_enter:
116
117 ld r10, 0(r11)
118
119 rldicl. r0, r10, 37, 63
120 beq slb_loop_enter_skip
121
122 ld r9, 8(r11)
123 slbmte r9, r10
124
125slb_loop_enter_skip:
126 addi r11, r11, 16
127 cmpd cr0, r11, r12
128 blt slb_loop_enter
129
130slb_do_enter:
131
132 /* Enter guest */
133
134 mfspr r0, SPRN_SPRG_SCRATCH0
135
136 ld r9, (PACA_EXMC+EX_R9)(r13)
137 ld r10, (PACA_EXMC+EX_R10)(r13)
138 ld r12, (PACA_EXMC+EX_R12)(r13)
139
140 lwz r11, (PACA_EXMC+EX_CCR)(r13)
141 mtcr r11
142
143 ld r11, (PACA_EXMC+EX_R3)(r13)
144 mtxer r11
145
146 ld r11, (PACA_EXMC+EX_R11)(r13)
147 ld r13, (PACA_EXMC+EX_R13)(r13)
148
149 RFI
150kvmppc_handler_trampoline_enter_end:
151
152
153
154/******************************************************************************
155 * *
156 * Exit code *
157 * *
158 *****************************************************************************/
159
160.global kvmppc_handler_trampoline_exit
161kvmppc_handler_trampoline_exit:
162
163 /* Register usage at this point:
164 *
165 * SPRG_SCRATCH0 = guest R13
166 * R01 = host R1
167 * R02 = host R2
168 * R10 = guest PC
169 * R11 = guest MSR
170 * R12 = exit handler id
171 * R13 = PACA
172 * PACA.exmc.CCR = guest CR
173 * PACA.exmc.R9 = guest R1
174 * PACA.exmc.R10 = guest R10
175 * PACA.exmc.R11 = guest R11
176 * PACA.exmc.R12 = guest R12
177 * PACA.exmc.R13 = guest R2
178 *
179 */
180
181 /* Save registers */
182
183 std r0, (PACA_EXMC+EX_SRR0)(r13)
184 std r9, (PACA_EXMC+EX_R3)(r13)
185 std r10, (PACA_EXMC+EX_LR)(r13)
186 std r11, (PACA_EXMC+EX_DAR)(r13)
187
188 /*
189	 * In order for us to easily get the last instruction we
190	 * got the #vmexit at, we exploit the fact that the
191 * virtual layout is still the same here, so we can just
192 * ld from the guest's PC address
193 */
194
195 /* We only load the last instruction when it's safe */
196 cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
197 beq ld_last_inst
198 cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
199 beq ld_last_inst
200
201 b no_ld_last_inst
202
203ld_last_inst:
204 /* Save off the guest instruction we're at */
205 /* 1) enable paging for data */
206 mfmsr r9
207 ori r11, r9, MSR_DR /* Enable paging for data */
208 mtmsr r11
209 /* 2) fetch the instruction */
210 lwz r0, 0(r10)
211 /* 3) disable paging again */
212 mtmsr r9
213
214no_ld_last_inst:
215
216 /* Restore bolted entries from the shadow and fix it along the way */
217
218 /* We don't store anything in entry 0, so we don't need to take care of it */
219 slbia
220 isync
221
222#if SLB_NUM_BOLTED == 3
223
224 ld r11, PACA_SLBSHADOWPTR(r13)
225
226 REBOLT_SLB_ENTRY(0)
227 REBOLT_SLB_ENTRY(1)
228 REBOLT_SLB_ENTRY(2)
229
230#else
231#error unknown number of bolted entries
232#endif
233
234slb_do_exit:
235
236 /* Restore registers */
237
238 ld r11, (PACA_EXMC+EX_DAR)(r13)
239 ld r10, (PACA_EXMC+EX_LR)(r13)
240 ld r9, (PACA_EXMC+EX_R3)(r13)
241
242 /* Save last inst */
243 stw r0, (PACA_EXMC+EX_LR)(r13)
244
245 /* Save DAR and DSISR before going to paged mode */
246 mfdar r0
247 std r0, (PACA_EXMC+EX_DAR)(r13)
248 mfdsisr r0
249 stw r0, (PACA_EXMC+EX_DSISR)(r13)
250
251 /* RFI into the highmem handler */
252 mfmsr r0
253 ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
254 mtsrr1 r0
255 ld r0, PACASAVEDMSR(r13) /* Highmem handler address */
256 mtsrr0 r0
257
258 mfspr r0, SPRN_SPRG_SCRATCH0
259
260 RFI
261kvmppc_handler_trampoline_exit_end:
262
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index e7bf4d029484..06f5a9ecc42c 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -520,6 +520,11 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
520 return kvmppc_core_vcpu_translate(vcpu, tr); 520 return kvmppc_core_vcpu_translate(vcpu, tr);
521} 521}
522 522
523int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
524{
525 return -ENOTSUPP;
526}
527
523int __init kvmppc_booke_init(void) 528int __init kvmppc_booke_init(void)
524{ 529{
525 unsigned long ivor[16]; 530 unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 7737146af3fb..4a9ac6640fad 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -18,7 +18,7 @@
18 */ 18 */
19 19
20#include <linux/jiffies.h> 20#include <linux/jiffies.h>
21#include <linux/timer.h> 21#include <linux/hrtimer.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
@@ -32,6 +32,7 @@
32#include "trace.h" 32#include "trace.h"
33 33
34#define OP_TRAP 3 34#define OP_TRAP 3
35#define OP_TRAP_64 2
35 36
36#define OP_31_XOP_LWZX 23 37#define OP_31_XOP_LWZX 23
37#define OP_31_XOP_LBZX 87 38#define OP_31_XOP_LBZX 87
@@ -64,19 +65,45 @@
64#define OP_STH 44 65#define OP_STH 44
65#define OP_STHU 45 66#define OP_STHU 45
66 67
68#ifdef CONFIG_PPC64
69static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
70{
71 return 1;
72}
73#else
74static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
75{
76 return vcpu->arch.tcr & TCR_DIE;
77}
78#endif
79
67void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) 80void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
68{ 81{
69 if (vcpu->arch.tcr & TCR_DIE) { 82 unsigned long dec_nsec;
83
84 pr_debug("mtDEC: %x\n", vcpu->arch.dec);
85#ifdef CONFIG_PPC64
86 /* POWER4+ triggers a dec interrupt if the value is < 0 */
87 if (vcpu->arch.dec & 0x80000000) {
88 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
89 kvmppc_core_queue_dec(vcpu);
90 return;
91 }
92#endif
93 if (kvmppc_dec_enabled(vcpu)) {
70 /* The decrementer ticks at the same rate as the timebase, so 94 /* The decrementer ticks at the same rate as the timebase, so
71 * that's how we convert the guest DEC value to the number of 95 * that's how we convert the guest DEC value to the number of
72 * host ticks. */ 96 * host ticks. */
73 unsigned long nr_jiffies;
74 97
75 nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy; 98 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
76 mod_timer(&vcpu->arch.dec_timer, 99 dec_nsec = vcpu->arch.dec;
77 get_jiffies_64() + nr_jiffies); 100 dec_nsec *= 1000;
101 dec_nsec /= tb_ticks_per_usec;
102 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
103 HRTIMER_MODE_REL);
104 vcpu->arch.dec_jiffies = get_tb();
78 } else { 105 } else {
79 del_timer(&vcpu->arch.dec_timer); 106 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
80 } 107 }
81} 108}
82 109
@@ -111,9 +138,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
111 /* this default type might be overwritten by subcategories */ 138 /* this default type might be overwritten by subcategories */
112 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 139 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
113 140
141	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
142
114 switch (get_op(inst)) { 143 switch (get_op(inst)) {
115 case OP_TRAP: 144 case OP_TRAP:
145#ifdef CONFIG_PPC64
146 case OP_TRAP_64:
147#else
116 vcpu->arch.esr |= ESR_PTR; 148 vcpu->arch.esr |= ESR_PTR;
149#endif
117 kvmppc_core_queue_program(vcpu); 150 kvmppc_core_queue_program(vcpu);
118 advance = 0; 151 advance = 0;
119 break; 152 break;
@@ -188,17 +221,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
188 case SPRN_SRR1: 221 case SPRN_SRR1:
189 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; 222 vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
190 case SPRN_PVR: 223 case SPRN_PVR:
191 vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break; 224 vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
192 case SPRN_PIR: 225 case SPRN_PIR:
193 vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break; 226 vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
227 case SPRN_MSSSR0:
228 vcpu->arch.gpr[rt] = 0; break;
194 229
195 /* Note: mftb and TBRL/TBWL are user-accessible, so 230 /* Note: mftb and TBRL/TBWL are user-accessible, so
196 * the guest can always access the real TB anyways. 231 * the guest can always access the real TB anyways.
197 * In fact, we probably will never see these traps. */ 232 * In fact, we probably will never see these traps. */
198 case SPRN_TBWL: 233 case SPRN_TBWL:
199 vcpu->arch.gpr[rt] = mftbl(); break; 234 vcpu->arch.gpr[rt] = get_tb() >> 32; break;
200 case SPRN_TBWU: 235 case SPRN_TBWU:
201 vcpu->arch.gpr[rt] = mftbu(); break; 236 vcpu->arch.gpr[rt] = get_tb(); break;
202 237
203 case SPRN_SPRG0: 238 case SPRN_SPRG0:
204 vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; 239 vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
@@ -211,6 +246,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
211 /* Note: SPRG4-7 are user-readable, so we don't get 246 /* Note: SPRG4-7 are user-readable, so we don't get
212 * a trap. */ 247 * a trap. */
213 248
249 case SPRN_DEC:
250 {
251 u64 jd = get_tb() - vcpu->arch.dec_jiffies;
252 vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
253		pr_debug("mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
254 break;
255 }
214 default: 256 default:
215 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); 257 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
216 if (emulated == EMULATE_FAIL) { 258 if (emulated == EMULATE_FAIL) {
@@ -260,6 +302,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
260 case SPRN_TBWL: break; 302 case SPRN_TBWL: break;
261 case SPRN_TBWU: break; 303 case SPRN_TBWU: break;
262 304
305 case SPRN_MSSSR0: break;
306
263 case SPRN_DEC: 307 case SPRN_DEC:
264 vcpu->arch.dec = vcpu->arch.gpr[rs]; 308 vcpu->arch.dec = vcpu->arch.gpr[rs];
265 kvmppc_emulate_dec(vcpu); 309 kvmppc_emulate_dec(vcpu);
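The decrementer rework above moves from jiffies to an hrtimer: the guest DEC counts timebase ticks, so the time until it would hit zero is dec * 1000 / tb_ticks_per_usec nanoseconds, and on 64-bit a DEC value with the top bit set is treated as already expired. A small stand-alone example of that conversion; the 512-ticks-per-microsecond timebase is an arbitrary figure, real hardware reports its own rate.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_ticks_per_usec = 512;	/* example: 512 MHz timebase */
	uint32_t dec = 0x00500000;		/* guest-programmed DEC value */
	uint64_t dec_nsec = (uint64_t)dec * 1000 / tb_ticks_per_usec;

	if (dec & 0x80000000)
		printf("DEC sign bit set: interrupt is queued immediately\n");
	else
		printf("DEC=0x%x expires in %llu ns (~%llu us)\n",
		       dec, (unsigned long long)dec_nsec,
		       (unsigned long long)(dec_nsec / 1000));
	return 0;
}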
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 5902bbc2411e..f06cf93b178e 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -23,6 +23,7 @@
23#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
26#include <linux/hrtimer.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
27#include <asm/cputable.h> 28#include <asm/cputable.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -144,6 +145,9 @@ int kvm_dev_ioctl_check_extension(long ext)
144 int r; 145 int r;
145 146
146 switch (ext) { 147 switch (ext) {
148 case KVM_CAP_PPC_SEGSTATE:
149 r = 1;
150 break;
147 case KVM_CAP_COALESCED_MMIO: 151 case KVM_CAP_COALESCED_MMIO:
148 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 152 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
149 break; 153 break;
@@ -209,10 +213,25 @@ static void kvmppc_decrementer_func(unsigned long data)
209 } 213 }
210} 214}
211 215
216/*
217 * low level hrtimer wake routine. Because this runs in hardirq context
218 * we schedule a tasklet to do the real work.
219 */
220enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
221{
222 struct kvm_vcpu *vcpu;
223
224 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
225 tasklet_schedule(&vcpu->arch.tasklet);
226
227 return HRTIMER_NORESTART;
228}
229
212int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 230int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
213{ 231{
214 setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func, 232 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
215 (unsigned long)vcpu); 233 tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
234 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
216 235
217 return 0; 236 return 0;
218} 237}
@@ -410,11 +429,6 @@ out:
410 return r; 429 return r;
411} 430}
412 431
413int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
414{
415 return -ENOTSUPP;
416}
417
418long kvm_arch_vm_ioctl(struct file *filp, 432long kvm_arch_vm_ioctl(struct file *filp,
419 unsigned int ioctl, unsigned long arg) 433 unsigned int ioctl, unsigned long arg)
420{ 434{
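kvm_arch_vcpu_init() above splits decrementer handling in two because hrtimer callbacks run in hardirq context: the timer callback only schedules a tasklet, and kvmppc_decrementer_func() then does the actual interrupt injection in softirq context. A minimal module sketch of that pattern against the kernel APIs of this era; the names and the one-second expiry are illustrative only.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static struct tasklet_struct demo_tasklet;

/* Runs in softirq context: safe place for the real work */
static void demo_work(unsigned long data)
{
	pr_info("deferred work runs outside hardirq context\n");
}

/* Runs in hardirq context: only hand off to the tasklet */
static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
	tasklet_schedule(&demo_tasklet);
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	tasklet_init(&demo_tasklet, demo_work, 0);
	hrtimer_init(&demo_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
	demo_timer.function = demo_wakeup;
	hrtimer_start(&demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
	tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");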
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 2aa371e30079..70378551c0cc 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -23,6 +23,7 @@
23#include <linux/seq_file.h> 23#include <linux/seq_file.h>
24#include <linux/debugfs.h> 24#include <linux/debugfs.h>
25#include <linux/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/module.h>
26 27
27#include <asm/time.h> 28#include <asm/time.h>
28#include <asm-generic/div64.h> 29#include <asm-generic/div64.h>
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 67f219de0455..a8e840018052 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -12,8 +12,8 @@
12 * Tracepoint for guest mode entry. 12 * Tracepoint for guest mode entry.
13 */ 13 */
14TRACE_EVENT(kvm_ppc_instr, 14TRACE_EVENT(kvm_ppc_instr,
15 TP_PROTO(unsigned int inst, unsigned long pc, unsigned int emulate), 15 TP_PROTO(unsigned int inst, unsigned long _pc, unsigned int emulate),
16 TP_ARGS(inst, pc, emulate), 16 TP_ARGS(inst, _pc, emulate),
17 17
18 TP_STRUCT__entry( 18 TP_STRUCT__entry(
19 __field( unsigned int, inst ) 19 __field( unsigned int, inst )
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_ppc_instr,
23 23
24 TP_fast_assign( 24 TP_fast_assign(
25 __entry->inst = inst; 25 __entry->inst = inst;
26 __entry->pc = pc; 26 __entry->pc = _pc;
27 __entry->emulate = emulate; 27 __entry->emulate = emulate;
28 ), 28 ),
29 29
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index c657de59abca..74a7f4130b4c 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -98,20 +98,7 @@ _GLOBAL(cacheable_memzero)
98 bdnz 4b 98 bdnz 4b
993: mtctr r9 993: mtctr r9
100 li r7,4 100 li r7,4
101#if !defined(CONFIG_8xx)
10210: dcbz r7,r6 10110: dcbz r7,r6
103#else
10410: stw r4, 4(r6)
105 stw r4, 8(r6)
106 stw r4, 12(r6)
107 stw r4, 16(r6)
108#if CACHE_LINE_SIZE >= 32
109 stw r4, 20(r6)
110 stw r4, 24(r6)
111 stw r4, 28(r6)
112 stw r4, 32(r6)
113#endif /* CACHE_LINE_SIZE */
114#endif
115 addi r6,r6,CACHELINE_BYTES 102 addi r6,r6,CACHELINE_BYTES
116 bdnz 10b 103 bdnz 10b
117 clrlwi r5,r8,32-LG_CACHELINE_BYTES 104 clrlwi r5,r8,32-LG_CACHELINE_BYTES
@@ -200,9 +187,7 @@ _GLOBAL(cacheable_memcpy)
200 mtctr r0 187 mtctr r0
201 beq 63f 188 beq 63f
20253: 18953:
203#if !defined(CONFIG_8xx)
204 dcbz r11,r6 190 dcbz r11,r6
205#endif
206 COPY_16_BYTES 191 COPY_16_BYTES
207#if L1_CACHE_BYTES >= 32 192#if L1_CACHE_BYTES >= 32
208 COPY_16_BYTES 193 COPY_16_BYTES
@@ -356,14 +341,6 @@ _GLOBAL(__copy_tofrom_user)
356 li r11,4 341 li r11,4
357 beq 63f 342 beq 63f
358 343
359#ifdef CONFIG_8xx
360 /* Don't use prefetch on 8xx */
361 mtctr r0
362 li r0,0
36353: COPY_16_BYTES_WITHEX(0)
364 bdnz 53b
365
366#else /* not CONFIG_8xx */
367 /* Here we decide how far ahead to prefetch the source */ 344 /* Here we decide how far ahead to prefetch the source */
368 li r3,4 345 li r3,4
369 cmpwi r0,1 346 cmpwi r0,1
@@ -416,7 +393,6 @@ _GLOBAL(__copy_tofrom_user)
416 li r3,4 393 li r3,4
417 li r7,0 394 li r7,0
418 bne 114b 395 bne 114b
419#endif /* CONFIG_8xx */
420 396
42163: srwi. r0,r5,2 39763: srwi. r0,r5,2
422 mtctr r0 398 mtctr r0
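The copy_32.S hunks drop the 8xx-only fallbacks, so cacheable_memzero always zeroes whole cache lines with dcbz between a byte/word head and tail. A rough standalone C analogue of that loop shape, where line_zero() stands in for dcbz and the 32-byte line size is an assumption of the sketch:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CACHE_LINE 32  /* assumed line size for the sketch */

/* Stand-in for dcbz: zero one aligned cache line. */
static void line_zero(void *p) { memset(p, 0, CACHE_LINE); }

static void cacheline_memzero(void *dst, size_t len)
{
	uint8_t *p = dst;
	size_t head = (-(uintptr_t)p) & (CACHE_LINE - 1);

	if (head > len)
		head = len;
	memset(p, 0, head);           /* unaligned head: ordinary stores */
	p += head;
	len -= head;

	while (len >= CACHE_LINE) {   /* whole lines: one dcbz per line */
		line_zero(p);
		p += CACHE_LINE;
		len -= CACHE_LINE;
	}
	memset(p, 0, len);            /* unaligned tail */
}

int main(void)
{
	char buf[100];
	cacheline_memzero(buf + 3, 90);  /* deliberately unaligned start */
	return 0;
}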
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 6fb8fc8d2fea..ce68708bbad5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -28,7 +28,10 @@ obj-$(CONFIG_44x) += 44x_mmu.o
28obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o 28obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
29obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o 29obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
30obj-$(CONFIG_PPC_MM_SLICES) += slice.o 30obj-$(CONFIG_PPC_MM_SLICES) += slice.o
31obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 31ifeq ($(CONFIG_HUGETLB_PAGE),y)
32obj-y += hugetlbpage.o
33obj-$(CONFIG_PPC_STD_MMU_64) += hugetlbpage-hash64.o
34endif
32obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o 35obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
33obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o 36obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
34obj-$(CONFIG_HIGHMEM) += highmem.o 37obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e7dae82c1285..26fb6b990b0a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -40,7 +40,7 @@
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/tlbflush.h> 41#include <asm/tlbflush.h>
42#include <asm/siginfo.h> 42#include <asm/siginfo.h>
43 43#include <mm/mmu_decl.h>
44 44
45#ifdef CONFIG_KPROBES 45#ifdef CONFIG_KPROBES
46static inline int notify_page_fault(struct pt_regs *regs) 46static inline int notify_page_fault(struct pt_regs *regs)
@@ -246,6 +246,12 @@ good_area:
246 goto bad_area; 246 goto bad_area;
247#endif /* CONFIG_6xx */ 247#endif /* CONFIG_6xx */
248#if defined(CONFIG_8xx) 248#if defined(CONFIG_8xx)
 249	/* The 8xx sometimes needs to load invalid/non-present TLBs.
 250	 * These must be invalidated separately, as the Linux mm doesn't.
251 */
252 if (error_code & 0x40000000) /* no translation? */
253 _tlbil_va(address, 0, 0, 0);
254
249 /* The MPC8xx seems to always set 0x80000000, which is 255 /* The MPC8xx seems to always set 0x80000000, which is
250 * "undefined". Of those that can be set, this is the only 256 * "undefined". Of those that can be set, this is the only
251 * one which seems bad. 257 * one which seems bad.
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index dc93e95b256e..fcfcb6e976c7 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -54,26 +54,35 @@
54 54
55#include "mmu_decl.h" 55#include "mmu_decl.h"
56 56
57extern void loadcam_entry(unsigned int index);
58unsigned int tlbcam_index; 57unsigned int tlbcam_index;
59static unsigned long cam[CONFIG_LOWMEM_CAM_NUM];
60 58
61#define NUM_TLBCAMS (16) 59#define NUM_TLBCAMS (64)
62 60
63#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) 61#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
64#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" 62#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
65#endif 63#endif
66 64
67struct tlbcam TLBCAM[NUM_TLBCAMS]; 65struct tlbcam {
66 u32 MAS0;
67 u32 MAS1;
68 unsigned long MAS2;
69 u32 MAS3;
70 u32 MAS7;
71} TLBCAM[NUM_TLBCAMS];
68 72
69struct tlbcamrange { 73struct tlbcamrange {
70 unsigned long start; 74 unsigned long start;
71 unsigned long limit; 75 unsigned long limit;
72 phys_addr_t phys; 76 phys_addr_t phys;
73} tlbcam_addrs[NUM_TLBCAMS]; 77} tlbcam_addrs[NUM_TLBCAMS];
74 78
75extern unsigned int tlbcam_index; 79extern unsigned int tlbcam_index;
76 80
81unsigned long tlbcam_sz(int idx)
82{
83 return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
84}
85
77/* 86/*
78 * Return PA for this VA if it is mapped by a CAM, or 0 87 * Return PA for this VA if it is mapped by a CAM, or 0
79 */ 88 */
@@ -94,23 +103,36 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
94 int b; 103 int b;
95 for (b = 0; b < tlbcam_index; ++b) 104 for (b = 0; b < tlbcam_index; ++b)
96 if (pa >= tlbcam_addrs[b].phys 105 if (pa >= tlbcam_addrs[b].phys
97 && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) 106 && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
98 +tlbcam_addrs[b].phys) 107 +tlbcam_addrs[b].phys)
99 return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); 108 return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
100 return 0; 109 return 0;
101} 110}
102 111
112void loadcam_entry(int idx)
113{
114 mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
115 mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
116 mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
117 mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
118
119 if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
120 mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
121
122 asm volatile("isync;tlbwe;isync" : : : "memory");
123}
124
103/* 125/*
104 * Set up one of the I/D BAT (block address translation) register pairs. 126 * Set up one of the I/D BAT (block address translation) register pairs.
105 * The parameters are not checked; in particular size must be a power 127 * The parameters are not checked; in particular size must be a power
106 * of 4 between 4k and 256M. 128 * of 4 between 4k and 256M.
107 */ 129 */
108void settlbcam(int index, unsigned long virt, phys_addr_t phys, 130static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
109 unsigned int size, int flags, unsigned int pid) 131 unsigned long size, unsigned long flags, unsigned int pid)
110{ 132{
111 unsigned int tsize, lz; 133 unsigned int tsize, lz;
112 134
113 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size)); 135 asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (size));
114 tsize = 21 - lz; 136 tsize = 21 - lz;
115 137
116#ifdef CONFIG_SMP 138#ifdef CONFIG_SMP
@@ -128,8 +150,10 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys,
128 TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; 150 TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
129 TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; 151 TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
130 152
131 TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR; 153 TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR;
132 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0); 154 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
155 if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
156 TLBCAM[index].MAS7 = (u64)phys >> 32;
133 157
134#ifndef CONFIG_KGDB /* want user access for breakpoints */ 158#ifndef CONFIG_KGDB /* want user access for breakpoints */
135 if (flags & _PAGE_USER) { 159 if (flags & _PAGE_USER) {
@@ -148,27 +172,44 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys,
148 loadcam_entry(index); 172 loadcam_entry(index);
149} 173}
150 174
151void invalidate_tlbcam_entry(int index) 175unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
152{
153 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
154 TLBCAM[index].MAS1 = ~MAS1_VALID;
155
156 loadcam_entry(index);
157}
158
159unsigned long __init mmu_mapin_ram(void)
160{ 176{
177 int i;
161 unsigned long virt = PAGE_OFFSET; 178 unsigned long virt = PAGE_OFFSET;
162 phys_addr_t phys = memstart_addr; 179 phys_addr_t phys = memstart_addr;
180 unsigned long amount_mapped = 0;
181 unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf;
182
183 /* Convert (4^max) kB to (2^max) bytes */
184 max_cam = max_cam * 2 + 10;
163 185
164 while (tlbcam_index < ARRAY_SIZE(cam) && cam[tlbcam_index]) { 186 /* Calculate CAM values */
165 settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], PAGE_KERNEL_X, 0); 187 for (i = 0; ram && i < max_cam_idx; i++) {
166 virt += cam[tlbcam_index]; 188 unsigned int camsize = __ilog2(ram) & ~1U;
167 phys += cam[tlbcam_index]; 189 unsigned int align = __ffs(virt | phys) & ~1U;
168 tlbcam_index++; 190 unsigned long cam_sz;
191
192 if (camsize > align)
193 camsize = align;
194 if (camsize > max_cam)
195 camsize = max_cam;
196
197 cam_sz = 1UL << camsize;
198 settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
199
200 ram -= cam_sz;
201 amount_mapped += cam_sz;
202 virt += cam_sz;
203 phys += cam_sz;
169 } 204 }
205 tlbcam_index = i;
206
207 return amount_mapped;
208}
170 209
171 return virt - PAGE_OFFSET; 210unsigned long __init mmu_mapin_ram(void)
211{
212 return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
172} 213}
173 214
174/* 215/*
@@ -179,46 +220,21 @@ void __init MMU_init_hw(void)
179 flush_instruction_cache(); 220 flush_instruction_cache();
180} 221}
181 222
182void __init 223void __init adjust_total_lowmem(void)
183adjust_total_lowmem(void)
184{ 224{
185 phys_addr_t ram; 225 unsigned long ram;
186 unsigned int max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xff;
187 char buf[ARRAY_SIZE(cam) * 5 + 1], *p = buf;
188 int i; 226 int i;
189 unsigned long virt = PAGE_OFFSET & 0xffffffffUL;
190 unsigned long phys = memstart_addr & 0xffffffffUL;
191
192 /* Convert (4^max) kB to (2^max) bytes */
193 max_cam = max_cam * 2 + 10;
194 227
195 /* adjust lowmem size to __max_low_memory */ 228 /* adjust lowmem size to __max_low_memory */
196 ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); 229 ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
197 230
198 /* Calculate CAM values */ 231 __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
199 __max_low_memory = 0;
200 for (i = 0; ram && i < ARRAY_SIZE(cam); i++) {
201 unsigned int camsize = __ilog2(ram) & ~1U;
202 unsigned int align = __ffs(virt | phys) & ~1U;
203 232
204 if (camsize > align) 233 pr_info("Memory CAM mapping: ");
205 camsize = align; 234 for (i = 0; i < tlbcam_index - 1; i++)
206 if (camsize > max_cam) 235 pr_cont("%lu/", tlbcam_sz(i) >> 20);
207 camsize = max_cam; 236 pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
208
209 cam[i] = 1UL << camsize;
210 ram -= cam[i];
211 __max_low_memory += cam[i];
212 virt += cam[i];
213 phys += cam[i];
214
215 p += sprintf(p, "%lu/", cam[i] >> 20);
216 }
217 for (; i < ARRAY_SIZE(cam); i++)
218 p += sprintf(p, "0/");
219 p[-1] = '\0';
220
221 pr_info("Memory CAM mapping: %s Mb, residual: %dMb\n", buf,
222 (unsigned int)((total_lowmem - __max_low_memory) >> 20)); 237 (unsigned int)((total_lowmem - __max_low_memory) >> 20));
238
223 __initial_memory_limit_addr = memstart_addr + __max_low_memory; 239 __initial_memory_limit_addr = memstart_addr + __max_low_memory;
224} 240}
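map_mem_in_cams() above sizes each CAM entry as the largest even power of two that fits the remaining RAM, the current virtual/physical alignment, and the hardware's maximum entry size. A standalone sketch of that calculation; the ilog2u()/ffsu() helpers and the example memory layout are illustrative, and max_cam would really come from SPRN_TLB1CFG:

#include <stdio.h>

/* Index of the highest / lowest set bit; both assume a non-zero argument. */
static unsigned int ilog2u(unsigned long long v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}
static unsigned int ffsu(unsigned long long v)
{
	unsigned int r = 0;
	while (!(v & 1)) { v >>= 1; r++; }
	return r;
}

int main(void)
{
	unsigned long long ram  = 0x30000000ULL;  /* 768 MB to map (example) */
	unsigned long long virt = 0xc0000000ULL;  /* PAGE_OFFSET */
	unsigned long long phys = 0x00000000ULL;
	unsigned int max_cam = 28;                /* assume 256 MB max entry */
	int i, max_cam_idx = 4;

	for (i = 0; ram && i < max_cam_idx; i++) {
		unsigned int camsize = ilog2u(ram) & ~1U;        /* even power of 2 <= ram */
		unsigned int align   = ffsu(virt | phys) & ~1U;  /* address alignment limit */
		unsigned long long sz;

		if (camsize > align)   camsize = align;
		if (camsize > max_cam) camsize = max_cam;
		sz = 1ULL << camsize;

		printf("CAM%d: %llu MB at virt 0x%llx\n", i, sz >> 20, virt);
		ram -= sz; virt += sz; phys += sz;
	}
	return 0;
}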
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index bc122a120bf0..d7efdbf640c7 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -55,57 +55,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
55 return 1; 55 return 1;
56} 56}
57 57
58#ifdef CONFIG_HUGETLB_PAGE
59static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
60 unsigned long *addr, unsigned long end,
61 int write, struct page **pages, int *nr)
62{
63 unsigned long mask;
64 unsigned long pte_end;
65 struct page *head, *page;
66 pte_t pte;
67 int refs;
68
69 pte_end = (*addr + huge_page_size(hstate)) & huge_page_mask(hstate);
70 if (pte_end < end)
71 end = pte_end;
72
73 pte = *ptep;
74 mask = _PAGE_PRESENT|_PAGE_USER;
75 if (write)
76 mask |= _PAGE_RW;
77 if ((pte_val(pte) & mask) != mask)
78 return 0;
79 /* hugepages are never "special" */
80 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
81
82 refs = 0;
83 head = pte_page(pte);
84 page = head + ((*addr & ~huge_page_mask(hstate)) >> PAGE_SHIFT);
85 do {
86 VM_BUG_ON(compound_head(page) != head);
87 pages[*nr] = page;
88 (*nr)++;
89 page++;
90 refs++;
91 } while (*addr += PAGE_SIZE, *addr != end);
92
93 if (!page_cache_add_speculative(head, refs)) {
94 *nr -= refs;
95 return 0;
96 }
97 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
98 /* Could be optimized better */
99 while (*nr) {
100 put_page(page);
101 (*nr)--;
102 }
103 }
104
105 return 1;
106}
107#endif /* CONFIG_HUGETLB_PAGE */
108
109static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, 58static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
110 int write, struct page **pages, int *nr) 59 int write, struct page **pages, int *nr)
111{ 60{
@@ -119,7 +68,11 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
119 next = pmd_addr_end(addr, end); 68 next = pmd_addr_end(addr, end);
120 if (pmd_none(pmd)) 69 if (pmd_none(pmd))
121 return 0; 70 return 0;
122 if (!gup_pte_range(pmd, addr, next, write, pages, nr)) 71 if (is_hugepd(pmdp)) {
72 if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
73 addr, next, write, pages, nr))
74 return 0;
75 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
123 return 0; 76 return 0;
124 } while (pmdp++, addr = next, addr != end); 77 } while (pmdp++, addr = next, addr != end);
125 78
@@ -139,7 +92,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
139 next = pud_addr_end(addr, end); 92 next = pud_addr_end(addr, end);
140 if (pud_none(pud)) 93 if (pud_none(pud))
141 return 0; 94 return 0;
142 if (!gup_pmd_range(pud, addr, next, write, pages, nr)) 95 if (is_hugepd(pudp)) {
96 if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
97 addr, next, write, pages, nr))
98 return 0;
99 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
143 return 0; 100 return 0;
144 } while (pudp++, addr = next, addr != end); 101 } while (pudp++, addr = next, addr != end);
145 102
@@ -154,10 +111,6 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
154 unsigned long next; 111 unsigned long next;
155 pgd_t *pgdp; 112 pgd_t *pgdp;
156 int nr = 0; 113 int nr = 0;
157#ifdef CONFIG_PPC64
158 unsigned int shift;
159 int psize;
160#endif
161 114
162 pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read"); 115 pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
163 116
@@ -172,25 +125,6 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
172 125
173 pr_devel(" aligned: %lx .. %lx\n", start, end); 126 pr_devel(" aligned: %lx .. %lx\n", start, end);
174 127
175#ifdef CONFIG_HUGETLB_PAGE
176 /* We bail out on slice boundary crossing when hugetlb is
177 * enabled in order to not have to deal with two different
178 * page table formats
179 */
180 if (addr < SLICE_LOW_TOP) {
181 if (end > SLICE_LOW_TOP)
182 goto slow_irqon;
183
184 if (unlikely(GET_LOW_SLICE_INDEX(addr) !=
185 GET_LOW_SLICE_INDEX(end - 1)))
186 goto slow_irqon;
187 } else {
188 if (unlikely(GET_HIGH_SLICE_INDEX(addr) !=
189 GET_HIGH_SLICE_INDEX(end - 1)))
190 goto slow_irqon;
191 }
192#endif /* CONFIG_HUGETLB_PAGE */
193
194 /* 128 /*
195 * XXX: batch / limit 'nr', to avoid large irq off latency 129 * XXX: batch / limit 'nr', to avoid large irq off latency
196 * needs some instrumenting to determine the common sizes used by 130 * needs some instrumenting to determine the common sizes used by
@@ -210,54 +144,23 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
210 */ 144 */
211 local_irq_disable(); 145 local_irq_disable();
212 146
213#ifdef CONFIG_PPC64 147 pgdp = pgd_offset(mm, addr);
214 /* Those bits are related to hugetlbfs implementation and only exist 148 do {
215 * on 64-bit for now 149 pgd_t pgd = *pgdp;
216 */ 150
217 psize = get_slice_psize(mm, addr); 151 pr_devel(" %016lx: normal pgd %p\n", addr,
218 shift = mmu_psize_defs[psize].shift; 152 (void *)pgd_val(pgd));
219#endif /* CONFIG_PPC64 */ 153 next = pgd_addr_end(addr, end);
220 154 if (pgd_none(pgd))
221#ifdef CONFIG_HUGETLB_PAGE 155 goto slow;
222 if (unlikely(mmu_huge_psizes[psize])) { 156 if (is_hugepd(pgdp)) {
223 pte_t *ptep; 157 if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
224 unsigned long a = addr; 158 addr, next, write, pages, &nr))
225 unsigned long sz = ((1UL) << shift);
226 struct hstate *hstate = size_to_hstate(sz);
227
228 BUG_ON(!hstate);
229 /*
230 * XXX: could be optimized to avoid hstate
231 * lookup entirely (just use shift)
232 */
233
234 do {
235 VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
236 ptep = huge_pte_offset(mm, a);
237 pr_devel(" %016lx: huge ptep %p\n", a, ptep);
238 if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
239 &nr))
240 goto slow;
241 } while (a != end);
242 } else
243#endif /* CONFIG_HUGETLB_PAGE */
244 {
245 pgdp = pgd_offset(mm, addr);
246 do {
247 pgd_t pgd = *pgdp;
248
249#ifdef CONFIG_PPC64
250 VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
251#endif
252 pr_devel(" %016lx: normal pgd %p\n", addr,
253 (void *)pgd_val(pgd));
254 next = pgd_addr_end(addr, end);
255 if (pgd_none(pgd))
256 goto slow;
257 if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
258 goto slow; 159 goto slow;
259 } while (pgdp++, addr = next, addr != end); 160 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
260 } 161 goto slow;
162 } while (pgdp++, addr = next, addr != end);
163
261 local_irq_enable(); 164 local_irq_enable();
262 165
263 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); 166 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
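The fast-GUP walk above can hand any level's entry straight to gup_hugepd() because a hugepage directory entry is self-describing: the low bits of the pointer to the hugepte table carry the hugepage shift, as set up in the hugetlbpage.c hunk later in this patch. A standalone sketch of that encoding and decoding; the 0x3f mask, the linear-map constants and the addresses are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define HUGEPD_SHIFT_MASK  0x3fULL               /* assumed width of the tag */
#define KERNEL_LINEAR_BASE 0xc000000000000000ULL /* per the hugepd_page() hunk */

typedef struct { uint64_t pd; } hugepd_t;

/* Encode: strip the top linear-map bit and tag the shift in the low bits.
 * This only works because the hugepte table is allocated with at least
 * 64-byte alignment, leaving the low bits free. */
static hugepd_t hugepd_make(uint64_t table_va, unsigned int pshift)
{
	hugepd_t hpd;
	hpd.pd = (table_va & ~0x8000000000000000ULL) | pshift;
	return hpd;
}

static unsigned int hugepd_shift(hugepd_t hpd)
{
	return (unsigned int)(hpd.pd & HUGEPD_SHIFT_MASK);
}

static uint64_t hugepd_table(hugepd_t hpd)
{
	return (hpd.pd & ~HUGEPD_SHIFT_MASK) | KERNEL_LINEAR_BASE;
}

int main(void)
{
	/* Hypothetical hugepte table address and a 16 MB page shift (24). */
	hugepd_t hpd = hugepd_make(0xc000000012345000ULL, 24);

	printf("shift = %u, table = 0x%016llx\n",
	       hugepd_shift(hpd), (unsigned long long)hugepd_table(hpd));
	return 0;
}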
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1ade7eb6ae00..50f867d657df 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,6 +92,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
92struct hash_pte *htab_address; 92struct hash_pte *htab_address;
93unsigned long htab_size_bytes; 93unsigned long htab_size_bytes;
94unsigned long htab_hash_mask; 94unsigned long htab_hash_mask;
95EXPORT_SYMBOL_GPL(htab_hash_mask);
95int mmu_linear_psize = MMU_PAGE_4K; 96int mmu_linear_psize = MMU_PAGE_4K;
96int mmu_virtual_psize = MMU_PAGE_4K; 97int mmu_virtual_psize = MMU_PAGE_4K;
97int mmu_vmalloc_psize = MMU_PAGE_4K; 98int mmu_vmalloc_psize = MMU_PAGE_4K;
@@ -102,6 +103,7 @@ int mmu_io_psize = MMU_PAGE_4K;
102int mmu_kernel_ssize = MMU_SEGSIZE_256M; 103int mmu_kernel_ssize = MMU_SEGSIZE_256M;
103int mmu_highuser_ssize = MMU_SEGSIZE_256M; 104int mmu_highuser_ssize = MMU_SEGSIZE_256M;
104u16 mmu_slb_size = 64; 105u16 mmu_slb_size = 64;
106EXPORT_SYMBOL_GPL(mmu_slb_size);
105#ifdef CONFIG_HUGETLB_PAGE 107#ifdef CONFIG_HUGETLB_PAGE
106unsigned int HPAGE_SHIFT; 108unsigned int HPAGE_SHIFT;
107#endif 109#endif
@@ -481,16 +483,6 @@ static void __init htab_init_page_sizes(void)
481#ifdef CONFIG_HUGETLB_PAGE 483#ifdef CONFIG_HUGETLB_PAGE
482 /* Reserve 16G huge page memory sections for huge pages */ 484 /* Reserve 16G huge page memory sections for huge pages */
483 of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL); 485 of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
484
485/* Set default large page size. Currently, we pick 16M or 1M depending
486 * on what is available
487 */
488 if (mmu_psize_defs[MMU_PAGE_16M].shift)
489 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
490 /* With 4k/4level pagetables, we can't (for now) cope with a
491 * huge page size < PMD_SIZE */
492 else if (mmu_psize_defs[MMU_PAGE_1M].shift)
493 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
494#endif /* CONFIG_HUGETLB_PAGE */ 486#endif /* CONFIG_HUGETLB_PAGE */
495} 487}
496 488
@@ -785,7 +777,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
785 /* page is dirty */ 777 /* page is dirty */
786 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) { 778 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
787 if (trap == 0x400) { 779 if (trap == 0x400) {
788 __flush_dcache_icache(page_address(page)); 780 flush_dcache_icache_page(page);
789 set_bit(PG_arch_1, &page->flags); 781 set_bit(PG_arch_1, &page->flags);
790 } else 782 } else
791 pp |= HPTE_R_N; 783 pp |= HPTE_R_N;
@@ -843,9 +835,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
843 * Result is 0: full permissions, _PAGE_RW: read-only, 835 * Result is 0: full permissions, _PAGE_RW: read-only,
844 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access. 836 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
845 */ 837 */
846static int subpage_protection(pgd_t *pgdir, unsigned long ea) 838static int subpage_protection(struct mm_struct *mm, unsigned long ea)
847{ 839{
848 struct subpage_prot_table *spt = pgd_subpage_prot(pgdir); 840 struct subpage_prot_table *spt = &mm->context.spt;
849 u32 spp = 0; 841 u32 spp = 0;
850 u32 **sbpm, *sbpp; 842 u32 **sbpm, *sbpp;
851 843
@@ -873,7 +865,7 @@ static int subpage_protection(pgd_t *pgdir, unsigned long ea)
873} 865}
874 866
875#else /* CONFIG_PPC_SUBPAGE_PROT */ 867#else /* CONFIG_PPC_SUBPAGE_PROT */
876static inline int subpage_protection(pgd_t *pgdir, unsigned long ea) 868static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
877{ 869{
878 return 0; 870 return 0;
879} 871}
@@ -891,6 +883,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
891 unsigned long vsid; 883 unsigned long vsid;
892 struct mm_struct *mm; 884 struct mm_struct *mm;
893 pte_t *ptep; 885 pte_t *ptep;
886 unsigned hugeshift;
894 const struct cpumask *tmp; 887 const struct cpumask *tmp;
895 int rc, user_region = 0, local = 0; 888 int rc, user_region = 0, local = 0;
896 int psize, ssize; 889 int psize, ssize;
@@ -943,30 +936,31 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
943 if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) 936 if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
944 local = 1; 937 local = 1;
945 938
946#ifdef CONFIG_HUGETLB_PAGE
947 /* Handle hugepage regions */
948 if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
949 DBG_LOW(" -> huge page !\n");
950 return hash_huge_page(mm, access, ea, vsid, local, trap);
951 }
952#endif /* CONFIG_HUGETLB_PAGE */
953
954#ifndef CONFIG_PPC_64K_PAGES 939#ifndef CONFIG_PPC_64K_PAGES
955 /* If we use 4K pages and our psize is not 4K, then we are hitting 940 /* If we use 4K pages and our psize is not 4K, then we might
956 * a special driver mapping, we need to align the address before 941 * be hitting a special driver mapping, and need to align the
957 * we fetch the PTE 942 * address before we fetch the PTE.
943 *
944 * It could also be a hugepage mapping, in which case this is
945 * not necessary, but it's not harmful, either.
958 */ 946 */
959 if (psize != MMU_PAGE_4K) 947 if (psize != MMU_PAGE_4K)
960 ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 948 ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
961#endif /* CONFIG_PPC_64K_PAGES */ 949#endif /* CONFIG_PPC_64K_PAGES */
962 950
963 /* Get PTE and page size from page tables */ 951 /* Get PTE and page size from page tables */
964 ptep = find_linux_pte(pgdir, ea); 952 ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
965 if (ptep == NULL || !pte_present(*ptep)) { 953 if (ptep == NULL || !pte_present(*ptep)) {
966 DBG_LOW(" no PTE !\n"); 954 DBG_LOW(" no PTE !\n");
967 return 1; 955 return 1;
968 } 956 }
969 957
958#ifdef CONFIG_HUGETLB_PAGE
959 if (hugeshift)
960 return __hash_page_huge(ea, access, vsid, ptep, trap, local,
961 ssize, hugeshift, psize);
962#endif /* CONFIG_HUGETLB_PAGE */
963
970#ifndef CONFIG_PPC_64K_PAGES 964#ifndef CONFIG_PPC_64K_PAGES
971 DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); 965 DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
972#else 966#else
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
new file mode 100644
index 000000000000..199539882f92
--- /dev/null
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -0,0 +1,139 @@
1/*
2 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
3 *
4 * Copyright (C) 2003 David Gibson, IBM Corporation.
5 *
6 * Based on the IA-32 version:
7 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
8 */
9
10#include <linux/mm.h>
11#include <linux/hugetlb.h>
12#include <asm/pgtable.h>
13#include <asm/pgalloc.h>
14#include <asm/cacheflush.h>
15#include <asm/machdep.h>
16
17int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
18 pte_t *ptep, unsigned long trap, int local, int ssize,
19 unsigned int shift, unsigned int mmu_psize)
20{
21 unsigned long old_pte, new_pte;
22 unsigned long va, rflags, pa, sz;
23 long slot;
24 int err = 1;
25
26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
27
28 /* Search the Linux page table for a match with va */
29 va = hpt_va(ea, vsid, ssize);
30
31 /*
32 * Check the user's access rights to the page. If access should be
33 * prevented then send the problem up to do_page_fault.
34 */
35 if (unlikely(access & ~pte_val(*ptep)))
36 goto out;
37 /*
38 * At this point, we have a pte (old_pte) which can be used to build
39 * or update an HPTE. There are 2 cases:
40 *
41 * 1. There is a valid (present) pte with no associated HPTE (this is
42 * the most common case)
43 * 2. There is a valid (present) pte with an associated HPTE. The
44 * current values of the pp bits in the HPTE prevent access
45 * because we are doing software DIRTY bit management and the
46 * page is currently not DIRTY.
47 */
48
49
50 do {
51 old_pte = pte_val(*ptep);
52 if (old_pte & _PAGE_BUSY)
53 goto out;
54 new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
55 } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
56 old_pte, new_pte));
57
58 rflags = 0x2 | (!(new_pte & _PAGE_RW));
59 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
60 rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
61 sz = ((1UL) << shift);
62 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
63 /* No CPU has hugepages but lacks no execute, so we
64 * don't need to worry about that case */
65 rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
66
67 /* Check if pte already has an hpte (case 2) */
68 if (unlikely(old_pte & _PAGE_HASHPTE)) {
69 /* There MIGHT be an HPTE for this pte */
70 unsigned long hash, slot;
71
72 hash = hpt_hash(va, shift, ssize);
73 if (old_pte & _PAGE_F_SECOND)
74 hash = ~hash;
75 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
76 slot += (old_pte & _PAGE_F_GIX) >> 12;
77
78 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
79 ssize, local) == -1)
80 old_pte &= ~_PAGE_HPTEFLAGS;
81 }
82
83 if (likely(!(old_pte & _PAGE_HASHPTE))) {
84 unsigned long hash = hpt_hash(va, shift, ssize);
85 unsigned long hpte_group;
86
87 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
88
89repeat:
90 hpte_group = ((hash & htab_hash_mask) *
91 HPTES_PER_GROUP) & ~0x7UL;
92
 93		/* clear HPTE slot information in new PTE */
94#ifdef CONFIG_PPC_64K_PAGES
95 new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
96#else
97 new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
98#endif
99 /* Add in WIMG bits */
100 rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
101 _PAGE_COHERENT | _PAGE_GUARDED));
102
103 /* Insert into the hash table, primary slot */
104 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
105 mmu_psize, ssize);
106
107 /* Primary is full, try the secondary */
108 if (unlikely(slot == -1)) {
109 hpte_group = ((~hash & htab_hash_mask) *
110 HPTES_PER_GROUP) & ~0x7UL;
111 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
112 HPTE_V_SECONDARY,
113 mmu_psize, ssize);
114 if (slot == -1) {
115 if (mftb() & 0x1)
116 hpte_group = ((hash & htab_hash_mask) *
117 HPTES_PER_GROUP)&~0x7UL;
118
119 ppc_md.hpte_remove(hpte_group);
120 goto repeat;
121 }
122 }
123
124 if (unlikely(slot == -2))
125 panic("hash_huge_page: pte_insert failed\n");
126
127 new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
128 }
129
130 /*
131 * No need to use ldarx/stdcx here
132 */
133 *ptep = __pte(new_pte & ~_PAGE_BUSY);
134
135 err = 0;
136
137 out:
138 return err;
139}
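When __hash_page_huge() above inserts a new HPTE, the primary PTE group index is derived from the hash and the secondary group from its complement, which is what the retry path relies on when the primary group is full. A standalone sketch of just that index arithmetic; the htab_hash_mask value is an assumed example (a 16 MB hash table), and the hash itself would come from hpt_hash():

#include <stdint.h>
#include <stdio.h>

#define HPTES_PER_GROUP 8

/* Assumed mask for a 16 MB hash table of 16-byte HPTEs:
 * (16 MB / 16) / 8 per group = 128K groups. */
static const uint64_t htab_hash_mask = (1ULL << 17) - 1;

/* Primary and secondary PTE-group offsets for a given hash value,
 * following the "((hash & mask) * HPTES_PER_GROUP) & ~0x7UL" pattern
 * used by the insertion path above. */
static uint64_t primary_group(uint64_t hash)
{
	return ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7ULL;
}

static uint64_t secondary_group(uint64_t hash)
{
	return ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7ULL;
}

int main(void)
{
	uint64_t hash = 0x123456789abcdefULL;  /* example output of hpt_hash() */

	printf("primary   group offset: %llu\n",
	       (unsigned long long)primary_group(hash));
	printf("secondary group offset: %llu\n",
	       (unsigned long long)secondary_group(hash));
	return 0;
}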
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90df6ffe3a43..123f7070238a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -7,29 +7,17 @@
7 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> 7 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
8 */ 8 */
9 9
10#include <linux/init.h>
11#include <linux/fs.h>
12#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/io.h>
13#include <linux/hugetlb.h> 12#include <linux/hugetlb.h>
14#include <linux/pagemap.h> 13#include <asm/pgtable.h>
15#include <linux/slab.h>
16#include <linux/err.h>
17#include <linux/sysctl.h>
18#include <asm/mman.h>
19#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
20#include <asm/tlb.h> 15#include <asm/tlb.h>
21#include <asm/tlbflush.h>
22#include <asm/mmu_context.h>
23#include <asm/machdep.h>
24#include <asm/cputable.h>
25#include <asm/spu.h>
26 16
27#define PAGE_SHIFT_64K 16 17#define PAGE_SHIFT_64K 16
28#define PAGE_SHIFT_16M 24 18#define PAGE_SHIFT_16M 24
29#define PAGE_SHIFT_16G 34 19#define PAGE_SHIFT_16G 34
30 20
31#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
32#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
33#define MAX_NUMBER_GPAGES 1024 21#define MAX_NUMBER_GPAGES 1024
34 22
35/* Tracks the 16G pages after the device tree is scanned and before the 23/* Tracks the 16G pages after the device tree is scanned and before the
@@ -37,53 +25,17 @@
37static unsigned long gpage_freearray[MAX_NUMBER_GPAGES]; 25static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
38static unsigned nr_gpages; 26static unsigned nr_gpages;
39 27
40/* Array of valid huge page sizes - non-zero value(hugepte_shift) is
41 * stored for the huge page sizes that are valid.
42 */
43unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
44
45#define hugepte_shift mmu_huge_psizes
46#define PTRS_PER_HUGEPTE(psize) (1 << hugepte_shift[psize])
47#define HUGEPTE_TABLE_SIZE(psize) (sizeof(pte_t) << hugepte_shift[psize])
48
49#define HUGEPD_SHIFT(psize) (mmu_psize_to_shift(psize) \
50 + hugepte_shift[psize])
51#define HUGEPD_SIZE(psize) (1UL << HUGEPD_SHIFT(psize))
52#define HUGEPD_MASK(psize) (~(HUGEPD_SIZE(psize)-1))
53
54/* Subtract one from array size because we don't need a cache for 4K since
55 * is not a huge page size */
56#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1)
57#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
58
59static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
60 [MMU_PAGE_64K] = "hugepte_cache_64K",
61 [MMU_PAGE_1M] = "hugepte_cache_1M",
62 [MMU_PAGE_16M] = "hugepte_cache_16M",
63 [MMU_PAGE_16G] = "hugepte_cache_16G",
64};
65
66/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad() 28/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad()
67 * will choke on pointers to hugepte tables, which is handy for 29 * will choke on pointers to hugepte tables, which is handy for
68 * catching screwups early. */ 30 * catching screwups early. */
69#define HUGEPD_OK 0x1
70
71typedef struct { unsigned long pd; } hugepd_t;
72
73#define hugepd_none(hpd) ((hpd).pd == 0)
74 31
75static inline int shift_to_mmu_psize(unsigned int shift) 32static inline int shift_to_mmu_psize(unsigned int shift)
76{ 33{
77 switch (shift) { 34 int psize;
78#ifndef CONFIG_PPC_64K_PAGES 35
79 case PAGE_SHIFT_64K: 36 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
80 return MMU_PAGE_64K; 37 if (mmu_psize_defs[psize].shift == shift)
81#endif 38 return psize;
82 case PAGE_SHIFT_16M:
83 return MMU_PAGE_16M;
84 case PAGE_SHIFT_16G:
85 return MMU_PAGE_16G;
86 }
87 return -1; 39 return -1;
88} 40}
89 41
@@ -94,71 +46,126 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
94 BUG(); 46 BUG();
95} 47}
96 48
49#define hugepd_none(hpd) ((hpd).pd == 0)
50
97static inline pte_t *hugepd_page(hugepd_t hpd) 51static inline pte_t *hugepd_page(hugepd_t hpd)
98{ 52{
99 BUG_ON(!(hpd.pd & HUGEPD_OK)); 53 BUG_ON(!hugepd_ok(hpd));
100 return (pte_t *)(hpd.pd & ~HUGEPD_OK); 54 return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000);
55}
56
57static inline unsigned int hugepd_shift(hugepd_t hpd)
58{
59 return hpd.pd & HUGEPD_SHIFT_MASK;
101} 60}
102 61
103static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, 62static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift)
104 struct hstate *hstate)
105{ 63{
106 unsigned int shift = huge_page_shift(hstate); 64 unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
107 int psize = shift_to_mmu_psize(shift);
108 unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
109 pte_t *dir = hugepd_page(*hpdp); 65 pte_t *dir = hugepd_page(*hpdp);
110 66
111 return dir + idx; 67 return dir + idx;
112} 68}
113 69
70pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
71{
72 pgd_t *pg;
73 pud_t *pu;
74 pmd_t *pm;
75 hugepd_t *hpdp = NULL;
76 unsigned pdshift = PGDIR_SHIFT;
77
78 if (shift)
79 *shift = 0;
80
81 pg = pgdir + pgd_index(ea);
82 if (is_hugepd(pg)) {
83 hpdp = (hugepd_t *)pg;
84 } else if (!pgd_none(*pg)) {
85 pdshift = PUD_SHIFT;
86 pu = pud_offset(pg, ea);
87 if (is_hugepd(pu))
88 hpdp = (hugepd_t *)pu;
89 else if (!pud_none(*pu)) {
90 pdshift = PMD_SHIFT;
91 pm = pmd_offset(pu, ea);
92 if (is_hugepd(pm))
93 hpdp = (hugepd_t *)pm;
94 else if (!pmd_none(*pm)) {
95 return pte_offset_map(pm, ea);
96 }
97 }
98 }
99
100 if (!hpdp)
101 return NULL;
102
103 if (shift)
104 *shift = hugepd_shift(*hpdp);
105 return hugepte_offset(hpdp, ea, pdshift);
106}
107
108pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
109{
110 return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
111}
112
114static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, 113static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
115 unsigned long address, unsigned int psize) 114 unsigned long address, unsigned pdshift, unsigned pshift)
116{ 115{
117 pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], 116 pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift),
118 GFP_KERNEL|__GFP_REPEAT); 117 GFP_KERNEL|__GFP_REPEAT);
118
119 BUG_ON(pshift > HUGEPD_SHIFT_MASK);
120 BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
119 121
120 if (! new) 122 if (! new)
121 return -ENOMEM; 123 return -ENOMEM;
122 124
123 spin_lock(&mm->page_table_lock); 125 spin_lock(&mm->page_table_lock);
124 if (!hugepd_none(*hpdp)) 126 if (!hugepd_none(*hpdp))
125 kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new); 127 kmem_cache_free(PGT_CACHE(pdshift - pshift), new);
126 else 128 else
127 hpdp->pd = (unsigned long)new | HUGEPD_OK; 129 hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift;
128 spin_unlock(&mm->page_table_lock); 130 spin_unlock(&mm->page_table_lock);
129 return 0; 131 return 0;
130} 132}
131 133
132 134pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
133static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
134{
135 if (huge_page_shift(hstate) < PUD_SHIFT)
136 return pud_offset(pgd, addr);
137 else
138 return (pud_t *) pgd;
139}
140static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
141 struct hstate *hstate)
142{ 135{
143 if (huge_page_shift(hstate) < PUD_SHIFT) 136 pgd_t *pg;
144 return pud_alloc(mm, pgd, addr); 137 pud_t *pu;
145 else 138 pmd_t *pm;
146 return (pud_t *) pgd; 139 hugepd_t *hpdp = NULL;
147} 140 unsigned pshift = __ffs(sz);
148static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate) 141 unsigned pdshift = PGDIR_SHIFT;
149{ 142
150 if (huge_page_shift(hstate) < PMD_SHIFT) 143 addr &= ~(sz-1);
151 return pmd_offset(pud, addr); 144
152 else 145 pg = pgd_offset(mm, addr);
153 return (pmd_t *) pud; 146 if (pshift >= PUD_SHIFT) {
154} 147 hpdp = (hugepd_t *)pg;
155static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr, 148 } else {
156 struct hstate *hstate) 149 pdshift = PUD_SHIFT;
157{ 150 pu = pud_alloc(mm, pg, addr);
158 if (huge_page_shift(hstate) < PMD_SHIFT) 151 if (pshift >= PMD_SHIFT) {
159 return pmd_alloc(mm, pud, addr); 152 hpdp = (hugepd_t *)pu;
160 else 153 } else {
161 return (pmd_t *) pud; 154 pdshift = PMD_SHIFT;
155 pm = pmd_alloc(mm, pu, addr);
156 hpdp = (hugepd_t *)pm;
157 }
158 }
159
160 if (!hpdp)
161 return NULL;
162
163 BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
164
165 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
166 return NULL;
167
168 return hugepte_offset(hpdp, addr, pdshift);
162} 169}
163 170
164/* Build list of addresses of gigantic pages. This function is used in early 171/* Build list of addresses of gigantic pages. This function is used in early
@@ -192,94 +199,38 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
192 return 1; 199 return 1;
193} 200}
194 201
195
196/* Modelled after find_linux_pte() */
197pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
198{
199 pgd_t *pg;
200 pud_t *pu;
201 pmd_t *pm;
202
203 unsigned int psize;
204 unsigned int shift;
205 unsigned long sz;
206 struct hstate *hstate;
207 psize = get_slice_psize(mm, addr);
208 shift = mmu_psize_to_shift(psize);
209 sz = ((1UL) << shift);
210 hstate = size_to_hstate(sz);
211
212 addr &= hstate->mask;
213
214 pg = pgd_offset(mm, addr);
215 if (!pgd_none(*pg)) {
216 pu = hpud_offset(pg, addr, hstate);
217 if (!pud_none(*pu)) {
218 pm = hpmd_offset(pu, addr, hstate);
219 if (!pmd_none(*pm))
220 return hugepte_offset((hugepd_t *)pm, addr,
221 hstate);
222 }
223 }
224
225 return NULL;
226}
227
228pte_t *huge_pte_alloc(struct mm_struct *mm,
229 unsigned long addr, unsigned long sz)
230{
231 pgd_t *pg;
232 pud_t *pu;
233 pmd_t *pm;
234 hugepd_t *hpdp = NULL;
235 struct hstate *hstate;
236 unsigned int psize;
237 hstate = size_to_hstate(sz);
238
239 psize = get_slice_psize(mm, addr);
240 BUG_ON(!mmu_huge_psizes[psize]);
241
242 addr &= hstate->mask;
243
244 pg = pgd_offset(mm, addr);
245 pu = hpud_alloc(mm, pg, addr, hstate);
246
247 if (pu) {
248 pm = hpmd_alloc(mm, pu, addr, hstate);
249 if (pm)
250 hpdp = (hugepd_t *)pm;
251 }
252
253 if (! hpdp)
254 return NULL;
255
256 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
257 return NULL;
258
259 return hugepte_offset(hpdp, addr, hstate);
260}
261
262int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) 202int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
263{ 203{
264 return 0; 204 return 0;
265} 205}
266 206
267static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp, 207static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
268 unsigned int psize) 208 unsigned long start, unsigned long end,
209 unsigned long floor, unsigned long ceiling)
269{ 210{
270 pte_t *hugepte = hugepd_page(*hpdp); 211 pte_t *hugepte = hugepd_page(*hpdp);
212 unsigned shift = hugepd_shift(*hpdp);
213 unsigned long pdmask = ~((1UL << pdshift) - 1);
214
215 start &= pdmask;
216 if (start < floor)
217 return;
218 if (ceiling) {
219 ceiling &= pdmask;
220 if (! ceiling)
221 return;
222 }
223 if (end - 1 > ceiling - 1)
224 return;
271 225
272 hpdp->pd = 0; 226 hpdp->pd = 0;
273 tlb->need_flush = 1; 227 tlb->need_flush = 1;
274 pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, 228 pgtable_free_tlb(tlb, hugepte, pdshift - shift);
275 HUGEPTE_CACHE_NUM+psize-1,
276 PGF_CACHENUM_MASK));
277} 229}
278 230
279static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, 231static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
280 unsigned long addr, unsigned long end, 232 unsigned long addr, unsigned long end,
281 unsigned long floor, unsigned long ceiling, 233 unsigned long floor, unsigned long ceiling)
282 unsigned int psize)
283{ 234{
284 pmd_t *pmd; 235 pmd_t *pmd;
285 unsigned long next; 236 unsigned long next;
@@ -291,7 +242,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
291 next = pmd_addr_end(addr, end); 242 next = pmd_addr_end(addr, end);
292 if (pmd_none(*pmd)) 243 if (pmd_none(*pmd))
293 continue; 244 continue;
294 free_hugepte_range(tlb, (hugepd_t *)pmd, psize); 245 free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
246 addr, next, floor, ceiling);
295 } while (pmd++, addr = next, addr != end); 247 } while (pmd++, addr = next, addr != end);
296 248
297 start &= PUD_MASK; 249 start &= PUD_MASK;
@@ -317,23 +269,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
317 pud_t *pud; 269 pud_t *pud;
318 unsigned long next; 270 unsigned long next;
319 unsigned long start; 271 unsigned long start;
320 unsigned int shift;
321 unsigned int psize = get_slice_psize(tlb->mm, addr);
322 shift = mmu_psize_to_shift(psize);
323 272
324 start = addr; 273 start = addr;
325 pud = pud_offset(pgd, addr); 274 pud = pud_offset(pgd, addr);
326 do { 275 do {
327 next = pud_addr_end(addr, end); 276 next = pud_addr_end(addr, end);
328 if (shift < PMD_SHIFT) { 277 if (!is_hugepd(pud)) {
329 if (pud_none_or_clear_bad(pud)) 278 if (pud_none_or_clear_bad(pud))
330 continue; 279 continue;
331 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, 280 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
332 ceiling, psize); 281 ceiling);
333 } else { 282 } else {
334 if (pud_none(*pud)) 283 free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
335 continue; 284 addr, next, floor, ceiling);
336 free_hugepte_range(tlb, (hugepd_t *)pud, psize);
337 } 285 }
338 } while (pud++, addr = next, addr != end); 286 } while (pud++, addr = next, addr != end);
339 287
@@ -364,121 +312,56 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
364{ 312{
365 pgd_t *pgd; 313 pgd_t *pgd;
366 unsigned long next; 314 unsigned long next;
367 unsigned long start;
368 315
369 /* 316 /*
370 * Comments below take from the normal free_pgd_range(). They 317 * Because there are a number of different possible pagetable
371 * apply here too. The tests against HUGEPD_MASK below are 318 * layouts for hugepage ranges, we limit knowledge of how
372 * essential, because we *don't* test for this at the bottom 319 * things should be laid out to the allocation path
373 * level. Without them we'll attempt to free a hugepte table 320 * (huge_pte_alloc(), above). Everything else works out the
374 * when we unmap just part of it, even if there are other 321 * structure as it goes from information in the hugepd
375 * active mappings using it. 322 * pointers. That means that we can't here use the
376 * 323 * optimization used in the normal page free_pgd_range(), of
377 * The next few lines have given us lots of grief... 324 * checking whether we're actually covering a large enough
378 * 325 * range to have to do anything at the top level of the walk
379 * Why are we testing HUGEPD* at this top level? Because 326 * instead of at the bottom.
380 * often there will be no work to do at all, and we'd prefer
381 * not to go all the way down to the bottom just to discover
382 * that.
383 *
384 * Why all these "- 1"s? Because 0 represents both the bottom
385 * of the address space and the top of it (using -1 for the
386 * top wouldn't help much: the masks would do the wrong thing).
387 * The rule is that addr 0 and floor 0 refer to the bottom of
388 * the address space, but end 0 and ceiling 0 refer to the top
389 * Comparisons need to use "end - 1" and "ceiling - 1" (though
390 * that end 0 case should be mythical).
391 * 327 *
392 * Wherever addr is brought up or ceiling brought down, we 328 * To make sense of this, you should probably go read the big
393 * must be careful to reject "the opposite 0" before it 329 * block comment at the top of the normal free_pgd_range(),
394 * confuses the subsequent tests. But what about where end is 330 * too.
395 * brought down by HUGEPD_SIZE below? no, end can't go down to
396 * 0 there.
397 *
398 * Whereas we round start (addr) and ceiling down, by different
399 * masks at different levels, in order to test whether a table
400 * now has no other vmas using it, so can be freed, we don't
401 * bother to round floor or end up - the tests don't need that.
402 */ 331 */
403 unsigned int psize = get_slice_psize(tlb->mm, addr);
404
405 addr &= HUGEPD_MASK(psize);
406 if (addr < floor) {
407 addr += HUGEPD_SIZE(psize);
408 if (!addr)
409 return;
410 }
411 if (ceiling) {
412 ceiling &= HUGEPD_MASK(psize);
413 if (!ceiling)
414 return;
415 }
416 if (end - 1 > ceiling - 1)
417 end -= HUGEPD_SIZE(psize);
418 if (addr > end - 1)
419 return;
420 332
421 start = addr;
422 pgd = pgd_offset(tlb->mm, addr); 333 pgd = pgd_offset(tlb->mm, addr);
423 do { 334 do {
424 psize = get_slice_psize(tlb->mm, addr);
425 BUG_ON(!mmu_huge_psizes[psize]);
426 next = pgd_addr_end(addr, end); 335 next = pgd_addr_end(addr, end);
427 if (mmu_psize_to_shift(psize) < PUD_SHIFT) { 336 if (!is_hugepd(pgd)) {
428 if (pgd_none_or_clear_bad(pgd)) 337 if (pgd_none_or_clear_bad(pgd))
429 continue; 338 continue;
430 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); 339 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
431 } else { 340 } else {
432 if (pgd_none(*pgd)) 341 free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
433 continue; 342 addr, next, floor, ceiling);
434 free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
435 } 343 }
436 } while (pgd++, addr = next, addr != end); 344 } while (pgd++, addr = next, addr != end);
437} 345}
438 346
439void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
440 pte_t *ptep, pte_t pte)
441{
442 if (pte_present(*ptep)) {
443 /* We open-code pte_clear because we need to pass the right
444 * argument to hpte_need_flush (huge / !huge). Might not be
445 * necessary anymore if we make hpte_need_flush() get the
446 * page size from the slices
447 */
448 unsigned int psize = get_slice_psize(mm, addr);
449 unsigned int shift = mmu_psize_to_shift(psize);
450 unsigned long sz = ((1UL) << shift);
451 struct hstate *hstate = size_to_hstate(sz);
452 pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
453 }
454 *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
455}
456
457pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
458 pte_t *ptep)
459{
460 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
461 return __pte(old);
462}
463
464struct page * 347struct page *
465follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) 348follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
466{ 349{
467 pte_t *ptep; 350 pte_t *ptep;
468 struct page *page; 351 struct page *page;
469 unsigned int mmu_psize = get_slice_psize(mm, address); 352 unsigned shift;
353 unsigned long mask;
354
355 ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
470 356
471 /* Verify it is a huge page else bail. */ 357 /* Verify it is a huge page else bail. */
472 if (!mmu_huge_psizes[mmu_psize]) 358 if (!ptep || !shift)
473 return ERR_PTR(-EINVAL); 359 return ERR_PTR(-EINVAL);
474 360
475 ptep = huge_pte_offset(mm, address); 361 mask = (1UL << shift) - 1;
476 page = pte_page(*ptep); 362 page = pte_page(*ptep);
477 if (page) { 363 if (page)
478 unsigned int shift = mmu_psize_to_shift(mmu_psize); 364 page += (address & mask) / PAGE_SIZE;
479 unsigned long sz = ((1UL) << shift);
480 page += (address % sz) / PAGE_SIZE;
481 }
482 365
483 return page; 366 return page;
484} 367}
@@ -501,6 +384,82 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
501 return NULL; 384 return NULL;
502} 385}
503 386
387static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
388 unsigned long end, int write, struct page **pages, int *nr)
389{
390 unsigned long mask;
391 unsigned long pte_end;
392 struct page *head, *page;
393 pte_t pte;
394 int refs;
395
396 pte_end = (addr + sz) & ~(sz-1);
397 if (pte_end < end)
398 end = pte_end;
399
400 pte = *ptep;
401 mask = _PAGE_PRESENT | _PAGE_USER;
402 if (write)
403 mask |= _PAGE_RW;
404
405 if ((pte_val(pte) & mask) != mask)
406 return 0;
407
408 /* hugepages are never "special" */
409 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
410
411 refs = 0;
412 head = pte_page(pte);
413
414 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
415 do {
416 VM_BUG_ON(compound_head(page) != head);
417 pages[*nr] = page;
418 (*nr)++;
419 page++;
420 refs++;
421 } while (addr += PAGE_SIZE, addr != end);
422
423 if (!page_cache_add_speculative(head, refs)) {
424 *nr -= refs;
425 return 0;
426 }
427
428 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
429 /* Could be optimized better */
430 while (*nr) {
431 put_page(page);
432 (*nr)--;
433 }
434 }
435
436 return 1;
437}
438
439static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
440 unsigned long sz)
441{
442 unsigned long __boundary = (addr + sz) & ~(sz-1);
443 return (__boundary - 1 < end - 1) ? __boundary : end;
444}
445
446int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
447 unsigned long addr, unsigned long end,
448 int write, struct page **pages, int *nr)
449{
450 pte_t *ptep;
451 unsigned long sz = 1UL << hugepd_shift(*hugepd);
452 unsigned long next;
453
454 ptep = hugepte_offset(hugepd, addr, pdshift);
455 do {
456 next = hugepte_addr_end(addr, end, sz);
457 if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
458 return 0;
459 } while (ptep++, addr = next, addr != end);
460
461 return 1;
462}
504 463
505unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 464unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
506 unsigned long len, unsigned long pgoff, 465 unsigned long len, unsigned long pgoff,
@@ -509,8 +468,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
509 struct hstate *hstate = hstate_file(file); 468 struct hstate *hstate = hstate_file(file);
510 int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); 469 int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
511 470
512 if (!mmu_huge_psizes[mmu_psize])
513 return -EINVAL;
514 return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); 471 return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
515} 472}
516 473
@@ -521,229 +478,46 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
521 return 1UL << mmu_psize_to_shift(psize); 478 return 1UL << mmu_psize_to_shift(psize);
522} 479}
523 480
524/* 481static int __init add_huge_page_size(unsigned long long size)
525 * Called by asm hashtable.S for doing lazy icache flush
526 */
527static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
528 pte_t pte, int trap, unsigned long sz)
529{ 482{
530 struct page *page; 483 int shift = __ffs(size);
531 int i; 484 int mmu_psize;
532
533 if (!pfn_valid(pte_pfn(pte)))
534 return rflags;
535
536 page = pte_page(pte);
537
538 /* page is dirty */
539 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
540 if (trap == 0x400) {
541 for (i = 0; i < (sz / PAGE_SIZE); i++)
542 __flush_dcache_icache(page_address(page+i));
543 set_bit(PG_arch_1, &page->flags);
544 } else {
545 rflags |= HPTE_R_N;
546 }
547 }
548 return rflags;
549}
550 485
551int hash_huge_page(struct mm_struct *mm, unsigned long access, 486 /* Check that it is a page size supported by the hardware and
552 unsigned long ea, unsigned long vsid, int local, 487 * that it fits within pagetable and slice limits. */
553 unsigned long trap) 488 if (!is_power_of_2(size)
554{ 489 || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
555 pte_t *ptep; 490 return -EINVAL;
556 unsigned long old_pte, new_pte;
557 unsigned long va, rflags, pa, sz;
558 long slot;
559 int err = 1;
560 int ssize = user_segment_size(ea);
561 unsigned int mmu_psize;
562 int shift;
563 mmu_psize = get_slice_psize(mm, ea);
564
565 if (!mmu_huge_psizes[mmu_psize])
566 goto out;
567 ptep = huge_pte_offset(mm, ea);
568
569 /* Search the Linux page table for a match with va */
570 va = hpt_va(ea, vsid, ssize);
571 491
572 /* 492 if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
573 * If no pte found or not present, send the problem up to 493 return -EINVAL;
574 * do_page_fault
575 */
576 if (unlikely(!ptep || pte_none(*ptep)))
577 goto out;
578 494
579 /* 495#ifdef CONFIG_SPU_FS_64K_LS
580 * Check the user's access rights to the page. If access should be 496 /* Disable support for 64K huge pages when 64K SPU local store
581 * prevented then send the problem up to do_page_fault. 497 * support is enabled as the current implementation conflicts.
582 */ 498 */
583 if (unlikely(access & ~pte_val(*ptep))) 499 if (shift == PAGE_SHIFT_64K)
584 goto out; 500 return -EINVAL;
585 /* 501#endif /* CONFIG_SPU_FS_64K_LS */
586 * At this point, we have a pte (old_pte) which can be used to build
587 * or update an HPTE. There are 2 cases:
588 *
589 * 1. There is a valid (present) pte with no associated HPTE (this is
590 * the most common case)
591 * 2. There is a valid (present) pte with an associated HPTE. The
592 * current values of the pp bits in the HPTE prevent access
593 * because we are doing software DIRTY bit management and the
594 * page is currently not DIRTY.
595 */
596
597
598 do {
599 old_pte = pte_val(*ptep);
600 if (old_pte & _PAGE_BUSY)
601 goto out;
602 new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
603 } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
604 old_pte, new_pte));
605
606 rflags = 0x2 | (!(new_pte & _PAGE_RW));
607 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
608 rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
609 shift = mmu_psize_to_shift(mmu_psize);
610 sz = ((1UL) << shift);
611 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
612 /* No CPU has hugepages but lacks no execute, so we
613 * don't need to worry about that case */
614 rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
615 trap, sz);
616
617 /* Check if pte already has an hpte (case 2) */
618 if (unlikely(old_pte & _PAGE_HASHPTE)) {
619 /* There MIGHT be an HPTE for this pte */
620 unsigned long hash, slot;
621
622 hash = hpt_hash(va, shift, ssize);
623 if (old_pte & _PAGE_F_SECOND)
624 hash = ~hash;
625 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
626 slot += (old_pte & _PAGE_F_GIX) >> 12;
627
628 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
629 ssize, local) == -1)
630 old_pte &= ~_PAGE_HPTEFLAGS;
631 }
632
633 if (likely(!(old_pte & _PAGE_HASHPTE))) {
634 unsigned long hash = hpt_hash(va, shift, ssize);
635 unsigned long hpte_group;
636
637 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
638
639repeat:
640 hpte_group = ((hash & htab_hash_mask) *
641 HPTES_PER_GROUP) & ~0x7UL;
642
 643 /* clear HPTE slot information in new PTE */
644#ifdef CONFIG_PPC_64K_PAGES
645 new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
646#else
647 new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
648#endif
649 /* Add in WIMG bits */
650 rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
651 _PAGE_COHERENT | _PAGE_GUARDED));
652
653 /* Insert into the hash table, primary slot */
654 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
655 mmu_psize, ssize);
656
657 /* Primary is full, try the secondary */
658 if (unlikely(slot == -1)) {
659 hpte_group = ((~hash & htab_hash_mask) *
660 HPTES_PER_GROUP) & ~0x7UL;
661 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
662 HPTE_V_SECONDARY,
663 mmu_psize, ssize);
664 if (slot == -1) {
665 if (mftb() & 0x1)
666 hpte_group = ((hash & htab_hash_mask) *
667 HPTES_PER_GROUP)&~0x7UL;
668
669 ppc_md.hpte_remove(hpte_group);
670 goto repeat;
671 }
672 }
673
674 if (unlikely(slot == -2))
675 panic("hash_huge_page: pte_insert failed\n");
676
677 new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
678 }
679 502
680 /* 503 BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
681 * No need to use ldarx/stdcx here
682 */
683 *ptep = __pte(new_pte & ~_PAGE_BUSY);
684 504
685 err = 0; 505 /* Return if huge page size has already been setup */
506 if (size_to_hstate(size))
507 return 0;
686 508
687 out: 509 hugetlb_add_hstate(shift - PAGE_SHIFT);
688 return err;
689}
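
The primary/secondary bucket arithmetic used in hash_huge_page() above is easy to get wrong, so here is a minimal standalone sketch of it; HPTES_PER_GROUP, the hash-table mask and the sample hash value are hypothetical stand-ins, not the kernel's configuration.

    #include <stdio.h>

    #define HPTES_PER_GROUP 8

    /* Hypothetical mask, for illustration only. */
    static const unsigned long htab_hash_mask = 0xfffffUL;

    static unsigned long primary_group(unsigned long hash)
    {
            /* Each group holds 8 slots; the mask keeps the index 8-aligned. */
            return ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
    }

    static unsigned long secondary_group(unsigned long hash)
    {
            /* The secondary group is derived from the complemented hash. */
            return ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
    }

    int main(void)
    {
            unsigned long hash = 0x12345UL;

            printf("primary group:   %#lx\n", primary_group(hash));
            printf("secondary group: %#lx\n", secondary_group(hash));
            return 0;
    }
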
690 510
691static void __init set_huge_psize(int psize) 511 return 0;
692{
693 /* Check that it is a page size supported by the hardware and
694 * that it fits within pagetable limits. */
695 if (mmu_psize_defs[psize].shift &&
696 mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
697 (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
698 mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
699 mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
700 /* Return if huge page size has already been setup or is the
701 * same as the base page size. */
702 if (mmu_huge_psizes[psize] ||
703 mmu_psize_defs[psize].shift == PAGE_SHIFT)
704 return;
705 if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL))
706 return;
707 hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
708
709 switch (mmu_psize_defs[psize].shift) {
710 case PAGE_SHIFT_64K:
711 /* We only allow 64k hpages with 4k base page,
712 * which was checked above, and always put them
713 * at the PMD */
714 hugepte_shift[psize] = PMD_SHIFT;
715 break;
716 case PAGE_SHIFT_16M:
717 /* 16M pages can be at two different levels
 718 * of pagetables based on base page size */
719 if (PAGE_SHIFT == PAGE_SHIFT_64K)
720 hugepte_shift[psize] = PMD_SHIFT;
721 else /* 4k base page */
722 hugepte_shift[psize] = PUD_SHIFT;
723 break;
724 case PAGE_SHIFT_16G:
725 /* 16G pages are always at PGD level */
726 hugepte_shift[psize] = PGDIR_SHIFT;
727 break;
728 }
729 hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
730 } else
731 hugepte_shift[psize] = 0;
732} 512}
733 513
734static int __init hugepage_setup_sz(char *str) 514static int __init hugepage_setup_sz(char *str)
735{ 515{
736 unsigned long long size; 516 unsigned long long size;
737 int mmu_psize;
738 int shift;
739 517
740 size = memparse(str, &str); 518 size = memparse(str, &str);
741 519
742 shift = __ffs(size); 520 if (add_huge_page_size(size) != 0)
743 mmu_psize = shift_to_mmu_psize(shift);
744 if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
745 set_huge_psize(mmu_psize);
746 else
747 printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size); 521 printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
748 522
749 return 1; 523 return 1;
@@ -752,41 +526,55 @@ __setup("hugepagesz=", hugepage_setup_sz);
752 526
753static int __init hugetlbpage_init(void) 527static int __init hugetlbpage_init(void)
754{ 528{
755 unsigned int psize; 529 int psize;
756 530
757 if (!cpu_has_feature(CPU_FTR_16M_PAGE)) 531 if (!cpu_has_feature(CPU_FTR_16M_PAGE))
758 return -ENODEV; 532 return -ENODEV;
759 533
760 /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE 534 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
761 * and adjust PTE_NONCACHE_NUM if the number of supported huge page 535 unsigned shift;
762 * sizes changes. 536 unsigned pdshift;
763 */
764 set_huge_psize(MMU_PAGE_16M);
765 set_huge_psize(MMU_PAGE_16G);
766 537
767 /* Temporarily disable support for 64K huge pages when 64K SPU local 538 if (!mmu_psize_defs[psize].shift)
768 * store support is enabled as the current implementation conflicts. 539 continue;
769 */
770#ifndef CONFIG_SPU_FS_64K_LS
771 set_huge_psize(MMU_PAGE_64K);
772#endif
773 540
774 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 541 shift = mmu_psize_to_shift(psize);
775 if (mmu_huge_psizes[psize]) { 542
776 pgtable_cache[HUGE_PGTABLE_INDEX(psize)] = 543 if (add_huge_page_size(1ULL << shift) < 0)
777 kmem_cache_create( 544 continue;
778 HUGEPTE_CACHE_NAME(psize), 545
779 HUGEPTE_TABLE_SIZE(psize), 546 if (shift < PMD_SHIFT)
780 HUGEPTE_TABLE_SIZE(psize), 547 pdshift = PMD_SHIFT;
781 0, 548 else if (shift < PUD_SHIFT)
782 NULL); 549 pdshift = PUD_SHIFT;
783 if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)]) 550 else
784 panic("hugetlbpage_init(): could not create %s"\ 551 pdshift = PGDIR_SHIFT;
785 "\n", HUGEPTE_CACHE_NAME(psize)); 552
786 } 553 pgtable_cache_add(pdshift - shift, NULL);
554 if (!PGT_CACHE(pdshift - shift))
555 panic("hugetlbpage_init(): could not create "
556 "pgtable cache for %d bit pagesize\n", shift);
787 } 557 }
788 558
559 /* Set default large page size. Currently, we pick 16M or 1M
560 * depending on what is available
561 */
562 if (mmu_psize_defs[MMU_PAGE_16M].shift)
563 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
564 else if (mmu_psize_defs[MMU_PAGE_1M].shift)
565 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
566
789 return 0; 567 return 0;
790} 568}
791 569
792module_init(hugetlbpage_init); 570module_init(hugetlbpage_init);
571
572void flush_dcache_icache_hugepage(struct page *page)
573{
574 int i;
575
576 BUG_ON(!PageCompound(page));
577
578 for (i = 0; i < (1UL << compound_order(page)); i++)
579 __flush_dcache_icache(page_address(page+i));
580}
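
The new hugetlbpage_init() above picks the page-directory level (pdshift) that will host each huge page's pagetable purely from the page shift. A rough standalone sketch of that selection, using hypothetical shift constants that merely resemble a 4K-base 64-bit layout:

    #include <stdio.h>

    /* Hypothetical values, not taken from the kernel headers. */
    #define PMD_SHIFT   21
    #define PUD_SHIFT   30
    #define PGDIR_SHIFT 39

    static unsigned pick_pdshift(unsigned shift)
    {
            if (shift < PMD_SHIFT)
                    return PMD_SHIFT;
            else if (shift < PUD_SHIFT)
                    return PUD_SHIFT;
            return PGDIR_SHIFT;
    }

    int main(void)
    {
            unsigned shifts[] = { 16, 24, 34 };     /* 64K, 16M, 16G */
            int i;

            for (i = 0; i < 3; i++)
                    printf("page shift %u -> pdshift %u, cache index %u\n",
                           shifts[i], pick_pdshift(shifts[i]),
                           pick_pdshift(shifts[i]) - shifts[i]);
            return 0;
    }
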
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 335c578b9cc3..776f28d02b6b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
41#include <linux/module.h> 41#include <linux/module.h>
42#include <linux/poison.h> 42#include <linux/poison.h>
43#include <linux/lmb.h> 43#include <linux/lmb.h>
44#include <linux/hugetlb.h>
44 45
45#include <asm/pgalloc.h> 46#include <asm/pgalloc.h>
46#include <asm/page.h> 47#include <asm/page.h>
@@ -119,30 +120,63 @@ static void pmd_ctor(void *addr)
119 memset(addr, 0, PMD_TABLE_SIZE); 120 memset(addr, 0, PMD_TABLE_SIZE);
120} 121}
121 122
122static const unsigned int pgtable_cache_size[2] = { 123struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
123 PGD_TABLE_SIZE, PMD_TABLE_SIZE 124
124}; 125/*
125static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { 126 * Create a kmem_cache() for pagetables. This is not used for PTE
126#ifdef CONFIG_PPC_64K_PAGES 127 * pages - they're linked to struct page, come from the normal free
127 "pgd_cache", "pmd_cache", 128 * pages pool and have a different entry size (see real_pte_t) to
128#else 129 * everything else. Caches created by this function are used for all
129 "pgd_cache", "pud_pmd_cache", 130 * the higher level pagetables, and for hugepage pagetables.
130#endif /* CONFIG_PPC_64K_PAGES */ 131 */
131}; 132void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
132 133{
133#ifdef CONFIG_HUGETLB_PAGE 134 char *name;
134/* Hugepages need an extra cache per hugepagesize, initialized in 135 unsigned long table_size = sizeof(void *) << shift;
135 * hugetlbpage.c. We can't put into the tables above, because HPAGE_SHIFT 136 unsigned long align = table_size;
136 * is not compile time constant. */ 137
137struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT]; 138 /* When batching pgtable pointers for RCU freeing, we store
138#else 139 * the index size in the low bits. Table alignment must be
139struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 140 * big enough to fit it.
140#endif 141 *
 142 * Likewise, hugepage pagetable pointers contain a (different)
143 * shift value in the low bits. All tables must be aligned so
144 * as to leave enough 0 bits in the address to contain it. */
145 unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
146 HUGEPD_SHIFT_MASK + 1);
147 struct kmem_cache *new;
148
149 /* It would be nice if this was a BUILD_BUG_ON(), but at the
150 * moment, gcc doesn't seem to recognize is_power_of_2 as a
151 * constant expression, so so much for that. */
152 BUG_ON(!is_power_of_2(minalign));
153 BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
154
155 if (PGT_CACHE(shift))
156 return; /* Already have a cache of this size */
157
158 align = max_t(unsigned long, align, minalign);
159 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
160 new = kmem_cache_create(name, table_size, align, 0, ctor);
161 PGT_CACHE(shift) = new;
162
163 pr_debug("Allocated pgtable cache for order %d\n", shift);
164}
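
The subtle part of pgtable_cache_add() is the size/alignment arithmetic: a table of 2^shift pointers must be aligned far enough that its low address bits stay zero and can carry an index or hugepd shift tag. A minimal sketch of that calculation; MAX_PGTABLE_INDEX_SIZE and HUGEPD_SHIFT_MASK are given hypothetical values here purely for illustration.

    #include <stdio.h>

    #define MAX_PGTABLE_INDEX_SIZE  0xf     /* hypothetical */
    #define HUGEPD_SHIFT_MASK       0x3f    /* hypothetical */

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
            return a > b ? a : b;
    }

    int main(void)
    {
            unsigned shift;

            for (shift = 4; shift <= 12; shift += 4) {
                    unsigned long table_size = sizeof(void *) << shift;
                    unsigned long minalign = max_ul(MAX_PGTABLE_INDEX_SIZE + 1,
                                                    HUGEPD_SHIFT_MASK + 1);
                    unsigned long align = max_ul(table_size, minalign);

                    printf("shift %2u: table %6lu bytes, align %4lu bytes\n",
                           shift, table_size, align);
            }
            return 0;
    }
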
165
141 166
142void pgtable_cache_init(void) 167void pgtable_cache_init(void)
143{ 168{
144 pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0], PGD_TABLE_SIZE, PGD_TABLE_SIZE, SLAB_PANIC, pgd_ctor); 169 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
145 pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1], PMD_TABLE_SIZE, PMD_TABLE_SIZE, SLAB_PANIC, pmd_ctor); 170 pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
171 if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
172 panic("Couldn't allocate pgtable caches");
173
174 /* In all current configs, when the PUD index exists it's the
175 * same size as either the pgd or pmd index. Verify that the
176 * initialization above has also created a PUD cache. This
 177 * will need re-examination if we add new possibilities for
178 * the pagetable layout. */
179 BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
146} 180}
147 181
148#ifdef CONFIG_SPARSEMEM_VMEMMAP 182#ifdef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 59736317bf0e..b9b152558f9c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,6 +32,7 @@
32#include <linux/pagemap.h> 32#include <linux/pagemap.h>
33#include <linux/suspend.h> 33#include <linux/suspend.h>
34#include <linux/lmb.h> 34#include <linux/lmb.h>
35#include <linux/hugetlb.h>
35 36
36#include <asm/pgalloc.h> 37#include <asm/pgalloc.h>
37#include <asm/prom.h> 38#include <asm/prom.h>
@@ -417,18 +418,26 @@ EXPORT_SYMBOL(flush_dcache_page);
417 418
418void flush_dcache_icache_page(struct page *page) 419void flush_dcache_icache_page(struct page *page)
419{ 420{
421#ifdef CONFIG_HUGETLB_PAGE
422 if (PageCompound(page)) {
423 flush_dcache_icache_hugepage(page);
424 return;
425 }
426#endif
420#ifdef CONFIG_BOOKE 427#ifdef CONFIG_BOOKE
421 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); 428 {
422 __flush_dcache_icache(start); 429 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
423 kunmap_atomic(start, KM_PPC_SYNC_ICACHE); 430 __flush_dcache_icache(start);
431 kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
432 }
424#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) 433#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
425 /* On 8xx there is no need to kmap since highmem is not supported */ 434 /* On 8xx there is no need to kmap since highmem is not supported */
426 __flush_dcache_icache(page_address(page)); 435 __flush_dcache_icache(page_address(page));
427#else 436#else
428 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); 437 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
429#endif 438#endif
430
431} 439}
440
432void clear_user_page(void *page, unsigned long vaddr, struct page *pg) 441void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
433{ 442{
434 clear_page(page); 443 clear_page(page);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index dbeb86ac90cd..b910d37aea1a 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/idr.h> 20#include <linux/idr.h>
21#include <linux/module.h>
21 22
22#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
23 24
@@ -32,7 +33,7 @@ static DEFINE_IDR(mmu_context_idr);
32#define NO_CONTEXT 0 33#define NO_CONTEXT 0
33#define MAX_CONTEXT ((1UL << 19) - 1) 34#define MAX_CONTEXT ((1UL << 19) - 1)
34 35
35int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 36int __init_new_context(void)
36{ 37{
37 int index; 38 int index;
38 int err; 39 int err;
@@ -57,22 +58,41 @@ again:
57 return -ENOMEM; 58 return -ENOMEM;
58 } 59 }
59 60
61 return index;
62}
63EXPORT_SYMBOL_GPL(__init_new_context);
64
65int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
66{
67 int index;
68
69 index = __init_new_context();
70 if (index < 0)
71 return index;
72
60 /* The old code would re-promote on fork, we don't do that 73 /* The old code would re-promote on fork, we don't do that
61 * when using slices as it could cause problem promoting slices 74 * when using slices as it could cause problem promoting slices
62 * that have been forced down to 4K 75 * that have been forced down to 4K
63 */ 76 */
64 if (slice_mm_new_context(mm)) 77 if (slice_mm_new_context(mm))
65 slice_set_user_psize(mm, mmu_virtual_psize); 78 slice_set_user_psize(mm, mmu_virtual_psize);
79 subpage_prot_init_new_context(mm);
66 mm->context.id = index; 80 mm->context.id = index;
67 81
68 return 0; 82 return 0;
69} 83}
70 84
71void destroy_context(struct mm_struct *mm) 85void __destroy_context(int context_id)
72{ 86{
73 spin_lock(&mmu_context_lock); 87 spin_lock(&mmu_context_lock);
74 idr_remove(&mmu_context_idr, mm->context.id); 88 idr_remove(&mmu_context_idr, context_id);
75 spin_unlock(&mmu_context_lock); 89 spin_unlock(&mmu_context_lock);
90}
91EXPORT_SYMBOL_GPL(__destroy_context);
76 92
93void destroy_context(struct mm_struct *mm)
94{
95 __destroy_context(mm->context.id);
96 subpage_prot_free(mm);
77 mm->context.id = NO_CONTEXT; 97 mm->context.id = NO_CONTEXT;
78} 98}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d2e5321d5ea6..e27a990af42d 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -98,21 +98,10 @@ extern void _tlbia(void);
98 98
99#ifdef CONFIG_PPC32 99#ifdef CONFIG_PPC32
100 100
101struct tlbcam {
102 u32 MAS0;
103 u32 MAS1;
104 u32 MAS2;
105 u32 MAS3;
106 u32 MAS7;
107};
108
109extern void mapin_ram(void); 101extern void mapin_ram(void);
110extern int map_page(unsigned long va, phys_addr_t pa, int flags); 102extern int map_page(unsigned long va, phys_addr_t pa, int flags);
111extern void setbat(int index, unsigned long virt, phys_addr_t phys, 103extern void setbat(int index, unsigned long virt, phys_addr_t phys,
112 unsigned int size, int flags); 104 unsigned int size, int flags);
113extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
114 unsigned int size, int flags, unsigned int pid);
115extern void invalidate_tlbcam_entry(int index);
116 105
117extern int __map_without_bats; 106extern int __map_without_bats;
118extern unsigned long ioremap_base; 107extern unsigned long ioremap_base;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 53040931de32..99df697c601a 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -49,12 +49,12 @@ struct pte_freelist_batch
49{ 49{
50 struct rcu_head rcu; 50 struct rcu_head rcu;
51 unsigned int index; 51 unsigned int index;
52 pgtable_free_t tables[0]; 52 unsigned long tables[0];
53}; 53};
54 54
55#define PTE_FREELIST_SIZE \ 55#define PTE_FREELIST_SIZE \
56 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \ 56 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
57 / sizeof(pgtable_free_t)) 57 / sizeof(unsigned long))
58 58
59static void pte_free_smp_sync(void *arg) 59static void pte_free_smp_sync(void *arg)
60{ 60{
@@ -64,13 +64,13 @@ static void pte_free_smp_sync(void *arg)
64/* This is only called when we are critically out of memory 64/* This is only called when we are critically out of memory
65 * (and fail to get a page in pte_free_tlb). 65 * (and fail to get a page in pte_free_tlb).
66 */ 66 */
67static void pgtable_free_now(pgtable_free_t pgf) 67static void pgtable_free_now(void *table, unsigned shift)
68{ 68{
69 pte_freelist_forced_free++; 69 pte_freelist_forced_free++;
70 70
71 smp_call_function(pte_free_smp_sync, NULL, 1); 71 smp_call_function(pte_free_smp_sync, NULL, 1);
72 72
73 pgtable_free(pgf); 73 pgtable_free(table, shift);
74} 74}
75 75
76static void pte_free_rcu_callback(struct rcu_head *head) 76static void pte_free_rcu_callback(struct rcu_head *head)
@@ -79,8 +79,12 @@ static void pte_free_rcu_callback(struct rcu_head *head)
79 container_of(head, struct pte_freelist_batch, rcu); 79 container_of(head, struct pte_freelist_batch, rcu);
80 unsigned int i; 80 unsigned int i;
81 81
82 for (i = 0; i < batch->index; i++) 82 for (i = 0; i < batch->index; i++) {
83 pgtable_free(batch->tables[i]); 83 void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
84 unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
85
86 pgtable_free(table, shift);
87 }
84 88
85 free_page((unsigned long)batch); 89 free_page((unsigned long)batch);
86} 90}
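
The reworked RCU callback above unpacks a page-table pointer and its index size from a single unsigned long; because every table is aligned to more than MAX_PGTABLE_INDEX_SIZE bytes, the shift fits in the otherwise-zero low bits of the address. A standalone sketch of the pack/unpack step, with a hypothetical mask and a fake, suitably aligned address:

    #include <stdio.h>

    #define MAX_PGTABLE_INDEX_SIZE  0xf     /* hypothetical mask */

    static unsigned long pack(void *table, unsigned shift)
    {
            /* Alignment guarantees the low bits of the address are zero. */
            return (unsigned long)table | shift;
    }

    static void unpack(unsigned long pgf, void **table, unsigned *shift)
    {
            *table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
            *shift = pgf & MAX_PGTABLE_INDEX_SIZE;
    }

    int main(void)
    {
            void *fake_table = (void *)0x10000;     /* pretend pagetable address */
            void *table;
            unsigned shift;

            unpack(pack(fake_table, 9), &table, &shift);
            printf("table=%p shift=%u\n", table, shift);
            return 0;
    }
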
@@ -91,25 +95,28 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
91 call_rcu(&batch->rcu, pte_free_rcu_callback); 95 call_rcu(&batch->rcu, pte_free_rcu_callback);
92} 96}
93 97
94void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf) 98void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
95{ 99{
96 /* This is safe since tlb_gather_mmu has disabled preemption */ 100 /* This is safe since tlb_gather_mmu has disabled preemption */
97 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); 101 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
102 unsigned long pgf;
98 103
99 if (atomic_read(&tlb->mm->mm_users) < 2 || 104 if (atomic_read(&tlb->mm->mm_users) < 2 ||
100 cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){ 105 cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
101 pgtable_free(pgf); 106 pgtable_free(table, shift);
102 return; 107 return;
103 } 108 }
104 109
105 if (*batchp == NULL) { 110 if (*batchp == NULL) {
106 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); 111 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
107 if (*batchp == NULL) { 112 if (*batchp == NULL) {
108 pgtable_free_now(pgf); 113 pgtable_free_now(table, shift);
109 return; 114 return;
110 } 115 }
111 (*batchp)->index = 0; 116 (*batchp)->index = 0;
112 } 117 }
118 BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
119 pgf = (unsigned long)table | shift;
113 (*batchp)->tables[(*batchp)->index++] = pgf; 120 (*batchp)->tables[(*batchp)->index++] = pgf;
114 if ((*batchp)->index == PTE_FREELIST_SIZE) { 121 if ((*batchp)->index == PTE_FREELIST_SIZE) {
115 pte_free_submit(*batchp); 122 pte_free_submit(*batchp);
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 4cafc0c33d0a..a040b81e93bd 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -24,9 +24,9 @@
24 * Also makes sure that the subpage_prot_table structure is 24 * Also makes sure that the subpage_prot_table structure is
25 * reinitialized for the next user. 25 * reinitialized for the next user.
26 */ 26 */
27void subpage_prot_free(pgd_t *pgd) 27void subpage_prot_free(struct mm_struct *mm)
28{ 28{
29 struct subpage_prot_table *spt = pgd_subpage_prot(pgd); 29 struct subpage_prot_table *spt = &mm->context.spt;
30 unsigned long i, j, addr; 30 unsigned long i, j, addr;
31 u32 **p; 31 u32 **p;
32 32
@@ -51,6 +51,13 @@ void subpage_prot_free(pgd_t *pgd)
51 spt->maxaddr = 0; 51 spt->maxaddr = 0;
52} 52}
53 53
54void subpage_prot_init_new_context(struct mm_struct *mm)
55{
56 struct subpage_prot_table *spt = &mm->context.spt;
57
58 memset(spt, 0, sizeof(*spt));
59}
60
54static void hpte_flush_range(struct mm_struct *mm, unsigned long addr, 61static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
55 int npages) 62 int npages)
56{ 63{
@@ -87,7 +94,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
87static void subpage_prot_clear(unsigned long addr, unsigned long len) 94static void subpage_prot_clear(unsigned long addr, unsigned long len)
88{ 95{
89 struct mm_struct *mm = current->mm; 96 struct mm_struct *mm = current->mm;
90 struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd); 97 struct subpage_prot_table *spt = &mm->context.spt;
91 u32 **spm, *spp; 98 u32 **spm, *spp;
92 int i, nw; 99 int i, nw;
93 unsigned long next, limit; 100 unsigned long next, limit;
@@ -136,7 +143,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
136long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map) 143long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
137{ 144{
138 struct mm_struct *mm = current->mm; 145 struct mm_struct *mm = current->mm;
139 struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd); 146 struct subpage_prot_table *spt = &mm->context.spt;
140 u32 **spm, *spp; 147 u32 **spm, *spp;
141 int i, nw; 148 int i, nw;
142 unsigned long next, limit; 149 unsigned long next, limit;
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 2b2f35f6985e..282d9306361f 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -53,11 +53,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
53 53
54 i = batch->index; 54 i = batch->index;
55 55
56 /* We mask the address for the base page size. Huge pages will
57 * have applied their own masking already
58 */
59 addr &= PAGE_MASK;
60
61 /* Get page size (maybe move back to caller). 56 /* Get page size (maybe move back to caller).
62 * 57 *
63 * NOTE: when using special 64K mappings in 4K environment like 58 * NOTE: when using special 64K mappings in 4K environment like
@@ -75,6 +70,9 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
75 } else 70 } else
76 psize = pte_pagesize_index(mm, addr, pte); 71 psize = pte_pagesize_index(mm, addr, pte);
77 72
73 /* Mask the address for the correct page size */
74 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
75
78 /* Build full vaddr */ 76 /* Build full vaddr */
79 if (!is_kernel_addr(addr)) { 77 if (!is_kernel_addr(addr)) {
80 ssize = user_segment_size(addr); 78 ssize = user_segment_size(addr);
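
The tlb_hash64.c hunk above replaces the blanket PAGE_MASK with a mask built from the real page size of the mapping. The masking expression itself is simple; a tiny standalone check with hypothetical page shifts:

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = 0x1234567UL;
            unsigned int shifts[] = { 12, 16, 24 }; /* 4K, 64K, 16M */
            int i;

            for (i = 0; i < 3; i++) {
                    unsigned long masked = addr & ~((1UL << shifts[i]) - 1);

                    printf("shift %2u: %#lx -> %#lx\n", shifts[i], addr, masked);
            }
            return 0;
    }
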
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index a6ce80566625..da9b20a63769 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -79,7 +79,7 @@ cpld_unmask_irq(unsigned int irq)
79} 79}
80 80
81static struct irq_chip cpld_pic = { 81static struct irq_chip cpld_pic = {
82 .typename = " CPLD PIC ", 82 .name = " CPLD PIC ",
83 .mask = cpld_mask_irq, 83 .mask = cpld_mask_irq,
84 .ack = cpld_mask_irq, 84 .ack = cpld_mask_irq,
85 .unmask = cpld_unmask_irq, 85 .unmask = cpld_unmask_irq,
@@ -132,7 +132,7 @@ static int
132cpld_pic_host_map(struct irq_host *h, unsigned int virq, 132cpld_pic_host_map(struct irq_host *h, unsigned int virq,
133 irq_hw_number_t hw) 133 irq_hw_number_t hw)
134{ 134{
135 get_irq_desc(virq)->status |= IRQ_LEVEL; 135 irq_to_desc(virq)->status |= IRQ_LEVEL;
136 set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq); 136 set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq);
137 return 0; 137 return 0;
138} 138}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 8b8e9560a315..47ea1be1481b 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -62,3 +62,8 @@ config PPC_MPC5200_GPIO
62 select GENERIC_GPIO 62 select GENERIC_GPIO
63 help 63 help
64 Enable gpiolib support for mpc5200 based boards 64 Enable gpiolib support for mpc5200 based boards
65
66config PPC_MPC5200_LPBFIFO
67 tristate "MPC5200 LocalPlus bus FIFO driver"
68 depends on PPC_MPC52xx
69 select PPC_BESTCOMM_GEN_BD
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index bfd4f52cf3dd..2bc8cd0c5cfc 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -15,3 +15,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
15endif 15endif
16 16
17obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o 17obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
18obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 68e4f1696d14..cc0c854291d7 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -74,7 +74,7 @@ static void media5200_irq_mask(unsigned int virq)
74} 74}
75 75
76static struct irq_chip media5200_irq_chip = { 76static struct irq_chip media5200_irq_chip = {
77 .typename = "Media5200 FPGA", 77 .name = "Media5200 FPGA",
78 .unmask = media5200_irq_unmask, 78 .unmask = media5200_irq_unmask,
79 .mask = media5200_irq_mask, 79 .mask = media5200_irq_mask,
80 .mask_ack = media5200_irq_mask, 80 .mask_ack = media5200_irq_mask,
@@ -114,7 +114,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
114static int media5200_irq_map(struct irq_host *h, unsigned int virq, 114static int media5200_irq_map(struct irq_host *h, unsigned int virq,
115 irq_hw_number_t hw) 115 irq_hw_number_t hw)
116{ 116{
117 struct irq_desc *desc = get_irq_desc(virq); 117 struct irq_desc *desc = irq_to_desc(virq);
118 118
119 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); 119 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
120 set_irq_chip_data(virq, &media5200_irq); 120 set_irq_chip_data(virq, &media5200_irq);
@@ -127,7 +127,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq,
127} 127}
128 128
129static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct, 129static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
130 u32 *intspec, unsigned int intsize, 130 const u32 *intspec, unsigned int intsize,
131 irq_hw_number_t *out_hwirq, 131 irq_hw_number_t *out_hwirq,
132 unsigned int *out_flags) 132 unsigned int *out_flags)
133{ 133{
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index bfbcd418e690..6f8ebe1085b3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -16,8 +16,14 @@
16 * output signals or measure input signals. 16 * output signals or measure input signals.
17 * 17 *
18 * This driver supports the GPIO and IRQ controller functions of the GPT 18 * This driver supports the GPIO and IRQ controller functions of the GPT
19 * device. Timer functions are not yet supported, nor is the watchdog 19 * device. Timer functions are not yet supported.
20 * timer. 20 *
21 * The timer gpt0 can be used as watchdog (wdt). If the wdt mode is used,
22 * this prevents the use of any gpt0 gpt function (i.e. they will fail with
23 * -EBUSY). Thus, the safety wdt function always has precedence over the gpt
24 * function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT,
25 * this means that gpt0 is locked in wdt mode until the next reboot - this
26 * may be a requirement in safety applications.
21 * 27 *
22 * To use the GPIO function, the following two properties must be added 28 * To use the GPIO function, the following two properties must be added
23 * to the device tree node for the gpt device (typically in the .dts file 29 * to the device tree node for the gpt device (typically in the .dts file
@@ -46,17 +52,24 @@
46 * the output mode. This driver does not change the output mode setting. 52 * the output mode. This driver does not change the output mode setting.
47 */ 53 */
48 54
55#include <linux/device.h>
49#include <linux/irq.h> 56#include <linux/irq.h>
50#include <linux/interrupt.h> 57#include <linux/interrupt.h>
51#include <linux/io.h> 58#include <linux/io.h>
59#include <linux/list.h>
60#include <linux/mutex.h>
52#include <linux/of.h> 61#include <linux/of.h>
53#include <linux/of_platform.h> 62#include <linux/of_platform.h>
54#include <linux/of_gpio.h> 63#include <linux/of_gpio.h>
55#include <linux/kernel.h> 64#include <linux/kernel.h>
65#include <linux/watchdog.h>
66#include <linux/miscdevice.h>
67#include <linux/uaccess.h>
68#include <asm/div64.h>
56#include <asm/mpc52xx.h> 69#include <asm/mpc52xx.h>
57 70
58MODULE_DESCRIPTION("Freescale MPC52xx gpt driver"); 71MODULE_DESCRIPTION("Freescale MPC52xx gpt driver");
59MODULE_AUTHOR("Sascha Hauer, Grant Likely"); 72MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht Dreß");
60MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
61 74
62/** 75/**
@@ -66,18 +79,27 @@ MODULE_LICENSE("GPL");
66 * @lock: spinlock to coordinate between different functions. 79 * @lock: spinlock to coordinate between different functions.
67 * @of_gc: of_gpio_chip instance structure; used when GPIO is enabled 80 * @of_gc: of_gpio_chip instance structure; used when GPIO is enabled
68 * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported 81 * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported
82 * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
83 * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
84 * if the timer is actively used as wdt which blocks gpt functions
69 */ 85 */
70struct mpc52xx_gpt_priv { 86struct mpc52xx_gpt_priv {
87 struct list_head list; /* List of all GPT devices */
71 struct device *dev; 88 struct device *dev;
72 struct mpc52xx_gpt __iomem *regs; 89 struct mpc52xx_gpt __iomem *regs;
73 spinlock_t lock; 90 spinlock_t lock;
74 struct irq_host *irqhost; 91 struct irq_host *irqhost;
92 u32 ipb_freq;
93 u8 wdt_mode;
75 94
76#if defined(CONFIG_GPIOLIB) 95#if defined(CONFIG_GPIOLIB)
77 struct of_gpio_chip of_gc; 96 struct of_gpio_chip of_gc;
78#endif 97#endif
79}; 98};
80 99
100LIST_HEAD(mpc52xx_gpt_list);
101DEFINE_MUTEX(mpc52xx_gpt_list_mutex);
102
81#define MPC52xx_GPT_MODE_MS_MASK (0x07) 103#define MPC52xx_GPT_MODE_MS_MASK (0x07)
82#define MPC52xx_GPT_MODE_MS_IC (0x01) 104#define MPC52xx_GPT_MODE_MS_IC (0x01)
83#define MPC52xx_GPT_MODE_MS_OC (0x02) 105#define MPC52xx_GPT_MODE_MS_OC (0x02)
@@ -88,15 +110,25 @@ struct mpc52xx_gpt_priv {
88#define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20) 110#define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20)
89#define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30) 111#define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30)
90 112
113#define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000)
114#define MPC52xx_GPT_MODE_CONTINUOUS (0x0400)
115#define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200)
91#define MPC52xx_GPT_MODE_IRQ_EN (0x0100) 116#define MPC52xx_GPT_MODE_IRQ_EN (0x0100)
117#define MPC52xx_GPT_MODE_WDT_EN (0x8000)
92 118
93#define MPC52xx_GPT_MODE_ICT_MASK (0x030000) 119#define MPC52xx_GPT_MODE_ICT_MASK (0x030000)
94#define MPC52xx_GPT_MODE_ICT_RISING (0x010000) 120#define MPC52xx_GPT_MODE_ICT_RISING (0x010000)
95#define MPC52xx_GPT_MODE_ICT_FALLING (0x020000) 121#define MPC52xx_GPT_MODE_ICT_FALLING (0x020000)
96#define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000) 122#define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000)
97 123
124#define MPC52xx_GPT_MODE_WDT_PING (0xa5)
125
98#define MPC52xx_GPT_STATUS_IRQMASK (0x000f) 126#define MPC52xx_GPT_STATUS_IRQMASK (0x000f)
99 127
128#define MPC52xx_GPT_CAN_WDT (1 << 0)
129#define MPC52xx_GPT_IS_WDT (1 << 1)
130
131
100/* --------------------------------------------------------------------- 132/* ---------------------------------------------------------------------
101 * Cascaded interrupt controller hooks 133 * Cascaded interrupt controller hooks
102 */ 134 */
@@ -149,7 +181,7 @@ static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type)
149} 181}
150 182
151static struct irq_chip mpc52xx_gpt_irq_chip = { 183static struct irq_chip mpc52xx_gpt_irq_chip = {
152 .typename = "MPC52xx GPT", 184 .name = "MPC52xx GPT",
153 .unmask = mpc52xx_gpt_irq_unmask, 185 .unmask = mpc52xx_gpt_irq_unmask,
154 .mask = mpc52xx_gpt_irq_mask, 186 .mask = mpc52xx_gpt_irq_mask,
155 .ack = mpc52xx_gpt_irq_ack, 187 .ack = mpc52xx_gpt_irq_ack,
@@ -182,7 +214,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
182} 214}
183 215
184static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, 216static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
185 u32 *intspec, unsigned int intsize, 217 const u32 *intspec, unsigned int intsize,
186 irq_hw_number_t *out_hwirq, 218 irq_hw_number_t *out_hwirq,
187 unsigned int *out_flags) 219 unsigned int *out_flags)
188{ 220{
@@ -190,7 +222,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
190 222
191 dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]); 223 dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]);
192 224
193 if ((intsize < 1) || (intspec[0] < 1) || (intspec[0] > 3)) { 225 if ((intsize < 1) || (intspec[0] > 3)) {
194 dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name); 226 dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name);
195 return -EINVAL; 227 return -EINVAL;
196 } 228 }
@@ -211,13 +243,11 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
211{ 243{
212 int cascade_virq; 244 int cascade_virq;
213 unsigned long flags; 245 unsigned long flags;
214 246 u32 mode;
215 /* Only setup cascaded IRQ if device tree claims the GPT is
216 * an interrupt controller */
217 if (!of_find_property(node, "interrupt-controller", NULL))
218 return;
219 247
220 cascade_virq = irq_of_parse_and_map(node, 0); 248 cascade_virq = irq_of_parse_and_map(node, 0);
249 if (!cascade_virq)
250 return;
221 251
222 gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, 252 gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1,
223 &mpc52xx_gpt_irq_ops, -1); 253 &mpc52xx_gpt_irq_ops, -1);
@@ -227,14 +257,16 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
227 } 257 }
228 258
229 gpt->irqhost->host_data = gpt; 259 gpt->irqhost->host_data = gpt;
230
231 set_irq_data(cascade_virq, gpt); 260 set_irq_data(cascade_virq, gpt);
232 set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); 261 set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
233 262
234 /* Set to Input Capture mode */ 263 /* If the GPT is currently disabled, then change it to be in Input
264 * Capture mode. If the mode is non-zero, then the pin could be
265 * already in use for something. */
235 spin_lock_irqsave(&gpt->lock, flags); 266 spin_lock_irqsave(&gpt->lock, flags);
236 clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK, 267 mode = in_be32(&gpt->regs->mode);
237 MPC52xx_GPT_MODE_MS_IC); 268 if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0)
269 out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC);
238 spin_unlock_irqrestore(&gpt->lock, flags); 270 spin_unlock_irqrestore(&gpt->lock, flags);
239 271
240 dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq); 272 dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq);
@@ -335,6 +367,354 @@ static void
335mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { } 367mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
336#endif /* defined(CONFIG_GPIOLIB) */ 368#endif /* defined(CONFIG_GPIOLIB) */
337 369
370/***********************************************************************
371 * Timer API
372 */
373
374/**
375 * mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number
376 * @irq: irq of timer.
377 */
378struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
379{
380 struct mpc52xx_gpt_priv *gpt;
381 struct list_head *pos;
382
383 /* Iterate over the list of timers looking for a matching device */
384 mutex_lock(&mpc52xx_gpt_list_mutex);
385 list_for_each(pos, &mpc52xx_gpt_list) {
386 gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
387 if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
388 mutex_unlock(&mpc52xx_gpt_list_mutex);
389 return gpt;
390 }
391 }
392 mutex_unlock(&mpc52xx_gpt_list_mutex);
393
394 return NULL;
395}
396EXPORT_SYMBOL(mpc52xx_gpt_from_irq);
397
398static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
399 int continuous, int as_wdt)
400{
401 u32 clear, set;
402 u64 clocks;
403 u32 prescale;
404 unsigned long flags;
405
406 clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS;
407 set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE;
408 if (as_wdt) {
409 clear |= MPC52xx_GPT_MODE_IRQ_EN;
410 set |= MPC52xx_GPT_MODE_WDT_EN;
411 } else if (continuous)
412 set |= MPC52xx_GPT_MODE_CONTINUOUS;
413
414 /* Determine the number of clocks in the requested period. 64 bit
415 * arithmatic is done here to preserve the precision until the value
416 * is scaled back down into the u32 range. Period is in 'ns', bus
417 * frequency is in Hz. */
418 clocks = period * (u64)gpt->ipb_freq;
419 do_div(clocks, 1000000000); /* Scale it down to ns range */
420
421 /* This device cannot handle a clock count greater than 32 bits */
422 if (clocks > 0xffffffff)
423 return -EINVAL;
424
425 /* Calculate the prescaler and count values from the clocks value.
426 * 'clocks' is the number of clock ticks in the period. The timer
427 * has 16 bit precision and a 16 bit prescaler. Prescaler is
428 * calculated by integer dividing the clocks by 0x10000 (shifting
429 * down 16 bits) to obtain the smallest possible divisor for clocks
430 * to get a 16 bit count value.
431 *
432 * Note: the prescale register is '1' based, not '0' based. ie. a
433 * value of '1' means divide the clock by one. 0xffff divides the
434 * clock by 0xffff. '0x0000' does not divide by zero, but wraps
435 * around and divides by 0x10000. That is why prescale must be
436 * a u32 variable, not a u16, for this calculation. */
437 prescale = (clocks >> 16) + 1;
438 do_div(clocks, prescale);
439 if (clocks > 0xffff) {
440 pr_err("calculation error; prescale:%x clocks:%llx\n",
441 prescale, clocks);
442 return -EINVAL;
443 }
444
445 /* Set and enable the timer, reject an attempt to use a wdt as gpt */
446 spin_lock_irqsave(&gpt->lock, flags);
447 if (as_wdt)
448 gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
449 else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
450 spin_unlock_irqrestore(&gpt->lock, flags);
451 return -EBUSY;
452 }
453 out_be32(&gpt->regs->count, prescale << 16 | clocks);
454 clrsetbits_be32(&gpt->regs->mode, clear, set);
455 spin_unlock_irqrestore(&gpt->lock, flags);
456
457 return 0;
458}
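
The prescaler arithmetic described in the comments of mpc52xx_gpt_do_start() can be checked in isolation. A standalone recomputation for a hypothetical 33 MHz IPB clock and a 1 s period (values chosen only for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ipb_freq = 33000000ULL;        /* hypothetical IPB clock */
            uint64_t period_ns = 1000000000ULL;     /* requested period: 1 s */

            uint64_t clocks = period_ns * ipb_freq / 1000000000ULL;
            uint32_t prescale = (uint32_t)(clocks >> 16) + 1;
            uint64_t count = clocks / prescale;

            /* count must fit in 16 bits for the hardware register. */
            printf("clocks=%llu prescale=%u count=%llu\n",
                   (unsigned long long)clocks, prescale,
                   (unsigned long long)count);
            return 0;
    }

With these numbers the sketch yields prescale 504 and a 16-bit count of 65476, which matches what the driver's calculation would program into the count register.
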
459
460/**
461 * mpc52xx_gpt_start_timer - Set and enable the GPT timer
462 * @gpt: Pointer to gpt private data structure
463 * @period: period of timer in ns; max. ~130s @ 33MHz IPB clock
464 * @continuous: set to 1 to make timer continuous free running
465 *
466 * An interrupt will be generated every time the timer fires
467 */
468int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
469 int continuous)
470{
471 return mpc52xx_gpt_do_start(gpt, period, continuous, 0);
472}
473EXPORT_SYMBOL(mpc52xx_gpt_start_timer);
474
475/**
476 * mpc52xx_gpt_stop_timer - Stop a gpt
477 * @gpt: Pointer to gpt private data structure
478 *
479 * Returns an error if attempting to stop a wdt
480 */
481int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt)
482{
483 unsigned long flags;
484
485 /* reject the operation if the timer is used as watchdog (gpt 0 only) */
486 spin_lock_irqsave(&gpt->lock, flags);
487 if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
488 spin_unlock_irqrestore(&gpt->lock, flags);
489 return -EBUSY;
490 }
491
492 clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE);
493 spin_unlock_irqrestore(&gpt->lock, flags);
494 return 0;
495}
496EXPORT_SYMBOL(mpc52xx_gpt_stop_timer);
497
498/**
499 * mpc52xx_gpt_timer_period - Read the timer period
500 * @gpt: Pointer to gpt private data structure
501 *
502 * Returns the timer period in ns
503 */
504u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
505{
506 u64 period;
507 u64 prescale;
508 unsigned long flags;
509
510 spin_lock_irqsave(&gpt->lock, flags);
511 period = in_be32(&gpt->regs->count);
512 spin_unlock_irqrestore(&gpt->lock, flags);
513
514 prescale = period >> 16;
515 period &= 0xffff;
516 if (prescale == 0)
517 prescale = 0x10000;
518 period = period * prescale * 1000000000ULL;
519 do_div(period, (u64)gpt->ipb_freq);
520 return period;
521}
522EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
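
Going the other way, mpc52xx_gpt_timer_period() turns the count register back into nanoseconds, including the 0 -> 0x10000 prescaler wrap noted earlier. A standalone sketch with the same hypothetical 33 MHz clock and the prescale/count pair from the previous example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ipb_freq = 33000000ULL;        /* hypothetical IPB clock */
            uint32_t count_reg = (504u << 16) | 65476u;     /* prescale:count */

            uint64_t prescale = count_reg >> 16;
            uint64_t count = count_reg & 0xffff;
            uint64_t period_ns;

            if (prescale == 0)      /* register value 0 means divide by 0x10000 */
                    prescale = 0x10000;

            period_ns = count * prescale * 1000000000ULL / ipb_freq;
            printf("period = %llu ns\n", (unsigned long long)period_ns);
            return 0;
    }

This prints roughly 999997090 ns rather than an exact second, which is precisely the rounding problem the watchdog ioctl below has to cope with.
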
523
524#if defined(CONFIG_MPC5200_WDT)
525/***********************************************************************
526 * Watchdog API for gpt0
527 */
528
529#define WDT_IDENTITY "mpc52xx watchdog on GPT0"
530
531/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
532static unsigned long wdt_is_active;
533
534/* wdt-capable gpt */
535static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt;
536
537/* low-level wdt functions */
538static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt)
539{
540 unsigned long flags;
541
542 spin_lock_irqsave(&gpt_wdt->lock, flags);
543 out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING);
544 spin_unlock_irqrestore(&gpt_wdt->lock, flags);
545}
546
547/* wdt misc device api */
548static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data,
549 size_t len, loff_t *ppos)
550{
551 struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
552 mpc52xx_gpt_wdt_ping(gpt_wdt);
553 return 0;
554}
555
556static struct watchdog_info mpc5200_wdt_info = {
557 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
558 .identity = WDT_IDENTITY,
559};
560
561static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
562 unsigned long arg)
563{
564 struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
565 int __user *data = (int __user *)arg;
566 int timeout;
567 u64 real_timeout;
568 int ret = 0;
569
570 switch (cmd) {
571 case WDIOC_GETSUPPORT:
572 ret = copy_to_user(data, &mpc5200_wdt_info,
573 sizeof(mpc5200_wdt_info));
574 if (ret)
575 ret = -EFAULT;
576 break;
577
578 case WDIOC_GETSTATUS:
579 case WDIOC_GETBOOTSTATUS:
580 ret = put_user(0, data);
581 break;
582
583 case WDIOC_KEEPALIVE:
584 mpc52xx_gpt_wdt_ping(gpt_wdt);
585 break;
586
587 case WDIOC_SETTIMEOUT:
588 ret = get_user(timeout, data);
589 if (ret)
590 break;
591 real_timeout = (u64) timeout * 1000000000ULL;
592 ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1);
593 if (ret)
594 break;
595 /* fall through and return the timeout */
596
597 case WDIOC_GETTIMEOUT:
 598 /* we need to round here so as to avoid e.g. the following
599 * situation:
600 * - timeout requested is 1 second;
601 * - real timeout @33MHz is 999997090ns
602 * - the int divide by 10^9 will return 0.
603 */
604 real_timeout =
605 mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL;
606 do_div(real_timeout, 1000000000ULL);
607 timeout = (int) real_timeout;
608 ret = put_user(timeout, data);
609 break;
610
611 default:
612 ret = -ENOTTY;
613 }
614 return ret;
615}
616
617static int mpc52xx_wdt_open(struct inode *inode, struct file *file)
618{
619 int ret;
620
621 /* sanity check */
622 if (!mpc52xx_gpt_wdt)
623 return -ENODEV;
624
625 /* /dev/watchdog can only be opened once */
626 if (test_and_set_bit(0, &wdt_is_active))
627 return -EBUSY;
628
629 /* Set and activate the watchdog with 30 seconds timeout */
630 ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL,
631 0, 1);
632 if (ret) {
633 clear_bit(0, &wdt_is_active);
634 return ret;
635 }
636
637 file->private_data = mpc52xx_gpt_wdt;
638 return nonseekable_open(inode, file);
639}
640
641static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
642{
643 /* note: releasing the wdt in NOWAYOUT-mode does not stop it */
644#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
645 struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
646 unsigned long flags;
647
648 spin_lock_irqsave(&gpt_wdt->lock, flags);
649 clrbits32(&gpt_wdt->regs->mode,
650 MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN);
651 gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT;
652 spin_unlock_irqrestore(&gpt_wdt->lock, flags);
653#endif
654 clear_bit(0, &wdt_is_active);
655 return 0;
656}
657
658
659static const struct file_operations mpc52xx_wdt_fops = {
660 .owner = THIS_MODULE,
661 .llseek = no_llseek,
662 .write = mpc52xx_wdt_write,
663 .unlocked_ioctl = mpc52xx_wdt_ioctl,
664 .open = mpc52xx_wdt_open,
665 .release = mpc52xx_wdt_release,
666};
667
668static struct miscdevice mpc52xx_wdt_miscdev = {
669 .minor = WATCHDOG_MINOR,
670 .name = "watchdog",
671 .fops = &mpc52xx_wdt_fops,
672};
673
674static int __devinit mpc52xx_gpt_wdt_init(void)
675{
676 int err;
677
678 /* try to register the watchdog misc device */
679 err = misc_register(&mpc52xx_wdt_miscdev);
680 if (err)
681 pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY);
682 else
683 pr_info("%s: watchdog device registered\n", WDT_IDENTITY);
684 return err;
685}
686
687static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
688 const u32 *period)
689{
690 u64 real_timeout;
691
692 /* remember the gpt for the wdt operation */
693 mpc52xx_gpt_wdt = gpt;
694
695 /* configure the wdt if the device tree contained a timeout */
696 if (!period || *period == 0)
697 return 0;
698
699 real_timeout = (u64) *period * 1000000000ULL;
700 if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1))
701 dev_warn(gpt->dev, "starting as wdt failed\n");
702 else
703 dev_info(gpt->dev, "watchdog set to %us timeout\n", *period);
704 return 0;
705}
706
707#else
708
709static int __devinit mpc52xx_gpt_wdt_init(void)
710{
711 return 0;
712}
713
714#define mpc52xx_gpt_wdt_setup(x, y) (0)
715
716#endif /* CONFIG_MPC5200_WDT */
717
338/* --------------------------------------------------------------------- 718/* ---------------------------------------------------------------------
339 * of_platform bus binding code 719 * of_platform bus binding code
340 */ 720 */
@@ -349,6 +729,7 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
349 729
350 spin_lock_init(&gpt->lock); 730 spin_lock_init(&gpt->lock);
351 gpt->dev = &ofdev->dev; 731 gpt->dev = &ofdev->dev;
732 gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node);
352 gpt->regs = of_iomap(ofdev->node, 0); 733 gpt->regs = of_iomap(ofdev->node, 0);
353 if (!gpt->regs) { 734 if (!gpt->regs) {
354 kfree(gpt); 735 kfree(gpt);
@@ -360,6 +741,26 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
360 mpc52xx_gpt_gpio_setup(gpt, ofdev->node); 741 mpc52xx_gpt_gpio_setup(gpt, ofdev->node);
361 mpc52xx_gpt_irq_setup(gpt, ofdev->node); 742 mpc52xx_gpt_irq_setup(gpt, ofdev->node);
362 743
744 mutex_lock(&mpc52xx_gpt_list_mutex);
745 list_add(&gpt->list, &mpc52xx_gpt_list);
746 mutex_unlock(&mpc52xx_gpt_list_mutex);
747
748 /* check if this device could be a watchdog */
749 if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) ||
750 of_get_property(ofdev->node, "has-wdt", NULL)) {
751 const u32 *on_boot_wdt;
752
753 gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
754 on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot",
755 NULL);
756 if (on_boot_wdt) {
757 dev_info(gpt->dev, "used as watchdog\n");
758 gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
759 } else
760 dev_info(gpt->dev, "can function as watchdog\n");
761 mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt);
762 }
763
363 return 0; 764 return 0;
364} 765}
365 766
@@ -394,3 +795,4 @@ static int __init mpc52xx_gpt_init(void)
394 795
395/* Make sure GPIOs and IRQs get set up before anyone tries to use them */ 796/* Make sure GPIOs and IRQs get set up before anyone tries to use them */
396subsys_initcall(mpc52xx_gpt_init); 797subsys_initcall(mpc52xx_gpt_init);
798device_initcall(mpc52xx_gpt_wdt_init);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
new file mode 100644
index 000000000000..929d017535a3
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -0,0 +1,560 @@
1/*
2 * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
3 *
4 * Copyright (C) 2009 Secret Lab Technologies Ltd.
5 *
6 * This file is released under the GPLv2
7 *
8 * Todo:
9 * - Add support for multiple requests to be queued.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/kernel.h>
14#include <linux/of.h>
15#include <linux/of_platform.h>
16#include <linux/spinlock.h>
17#include <asm/io.h>
18#include <asm/prom.h>
19#include <asm/mpc52xx.h>
20#include <asm/time.h>
21
22#include <sysdev/bestcomm/bestcomm.h>
23#include <sysdev/bestcomm/bestcomm_priv.h>
24#include <sysdev/bestcomm/gen_bd.h>
25
26MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
27MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
28MODULE_LICENSE("GPL");
29
30#define LPBFIFO_REG_PACKET_SIZE (0x00)
31#define LPBFIFO_REG_START_ADDRESS (0x04)
32#define LPBFIFO_REG_CONTROL (0x08)
33#define LPBFIFO_REG_ENABLE (0x0C)
34#define LPBFIFO_REG_BYTES_DONE_STATUS (0x14)
35#define LPBFIFO_REG_FIFO_DATA (0x40)
36#define LPBFIFO_REG_FIFO_STATUS (0x44)
37#define LPBFIFO_REG_FIFO_CONTROL (0x48)
38#define LPBFIFO_REG_FIFO_ALARM (0x4C)
39
40struct mpc52xx_lpbfifo {
41 struct device *dev;
42 phys_addr_t regs_phys;
43 void __iomem *regs;
44 int irq;
45 spinlock_t lock;
46
47 struct bcom_task *bcom_tx_task;
48 struct bcom_task *bcom_rx_task;
49 struct bcom_task *bcom_cur_task;
50
51 /* Current state data */
52 struct mpc52xx_lpbfifo_request *req;
53 int dma_irqs_enabled;
54};
55
56/* The MPC5200 has only one fifo, so only need one instance structure */
57static struct mpc52xx_lpbfifo lpbfifo;
58
59/**
 60 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
61 */
62static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
63{
64 size_t transfer_size = req->size - req->pos;
65 struct bcom_bd *bd;
66 void __iomem *reg;
67 u32 *data;
68 int i;
69 int bit_fields;
70 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
71 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
72 int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
73
 74 /* Set and clear the reset bits; this is good practice per the User Manual */
75 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
76
77 /* set master enable bit */
78 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
79 if (!dma) {
80 /* While the FIFO can be setup for transfer sizes as large as
81 * 16M-1, the FIFO itself is only 512 bytes deep and it does
82 * not generate interrupts for FIFO full events (only transfer
83 * complete will raise an IRQ). Therefore when not using
 84 * Bestcomm to drive the FIFO, it needs to either be polled, or
 85 * transfers need to be constrained to the size of the fifo.
86 *
87 * This driver restricts the size of the transfer
88 */
89 if (transfer_size > 512)
90 transfer_size = 512;
91
92 /* Load the FIFO with data */
93 if (write) {
94 reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
95 data = req->data + req->pos;
96 for (i = 0; i < transfer_size; i += 4)
97 out_be32(reg, *data++);
98 }
99
100 /* Unmask both error and completion irqs */
101 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
102 } else {
103 /* Choose the correct direction
104 *
105 * Configure the watermarks so DMA will always complete correctly.
106 * It may be worth experimenting with the ALARM value to see if
 107 * there is a performance impact. However, if it is wrong there
108 * is a risk of DMA not transferring the last chunk of data
109 */
110 if (write) {
111 out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
112 out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
113 lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
114 } else {
115 out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
116 out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
117 lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;
118
119 if (poll_dma) {
120 if (lpbfifo.dma_irqs_enabled) {
121 disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
122 lpbfifo.dma_irqs_enabled = 0;
123 }
124 } else {
125 if (!lpbfifo.dma_irqs_enabled) {
126 enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
127 lpbfifo.dma_irqs_enabled = 1;
128 }
129 }
130 }
131
132 bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
133 bd->status = transfer_size;
134 if (!write) {
135 /*
136 * In the DMA read case, the DMA doesn't complete,
137 * possibly due to incorrect watermarks in the ALARM
138 * and CONTROL regs. For now instead of trying to
139 * determine the right watermarks that will make this
140 * work, just increase the number of bytes the FIFO is
141 * expecting.
142 *
143 * When submitting another operation, the FIFO will get
144 * reset, so the condition of the FIFO waiting for a
145 * non-existent 4 bytes will get cleared.
146 */
147 transfer_size += 4; /* BLECH! */
148 }
149 bd->data[0] = req->data_phys + req->pos;
150 bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);
151
152 /* error irq & master enabled bit */
153 bit_fields = 0x00000201;
154
155 /* Unmask irqs */
156 if (write && (!poll_dma))
157 bit_fields |= 0x00000100; /* completion irq too */
158 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
159 }
160
161 /* Set transfer size, width, chip select and READ mode */
162 out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
163 req->offset + req->pos);
164 out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);
165
166 bit_fields = req->cs << 24 | 0x000008;
167 if (!write)
168 bit_fields |= 0x010000; /* read mode */
169 out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
170
171 /* Kick it off */
172 out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
173 if (dma)
174 bcom_enable(lpbfifo.bcom_cur_task);
175}
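
For the PIO path, mpc52xx_lpbfifo_kick() clamps each chunk to the 512-byte FIFO depth and feeds it 32 bits at a time; the completion interrupt then advances the position and the next chunk is kicked. A hedged simulation of just that chunking loop, with the register write replaced by a counter since no real hardware is touched:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    int main(void)
    {
            uint32_t data[300];                     /* pretend request buffer */
            size_t size = sizeof(data);             /* 1200 bytes requested */
            size_t pos = 0;
            unsigned chunks = 0, words_written = 0;

            while (pos < size) {
                    size_t transfer = size - pos;
                    size_t i;

                    if (transfer > 512)             /* FIFO depth limit for PIO */
                            transfer = 512;

                    /* Fill the FIFO one 32-bit word at a time (simulated). */
                    for (i = 0; i < transfer; i += 4)
                            words_written++;

                    pos += transfer;        /* the completion irq would do this */
                    chunks++;
            }

            printf("%u chunks of <=512 bytes, %u word writes total\n",
                   chunks, words_written);
            return 0;
    }
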
176
177/**
178 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
179 *
180 * On transmit, the dma completion irq triggers before the fifo completion
181 * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
 182 * task completion irq because everything is not really done until the LPB FIFO
183 * completion irq triggers.
184 *
185 * In other words:
186 * For DMA, on receive, the "Fat Lady" is the bestcom completion irq. on
187 * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
188 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
189 *
190 * Reasons for entering this routine:
191 * 1) PIO mode rx and tx completion irq
192 * 2) DMA interrupt mode tx completion irq
193 * 3) DMA polled mode tx
194 *
195 * Exit conditions:
196 * 1) Transfer aborted
197 * 2) FIFO complete without DMA; more data to do
 198 * 3) FIFO complete without DMA; all data transferred
199 * 4) FIFO complete using DMA
200 *
201 * Condition 1 can occur regardless of whether or not DMA is used.
202 * It requires executing the callback to report the error and exiting
203 * immediately.
204 *
205 * Condition 2 requires programming the FIFO with the next block of data
206 *
207 * Condition 3 requires executing the callback to report completion
208 *
209 * Condition 4 means the same as 3, except that we also retrieve the bcom
210 * buffer so DMA doesn't get clogged up.
211 *
212 * To make things trickier, the spinlock must be dropped before
213 * executing the callback, otherwise we could end up with a deadlock
214 * or nested spinlock condition. The out path is non-trivial, so
215 * extra fiddling is done to make sure all paths lead to the same
216 * outbound code.
217 */
218static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
219{
220 struct mpc52xx_lpbfifo_request *req;
221 u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
222 void __iomem *reg;
223 u32 *data;
224 int count, i;
225 int do_callback = 0;
226 u32 ts;
227 unsigned long flags;
228 int dma, write, poll_dma;
229
230 spin_lock_irqsave(&lpbfifo.lock, flags);
231 ts = get_tbl();
232
233 req = lpbfifo.req;
234 if (!req) {
235 spin_unlock_irqrestore(&lpbfifo.lock, flags);
236 pr_err("bogus LPBFIFO IRQ\n");
237 return IRQ_HANDLED;
238 }
239
240 dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
241 write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
242 poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
243
244 if (dma && !write) {
245 spin_unlock_irqrestore(&lpbfifo.lock, flags);
 246 pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
247 return IRQ_HANDLED;
248 }
249
250 if ((status & 0x01) == 0) {
251 goto out;
252 }
253
254 /* check abort bit */
255 if (status & 0x10) {
256 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
257 do_callback = 1;
258 goto out;
259 }
260
261 /* Read result from hardware */
262 count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
263 count &= 0x00ffffff;
264
265 if (!dma && !write) {
266 /* copy the data out of the FIFO */
267 reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
268 data = req->data + req->pos;
269 for (i = 0; i < count; i += 4)
270 *data++ = in_be32(reg);
271 }
272
273 /* Update transfer position and count */
274 req->pos += count;
275
276 /* Decide what to do next */
277 if (req->size - req->pos)
278 mpc52xx_lpbfifo_kick(req); /* more work to do */
279 else
280 do_callback = 1;
281
282 out:
283 /* Clear the IRQ */
284 out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);
285
286 if (dma && (status & 0x11)) {
287 /*
288 * Count the DMA as complete only when the FIFO completion
289 * status or abort bits are set.
290 *
291 * (status & 0x01) should always be the case except sometimes
292 * when using polled DMA.
293 *
294 * (status & 0x10) {transfer aborted}: This case needs more
295 * testing.
296 */
297 bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
298 }
299 req->last_byte = ((u8 *)req->data)[req->size - 1];
300
301 /* When the do_callback flag is set, it means the transfer is finished,
302 * so set the FIFO as idle */
303 if (do_callback)
304 lpbfifo.req = NULL;
305
306 if (irq != 0) /* don't increment on polled case */
307 req->irq_count++;
308
309 req->irq_ticks += get_tbl() - ts;
310 spin_unlock_irqrestore(&lpbfifo.lock, flags);
311
312 /* Spinlock is released; it is now safe to call the callback */
313 if (do_callback && req->callback)
314 req->callback(req);
315
316 return IRQ_HANDLED;
317}
318
319/**
320 * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
321 *
322 * Only used when receiving data.
323 */
324static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
325{
326 struct mpc52xx_lpbfifo_request *req;
327 unsigned long flags;
328 u32 status;
329 u32 ts;
330
331 spin_lock_irqsave(&lpbfifo.lock, flags);
332 ts = get_tbl();
333
334 req = lpbfifo.req;
335 if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
336 spin_unlock_irqrestore(&lpbfifo.lock, flags);
337 return IRQ_HANDLED;
338 }
339
340 if (irq != 0) /* don't increment on polled case */
341 req->irq_count++;
342
343 if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
344 spin_unlock_irqrestore(&lpbfifo.lock, flags);
345
346 req->buffer_not_done_cnt++;
347 if ((req->buffer_not_done_cnt % 1000) == 0)
348 pr_err("transfer stalled\n");
349
350 return IRQ_HANDLED;
351 }
352
353 bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
354
355 req->last_byte = ((u8 *)req->data)[req->size - 1];
356
357 req->pos = status & 0x00ffffff;
358
359 /* Mark the FIFO as idle */
360 lpbfifo.req = NULL;
361
362 /* Release the lock before calling out to the callback. */
363 req->irq_ticks += get_tbl() - ts;
364 spin_unlock_irqrestore(&lpbfifo.lock, flags);
365
366 if (req->callback)
367 req->callback(req);
368
369 return IRQ_HANDLED;
370}
371
372/**
373 * mpc52xx_lpbfifo_poll - Poll for DMA completion
374 */
375void mpc52xx_lpbfifo_poll(void)
376{
377 struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
378 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
379 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
380
381 /*
382 * For more information, see comments on the "Fat Lady"
383 */
384 if (dma && write)
385 mpc52xx_lpbfifo_irq(0, NULL);
386 else
387 mpc52xx_lpbfifo_bcom_irq(0, NULL);
388}
389EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
390
391/**
392 * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
393 * @req: Pointer to request structure
394 */
395int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
396{
397 unsigned long flags;
398
399 if (!lpbfifo.regs)
400 return -ENODEV;
401
402 spin_lock_irqsave(&lpbfifo.lock, flags);
403
404 /* If the req pointer is already set, then a transfer is in progress */
405 if (lpbfifo.req) {
406 spin_unlock_irqrestore(&lpbfifo.lock, flags);
407 return -EBUSY;
408 }
409
410 /* Setup the transfer */
411 lpbfifo.req = req;
412 req->irq_count = 0;
413 req->irq_ticks = 0;
414 req->buffer_not_done_cnt = 0;
415 req->pos = 0;
416
417 mpc52xx_lpbfifo_kick(req);
418 spin_unlock_irqrestore(&lpbfifo.lock, flags);
419 return 0;
420}
421EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
422
423void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
424{
425 unsigned long flags;
426
427 spin_lock_irqsave(&lpbfifo.lock, flags);
428 if (lpbfifo.req == req) {
429 /* Put it into reset and clear the state */
430 bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
431 bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
432 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
433 lpbfifo.req = NULL;
434 }
435 spin_unlock_irqrestore(&lpbfifo.lock, flags);
436}
437EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
438
439static int __devinit
440mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
441{
442 struct resource res;
443 int rc = -ENOMEM;
444
445 if (lpbfifo.dev != NULL)
446 return -ENOSPC;
447
448 lpbfifo.irq = irq_of_parse_and_map(op->node, 0);
449 if (!lpbfifo.irq)
450 return -ENODEV;
451
452 if (of_address_to_resource(op->node, 0, &res))
453 return -ENODEV;
454 lpbfifo.regs_phys = res.start;
455 lpbfifo.regs = of_iomap(op->node, 0);
456 if (!lpbfifo.regs)
457 return -ENOMEM;
458
459 spin_lock_init(&lpbfifo.lock);
460
461 /* Put FIFO into reset */
462 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
463
464 /* Register the interrupt handler */
465 rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
466 "mpc52xx-lpbfifo", &lpbfifo);
467 if (rc)
468 goto err_irq;
469
470 /* Request the Bestcomm receive (fifo --> memory) task and IRQ */
471 lpbfifo.bcom_rx_task =
472 bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
473 BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
474 16*1024*1024);
475 if (!lpbfifo.bcom_rx_task)
476 goto err_bcom_rx;
477
478 rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
479 mpc52xx_lpbfifo_bcom_irq, 0,
480 "mpc52xx-lpbfifo-rx", &lpbfifo);
481 if (rc)
482 goto err_bcom_rx_irq;
483
484 /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
485 lpbfifo.bcom_tx_task =
486 bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
487 BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
488 if (!lpbfifo.bcom_tx_task)
489 goto err_bcom_tx;
490
491 lpbfifo.dev = &op->dev;
492 return 0;
493
494 err_bcom_tx:
495 free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
496 err_bcom_rx_irq:
497 bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
498 err_bcom_rx:
499 err_irq:
500 iounmap(lpbfifo.regs);
501 lpbfifo.regs = NULL;
502
503 dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
504 return -ENODEV;
505}
506
507
508static int __devexit mpc52xx_lpbfifo_remove(struct of_device *op)
509{
510 if (lpbfifo.dev != &op->dev)
511 return 0;
512
513 /* Put FIFO in reset */
514 out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
515
516 /* Release the bestcomm transmit task */
517 free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
518 bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);
519
520 /* Release the bestcomm receive task */
521 free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
522 bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
523
524 free_irq(lpbfifo.irq, &lpbfifo);
525 iounmap(lpbfifo.regs);
526 lpbfifo.regs = NULL;
527 lpbfifo.dev = NULL;
528
529 return 0;
530}
531
532static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
533 { .compatible = "fsl,mpc5200-lpbfifo", },
534 {},
535};
536
537static struct of_platform_driver mpc52xx_lpbfifo_driver = {
538 .owner = THIS_MODULE,
539 .name = "mpc52xx-lpbfifo",
540 .match_table = mpc52xx_lpbfifo_match,
541 .probe = mpc52xx_lpbfifo_probe,
542 .remove = __devexit_p(mpc52xx_lpbfifo_remove),
543};
544
545/***********************************************************************
546 * Module init/exit
547 */
548static int __init mpc52xx_lpbfifo_init(void)
549{
550 pr_debug("Registering LocalPlus bus FIFO driver\n");
551 return of_register_platform_driver(&mpc52xx_lpbfifo_driver);
552}
553module_init(mpc52xx_lpbfifo_init);
554
555static void __exit mpc52xx_lpbfifo_exit(void)
556{
557 pr_debug("Unregistering LocalPlus bus FIFO driver\n");
558 of_unregister_platform_driver(&mpc52xx_lpbfifo_driver);
559}
560module_exit(mpc52xx_lpbfifo_exit);
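
The file above exports a small request-based API: mpc52xx_lpbfifo_submit(), mpc52xx_lpbfifo_poll() and mpc52xx_lpbfifo_abort(). A minimal sketch of a blocking write from a client driver, assuming the request structure declared in the mpc52xx headers (not shown in this diff) exposes the fields referenced above (cs, offset, data, data_phys, size, flags, callback) and that the callback returns void and takes only the request pointer, could look like this:

#include <linux/completion.h>

static DECLARE_COMPLETION(lpb_done);

/* Runs after the driver has marked the FIFO idle and dropped its lock */
static void lpb_write_done(struct mpc52xx_lpbfifo_request *req)
{
	complete(&lpb_done);
}

static int lpb_write_example(void *buf, dma_addr_t buf_phys, size_t len)
{
	struct mpc52xx_lpbfifo_request req = {
		.cs        = 0,			/* LocalPlus chip select to target */
		.offset    = 0,			/* offset within that chip select */
		.data      = buf,		/* virtual address, used by the PIO path */
		.data_phys = buf_phys,		/* DMA address, used by the Bestcomm path */
		.size      = len,
		.flags     = MPC52XX_LPBFIFO_FLAG_WRITE,
		.callback  = lpb_write_done,
	};
	int rc;

	/* Returns -EBUSY if another transfer is already in flight */
	rc = mpc52xx_lpbfifo_submit(&req);
	if (rc)
		return rc;

	wait_for_completion(&lpb_done);
	return 0;
}

With MPC52XX_LPBFIFO_FLAG_WRITE | MPC52XX_LPBFIFO_FLAG_POLL_DMA instead, the caller would loop on mpc52xx_lpbfifo_poll() until the callback fires rather than sleeping on the completion.
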
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 480f806fd0a9..4bf4bf7b063e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -220,7 +220,7 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
220} 220}
221 221
222static struct irq_chip mpc52xx_extirq_irqchip = { 222static struct irq_chip mpc52xx_extirq_irqchip = {
223 .typename = "MPC52xx External", 223 .name = "MPC52xx External",
224 .mask = mpc52xx_extirq_mask, 224 .mask = mpc52xx_extirq_mask,
225 .unmask = mpc52xx_extirq_unmask, 225 .unmask = mpc52xx_extirq_unmask,
226 .ack = mpc52xx_extirq_ack, 226 .ack = mpc52xx_extirq_ack,
@@ -258,7 +258,7 @@ static void mpc52xx_main_unmask(unsigned int virq)
258} 258}
259 259
260static struct irq_chip mpc52xx_main_irqchip = { 260static struct irq_chip mpc52xx_main_irqchip = {
261 .typename = "MPC52xx Main", 261 .name = "MPC52xx Main",
262 .mask = mpc52xx_main_mask, 262 .mask = mpc52xx_main_mask,
263 .mask_ack = mpc52xx_main_mask, 263 .mask_ack = mpc52xx_main_mask,
264 .unmask = mpc52xx_main_unmask, 264 .unmask = mpc52xx_main_unmask,
@@ -291,7 +291,7 @@ static void mpc52xx_periph_unmask(unsigned int virq)
291} 291}
292 292
293static struct irq_chip mpc52xx_periph_irqchip = { 293static struct irq_chip mpc52xx_periph_irqchip = {
294 .typename = "MPC52xx Peripherals", 294 .name = "MPC52xx Peripherals",
295 .mask = mpc52xx_periph_mask, 295 .mask = mpc52xx_periph_mask,
296 .mask_ack = mpc52xx_periph_mask, 296 .mask_ack = mpc52xx_periph_mask,
297 .unmask = mpc52xx_periph_unmask, 297 .unmask = mpc52xx_periph_unmask,
@@ -335,7 +335,7 @@ static void mpc52xx_sdma_ack(unsigned int virq)
335} 335}
336 336
337static struct irq_chip mpc52xx_sdma_irqchip = { 337static struct irq_chip mpc52xx_sdma_irqchip = {
338 .typename = "MPC52xx SDMA", 338 .name = "MPC52xx SDMA",
339 .mask = mpc52xx_sdma_mask, 339 .mask = mpc52xx_sdma_mask,
340 .unmask = mpc52xx_sdma_unmask, 340 .unmask = mpc52xx_sdma_unmask,
341 .ack = mpc52xx_sdma_ack, 341 .ack = mpc52xx_sdma_ack,
@@ -355,7 +355,7 @@ static int mpc52xx_is_extirq(int l1, int l2)
355 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property 355 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
356 */ 356 */
357static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, 357static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
358 u32 *intspec, unsigned int intsize, 358 const u32 *intspec, unsigned int intsize,
359 irq_hw_number_t *out_hwirq, 359 irq_hw_number_t *out_hwirq,
360 unsigned int *out_flags) 360 unsigned int *out_flags)
361{ 361{
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 7ee979f323d1..9d962d7c72c1 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -69,7 +69,6 @@ static void pq2ads_pci_unmask_irq(unsigned int virq)
69} 69}
70 70
71static struct irq_chip pq2ads_pci_ic = { 71static struct irq_chip pq2ads_pci_ic = {
72 .typename = "PQ2 ADS PCI",
73 .name = "PQ2 ADS PCI", 72 .name = "PQ2 ADS PCI",
74 .end = pq2ads_pci_unmask_irq, 73 .end = pq2ads_pci_unmask_irq,
75 .mask = pq2ads_pci_mask_irq, 74 .mask = pq2ads_pci_mask_irq,
@@ -107,7 +106,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
107static int pci_pic_host_map(struct irq_host *h, unsigned int virq, 106static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
108 irq_hw_number_t hw) 107 irq_hw_number_t hw)
109{ 108{
110 get_irq_desc(virq)->status |= IRQ_LEVEL; 109 irq_to_desc(virq)->status |= IRQ_LEVEL;
111 set_irq_chip_data(virq, h->host_data); 110 set_irq_chip_data(virq, h->host_data);
112 set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); 111 set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
113 return 0; 112 return 0;
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 567ded7c3b9b..17f99745f0e4 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -74,7 +74,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
74 74
75 prop = of_get_property(np, "mode", NULL); 75 prop = of_get_property(np, "mode", NULL);
76 if (prop && !strcmp(prop, "cpu-qe")) 76 if (prop && !strcmp(prop, "cpu-qe"))
77 pdata.qe_mode = 1; 77 pdata.flags = SPI_QE_CPU_MODE;
78 78
79 for (j = 0; j < num_board_infos; j++) { 79 for (j = 0; j < num_board_infos; j++) {
80 if (board_infos[j].bus_num == pdata.bus_num) 80 if (board_infos[j].bus_num == pdata.bus_num)
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 08e65fc8b98c..d306f07b9aa1 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -96,6 +96,7 @@ int fsl_deep_sleep(void)
96{ 96{
97 return deep_sleeping; 97 return deep_sleeping;
98} 98}
99EXPORT_SYMBOL(fsl_deep_sleep);
99 100
100static int mpc83xx_change_state(void) 101static int mpc83xx_change_state(void)
101{ 102{
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index d3a975e8fd3e..d95121894eb7 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -1,6 +1,7 @@
1menuconfig MPC85xx 1menuconfig FSL_SOC_BOOKE
2 bool "Machine Type" 2 bool "Freescale Book-E Machine Type"
3 depends on PPC_85xx 3 depends on PPC_85xx || PPC_BOOK3E
4 select FSL_SOC
4 select PPC_UDBG_16550 5 select PPC_UDBG_16550
5 select MPIC 6 select MPIC
6 select PPC_PCI_CHOICE 7 select PPC_PCI_CHOICE
@@ -8,7 +9,7 @@ menuconfig MPC85xx
8 select SERIAL_8250_SHARE_IRQ if SERIAL_8250 9 select SERIAL_8250_SHARE_IRQ if SERIAL_8250
9 default y 10 default y
10 11
11if MPC85xx 12if FSL_SOC_BOOKE
12 13
13config MPC8540_ADS 14config MPC8540_ADS
14 bool "Freescale MPC8540 ADS" 15 bool "Freescale MPC8540 ADS"
@@ -144,7 +145,19 @@ config SBC8560
144 help 145 help
145 This option enables support for the Wind River SBC8560 board 146 This option enables support for the Wind River SBC8560 board
146 147
147endif # MPC85xx 148config P4080_DS
149 bool "Freescale P4080 DS"
150 select DEFAULT_UIMAGE
151 select PPC_FSL_BOOK3E
152 select PPC_E500MC
153 select PHYS_64BIT
154 select SWIOTLB
155 select MPC8xxx_GPIO
156 select HAS_RAPIDIO
157 help
158 This option enables support for the P4080 DS board
159
160endif # FSL_SOC_BOOKE
148 161
149config TQM85xx 162config TQM85xx
150 bool 163 bool
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 9098aea0cf32..387c128f2c8c 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MPC8536_DS) += mpc8536_ds.o
10obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o 10obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o
11obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o 11obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
12obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o 12obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
13obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
13obj-$(CONFIG_STX_GP3) += stx_gp3.o 14obj-$(CONFIG_STX_GP3) += stx_gp3.o
14obj-$(CONFIG_TQM85xx) += tqm85xx.o 15obj-$(CONFIG_TQM85xx) += tqm85xx.o
15obj-$(CONFIG_SBC8560) += sbc8560.o 16obj-$(CONFIG_SBC8560) += sbc8560.o
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
new file mode 100644
index 000000000000..534c2ecc89d9
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -0,0 +1,125 @@
1/*
2 * Corenet based SoC DS Setup
3 *
4 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
5 *
6 * Copyright 2009 Freescale Semiconductor Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/kdev_t.h>
17#include <linux/delay.h>
18#include <linux/interrupt.h>
19#include <linux/lmb.h>
20
21#include <asm/system.h>
22#include <asm/time.h>
23#include <asm/machdep.h>
24#include <asm/pci-bridge.h>
25#include <mm/mmu_decl.h>
26#include <asm/prom.h>
27#include <asm/udbg.h>
28#include <asm/mpic.h>
29
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <sysdev/fsl_pci.h>
33
34void __init corenet_ds_pic_init(void)
35{
36 struct mpic *mpic;
37 struct resource r;
38 struct device_node *np = NULL;
39 unsigned int flags = MPIC_PRIMARY | MPIC_BIG_ENDIAN |
40 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU;
41
42 np = of_find_node_by_type(np, "open-pic");
43
44 if (np == NULL) {
45 printk(KERN_ERR "Could not find open-pic node\n");
46 return;
47 }
48
49 if (of_address_to_resource(np, 0, &r)) {
50 printk(KERN_ERR "Failed to map mpic register space\n");
51 of_node_put(np);
52 return;
53 }
54
55 if (ppc_md.get_irq == mpic_get_coreint_irq)
56 flags |= MPIC_ENABLE_COREINT;
57
58 mpic = mpic_alloc(np, r.start, flags, 0, 256, " OpenPIC ");
59 BUG_ON(mpic == NULL);
60
61 mpic_init(mpic);
62}
63
64#ifdef CONFIG_PCI
65static int primary_phb_addr;
66#endif
67
68/*
69 * Setup the architecture
70 */
71#ifdef CONFIG_SMP
72void __init mpc85xx_smp_init(void);
73#endif
74
75void __init corenet_ds_setup_arch(void)
76{
77#ifdef CONFIG_PCI
78 struct device_node *np;
79 struct pci_controller *hose;
80#endif
81 dma_addr_t max = 0xffffffff;
82
83#ifdef CONFIG_SMP
84 mpc85xx_smp_init();
85#endif
86
87#ifdef CONFIG_PCI
88 for_each_compatible_node(np, "pci", "fsl,p4080-pcie") {
89 struct resource rsrc;
90 of_address_to_resource(np, 0, &rsrc);
91 if ((rsrc.start & 0xfffff) == primary_phb_addr)
92 fsl_add_bridge(np, 1);
93 else
94 fsl_add_bridge(np, 0);
95
96 hose = pci_find_hose_for_OF_device(np);
97 max = min(max, hose->dma_window_base_cur +
98 hose->dma_window_size);
99 }
100#endif
101
102#ifdef CONFIG_SWIOTLB
103 if (lmb_end_of_DRAM() > max) {
104 ppc_swiotlb_enable = 1;
105 set_pci_dma_ops(&swiotlb_dma_ops);
106 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
107 }
108#endif
109 pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
110}
111
112static const struct of_device_id of_device_ids[] __devinitconst = {
113 {
114 .compatible = "simple-bus"
115 },
116 {
117 .compatible = "fsl,rapidio-delta",
118 },
119 {}
120};
121
122int __init corenet_ds_publish_devices(void)
123{
124 return of_platform_bus_probe(NULL, of_device_ids, NULL);
125}
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.h b/arch/powerpc/platforms/85xx/corenet_ds.h
new file mode 100644
index 000000000000..ddd700b23031
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/corenet_ds.h
@@ -0,0 +1,19 @@
1/*
2 * Corenet based SoC DS Setup
3 *
4 * Copyright 2009 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef CORENET_DS_H
13#define CORENET_DS_H
14
15extern void __init corenet_ds_pic_init(void);
16extern void __init corenet_ds_setup_arch(void);
17extern int __init corenet_ds_publish_devices(void);
18
19#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 3909d57b86e3..c5028a2e5a58 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -301,6 +301,7 @@ static struct of_device_id mpc85xx_ids[] = {
301 { .compatible = "fsl,qe", }, 301 { .compatible = "fsl,qe", },
302 { .compatible = "gianfar", }, 302 { .compatible = "gianfar", },
303 { .compatible = "fsl,rapidio-delta", }, 303 { .compatible = "fsl,rapidio-delta", },
304 { .compatible = "fsl,mpc8548-guts", },
304 {}, 305 {},
305}; 306};
306 307
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index c8468de4acf6..088f30b0c088 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -44,6 +44,7 @@ void __init mpc85xx_rdb_pic_init(void)
44 struct mpic *mpic; 44 struct mpic *mpic;
45 struct resource r; 45 struct resource r;
46 struct device_node *np; 46 struct device_node *np;
47 unsigned long root = of_get_flat_dt_root();
47 48
48 np = of_find_node_by_type(NULL, "open-pic"); 49 np = of_find_node_by_type(NULL, "open-pic");
49 if (np == NULL) { 50 if (np == NULL) {
@@ -57,11 +58,18 @@ void __init mpc85xx_rdb_pic_init(void)
57 return; 58 return;
58 } 59 }
59 60
60 mpic = mpic_alloc(np, r.start, 61 if (of_flat_dt_is_compatible(root, "fsl,85XXRDB-CAMP")) {
62 mpic = mpic_alloc(np, r.start,
63 MPIC_PRIMARY |
64 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
65 0, 256, " OpenPIC ");
66 } else {
67 mpic = mpic_alloc(np, r.start,
61 MPIC_PRIMARY | MPIC_WANTS_RESET | 68 MPIC_PRIMARY | MPIC_WANTS_RESET |
62 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | 69 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
63 MPIC_SINGLE_DEST_CPU, 70 MPIC_SINGLE_DEST_CPU,
64 0, 256, " OpenPIC "); 71 0, 256, " OpenPIC ");
72 }
65 73
66 BUG_ON(mpic == NULL); 74 BUG_ON(mpic == NULL);
67 of_node_put(np); 75 of_node_put(np);
@@ -113,6 +121,7 @@ static int __init mpc85xxrdb_publish_devices(void)
113 return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL); 121 return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL);
114} 122}
115machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices); 123machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices);
124machine_device_initcall(p1020_rdb, mpc85xxrdb_publish_devices);
116 125
117/* 126/*
118 * Called very early, device-tree isn't unflattened 127 * Called very early, device-tree isn't unflattened
@@ -126,6 +135,15 @@ static int __init p2020_rdb_probe(void)
126 return 0; 135 return 0;
127} 136}
128 137
138static int __init p1020_rdb_probe(void)
139{
140 unsigned long root = of_get_flat_dt_root();
141
142 if (of_flat_dt_is_compatible(root, "fsl,P1020RDB"))
143 return 1;
144 return 0;
145}
146
129define_machine(p2020_rdb) { 147define_machine(p2020_rdb) {
130 .name = "P2020 RDB", 148 .name = "P2020 RDB",
131 .probe = p2020_rdb_probe, 149 .probe = p2020_rdb_probe,
@@ -139,3 +157,17 @@ define_machine(p2020_rdb) {
139 .calibrate_decr = generic_calibrate_decr, 157 .calibrate_decr = generic_calibrate_decr,
140 .progress = udbg_progress, 158 .progress = udbg_progress,
141}; 159};
160
161define_machine(p1020_rdb) {
162 .name = "P1020 RDB",
163 .probe = p1020_rdb_probe,
164 .setup_arch = mpc85xx_rdb_setup_arch,
165 .init_IRQ = mpc85xx_rdb_pic_init,
166#ifdef CONFIG_PCI
167 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
168#endif
169 .get_irq = mpic_get_irq,
170 .restart = fsl_rstcr_restart,
171 .calibrate_decr = generic_calibrate_decr,
172 .progress = udbg_progress,
173};
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
new file mode 100644
index 000000000000..84170460497b
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -0,0 +1,74 @@
1/*
2 * P4080 DS Setup
3 *
4 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
5 *
6 * Copyright 2009 Freescale Semiconductor Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/kdev_t.h>
17#include <linux/delay.h>
18#include <linux/interrupt.h>
19
20#include <asm/system.h>
21#include <asm/time.h>
22#include <asm/machdep.h>
23#include <asm/pci-bridge.h>
24#include <mm/mmu_decl.h>
25#include <asm/prom.h>
26#include <asm/udbg.h>
27#include <asm/mpic.h>
28
29#include <linux/of_platform.h>
30#include <sysdev/fsl_soc.h>
31#include <sysdev/fsl_pci.h>
32
33#include "corenet_ds.h"
34
35#ifdef CONFIG_PCI
36static int primary_phb_addr;
37#endif
38
39/*
40 * Called very early, device-tree isn't unflattened
41 */
42static int __init p4080_ds_probe(void)
43{
44 unsigned long root = of_get_flat_dt_root();
45
46 if (of_flat_dt_is_compatible(root, "fsl,P4080DS")) {
47#ifdef CONFIG_PCI
48 /* treat PCIe1 as primary,
49 * shouldn't matter as we have no ISA on the board
50 */
51 primary_phb_addr = 0x0000;
52#endif
53 return 1;
54 } else {
55 return 0;
56 }
57}
58
59define_machine(p4080_ds) {
60 .name = "P4080 DS",
61 .probe = p4080_ds_probe,
62 .setup_arch = corenet_ds_setup_arch,
63 .init_IRQ = corenet_ds_pic_init,
64#ifdef CONFIG_PCI
65 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
66#endif
67 .get_irq = mpic_get_coreint_irq,
68 .restart = fsl_rstcr_restart,
69 .calibrate_decr = generic_calibrate_decr,
70 .progress = udbg_progress,
71};
72
73machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
74machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 60edf63d0157..e5da5f62b24a 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -232,7 +232,7 @@ static int socrates_fpga_pic_set_type(unsigned int virq,
232} 232}
233 233
234static struct irq_chip socrates_fpga_pic_chip = { 234static struct irq_chip socrates_fpga_pic_chip = {
235 .typename = " FPGA-PIC ", 235 .name = " FPGA-PIC ",
236 .ack = socrates_fpga_pic_ack, 236 .ack = socrates_fpga_pic_ack,
237 .mask = socrates_fpga_pic_mask, 237 .mask = socrates_fpga_pic_mask,
238 .mask_ack = socrates_fpga_pic_mask_ack, 238 .mask_ack = socrates_fpga_pic_mask_ack,
@@ -245,7 +245,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
245 irq_hw_number_t hwirq) 245 irq_hw_number_t hwirq)
246{ 246{
247 /* All interrupts are LEVEL sensitive */ 247 /* All interrupts are LEVEL sensitive */
248 get_irq_desc(virq)->status |= IRQ_LEVEL; 248 irq_to_desc(virq)->status |= IRQ_LEVEL;
249 set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip, 249 set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip,
250 handle_fasteoi_irq); 250 handle_fasteoi_irq);
251 251
@@ -253,7 +253,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
253} 253}
254 254
255static int socrates_fpga_pic_host_xlate(struct irq_host *h, 255static int socrates_fpga_pic_host_xlate(struct irq_host *h,
256 struct device_node *ct, u32 *intspec, unsigned int intsize, 256 struct device_node *ct, const u32 *intspec, unsigned int intsize,
257 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 257 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
258{ 258{
259 struct socrates_fpga_irq_info *fpga_irq = &fpga_irqs[intspec[0]]; 259 struct socrates_fpga_irq_info *fpga_irq = &fpga_irqs[intspec[0]];
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 9c7b64a3402b..2bbfd530d6d8 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -35,6 +35,7 @@ config MPC8610_HPCD
35config GEF_PPC9A 35config GEF_PPC9A
36 bool "GE Fanuc PPC9A" 36 bool "GE Fanuc PPC9A"
37 select DEFAULT_UIMAGE 37 select DEFAULT_UIMAGE
38 select MMIO_NVRAM
38 select GENERIC_GPIO 39 select GENERIC_GPIO
39 select ARCH_REQUIRE_GPIOLIB 40 select ARCH_REQUIRE_GPIOLIB
40 help 41 help
@@ -43,6 +44,7 @@ config GEF_PPC9A
43config GEF_SBC310 44config GEF_SBC310
44 bool "GE Fanuc SBC310" 45 bool "GE Fanuc SBC310"
45 select DEFAULT_UIMAGE 46 select DEFAULT_UIMAGE
47 select MMIO_NVRAM
46 select GENERIC_GPIO 48 select GENERIC_GPIO
47 select ARCH_REQUIRE_GPIOLIB 49 select ARCH_REQUIRE_GPIOLIB
48 help 50 help
@@ -51,6 +53,7 @@ config GEF_SBC310
51config GEF_SBC610 53config GEF_SBC610
52 bool "GE Fanuc SBC610" 54 bool "GE Fanuc SBC610"
53 select DEFAULT_UIMAGE 55 select DEFAULT_UIMAGE
56 select MMIO_NVRAM
54 select GENERIC_GPIO 57 select GENERIC_GPIO
55 select ARCH_REQUIRE_GPIOLIB 58 select ARCH_REQUIRE_GPIOLIB
56 select HAS_RAPIDIO 59 select HAS_RAPIDIO
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 50d0a2b63809..0110a8736d33 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -149,7 +149,7 @@ static void gef_pic_unmask(unsigned int virq)
149} 149}
150 150
151static struct irq_chip gef_pic_chip = { 151static struct irq_chip gef_pic_chip = {
152 .typename = "gefp", 152 .name = "gefp",
153 .mask = gef_pic_mask, 153 .mask = gef_pic_mask,
154 .mask_ack = gef_pic_mask_ack, 154 .mask_ack = gef_pic_mask_ack,
155 .unmask = gef_pic_unmask, 155 .unmask = gef_pic_unmask,
@@ -163,14 +163,14 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
163 irq_hw_number_t hwirq) 163 irq_hw_number_t hwirq)
164{ 164{
165 /* All interrupts are LEVEL sensitive */ 165 /* All interrupts are LEVEL sensitive */
166 get_irq_desc(virq)->status |= IRQ_LEVEL; 166 irq_to_desc(virq)->status |= IRQ_LEVEL;
167 set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); 167 set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
168 168
169 return 0; 169 return 0;
170} 170}
171 171
172static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct, 172static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
173 u32 *intspec, unsigned int intsize, 173 const u32 *intspec, unsigned int intsize,
174 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 174 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
175{ 175{
176 176
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 287f7bd17dd9..a792e5d85813 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -33,6 +33,7 @@
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34
35#include <asm/mpic.h> 35#include <asm/mpic.h>
36#include <asm/nvram.h>
36 37
37#include <sysdev/fsl_pci.h> 38#include <sysdev/fsl_pci.h>
38#include <sysdev/fsl_soc.h> 39#include <sysdev/fsl_soc.h>
@@ -95,6 +96,10 @@ static void __init gef_ppc9a_setup_arch(void)
95 printk(KERN_WARNING "Unable to map board registers\n"); 96 printk(KERN_WARNING "Unable to map board registers\n");
96 of_node_put(regs); 97 of_node_put(regs);
97 } 98 }
99
100#if defined(CONFIG_MMIO_NVRAM)
101 mmio_nvram_init();
102#endif
98} 103}
99 104
100/* Return the PCB revision */ 105/* Return the PCB revision */
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 90754e752bd8..6a1a613836c2 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -33,6 +33,7 @@
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34
35#include <asm/mpic.h> 35#include <asm/mpic.h>
36#include <asm/nvram.h>
36 37
37#include <sysdev/fsl_pci.h> 38#include <sysdev/fsl_pci.h>
38#include <sysdev/fsl_soc.h> 39#include <sysdev/fsl_soc.h>
@@ -95,6 +96,10 @@ static void __init gef_sbc310_setup_arch(void)
95 printk(KERN_WARNING "Unable to map board registers\n"); 96 printk(KERN_WARNING "Unable to map board registers\n");
96 of_node_put(regs); 97 of_node_put(regs);
97 } 98 }
99
100#if defined(CONFIG_MMIO_NVRAM)
101 mmio_nvram_init();
102#endif
98} 103}
99 104
100/* Return the PCB revision */ 105/* Return the PCB revision */
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 72b31a6010a0..e10688a0fc4e 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -33,6 +33,7 @@
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34
35#include <asm/mpic.h> 35#include <asm/mpic.h>
36#include <asm/nvram.h>
36 37
37#include <sysdev/fsl_pci.h> 38#include <sysdev/fsl_pci.h>
38#include <sysdev/fsl_soc.h> 39#include <sysdev/fsl_soc.h>
@@ -95,6 +96,10 @@ static void __init gef_sbc610_setup_arch(void)
95 printk(KERN_WARNING "Unable to map board registers\n"); 96 printk(KERN_WARNING "Unable to map board registers\n");
96 of_node_put(regs); 97 of_node_put(regs);
97 } 98 }
99
100#if defined(CONFIG_MMIO_NVRAM)
101 mmio_nvram_init();
102#endif
98} 103}
99 104
100/* Return the PCB revision */ 105/* Return the PCB revision */
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 627908a4cd77..5abe137f6309 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -19,6 +19,7 @@
19#include <linux/stddef.h> 19#include <linux/stddef.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/interrupt.h>
22#include <linux/kdev_t.h> 23#include <linux/kdev_t.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/seq_file.h> 25#include <linux/seq_file.h>
@@ -41,10 +42,46 @@
41 42
42#include "mpc86xx.h" 43#include "mpc86xx.h"
43 44
45static struct device_node *pixis_node;
44static unsigned char *pixis_bdcfg0, *pixis_arch; 46static unsigned char *pixis_bdcfg0, *pixis_arch;
45 47
48#ifdef CONFIG_SUSPEND
49static irqreturn_t mpc8610_sw9_irq(int irq, void *data)
50{
51 pr_debug("%s: PIXIS' event (sw9/wakeup) IRQ handled\n", __func__);
52 return IRQ_HANDLED;
53}
54
55static void __init mpc8610_suspend_init(void)
56{
57 int irq;
58 int ret;
59
60 if (!pixis_node)
61 return;
62
63 irq = irq_of_parse_and_map(pixis_node, 0);
64 if (!irq) {
65 pr_err("%s: can't map pixis event IRQ.\n", __func__);
66 return;
67 }
68
69 ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL);
70 if (ret) {
71 pr_err("%s: can't request pixis event IRQ: %d\n",
72 __func__, ret);
73 irq_dispose_mapping(irq);
74 }
75
76 enable_irq_wake(irq);
77}
78#else
79static inline void mpc8610_suspend_init(void) { }
80#endif /* CONFIG_SUSPEND */
81
46static struct of_device_id __initdata mpc8610_ids[] = { 82static struct of_device_id __initdata mpc8610_ids[] = {
47 { .compatible = "fsl,mpc8610-immr", }, 83 { .compatible = "fsl,mpc8610-immr", },
84 { .compatible = "fsl,mpc8610-guts", },
48 { .compatible = "simple-bus", }, 85 { .compatible = "simple-bus", },
49 { .compatible = "gianfar", }, 86 { .compatible = "gianfar", },
50 {} 87 {}
@@ -55,6 +92,9 @@ static int __init mpc8610_declare_of_platform_devices(void)
55 /* Firstly, register PIXIS GPIOs. */ 92 /* Firstly, register PIXIS GPIOs. */
56 simple_gpiochip_init("fsl,fpga-pixis-gpio-bank"); 93 simple_gpiochip_init("fsl,fpga-pixis-gpio-bank");
57 94
95 /* Enable wakeup on PIXIS' event IRQ. */
96 mpc8610_suspend_init();
97
58 /* Without this call, the SSI device driver won't get probed. */ 98 /* Without this call, the SSI device driver won't get probed. */
59 of_platform_bus_probe(NULL, mpc8610_ids, NULL); 99 of_platform_bus_probe(NULL, mpc8610_ids, NULL);
60 100
@@ -250,10 +290,10 @@ static void __init mpc86xx_hpcd_setup_arch(void)
250 diu_ops.set_sysfs_monitor_port = mpc8610hpcd_set_sysfs_monitor_port; 290 diu_ops.set_sysfs_monitor_port = mpc8610hpcd_set_sysfs_monitor_port;
251#endif 291#endif
252 292
253 np = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis"); 293 pixis_node = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
254 if (np) { 294 if (pixis_node) {
255 of_address_to_resource(np, 0, &r); 295 of_address_to_resource(pixis_node, 0, &r);
256 of_node_put(np); 296 of_node_put(pixis_node);
257 pixis = ioremap(r.start, 32); 297 pixis = ioremap(r.start, 32);
258 if (!pixis) { 298 if (!pixis) {
259 printk(KERN_ERR "Err: can't map FPGA cfg register!\n"); 299 printk(KERN_ERR "Err: can't map FPGA cfg register!\n");
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 385acfc48397..242954c4293f 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -222,7 +222,7 @@ static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
222 int cascade_irq; 222 int cascade_irq;
223 223
224 if ((cascade_irq = cpm_get_irq()) >= 0) { 224 if ((cascade_irq = cpm_get_irq()) >= 0) {
225 struct irq_desc *cdesc = irq_desc + cascade_irq; 225 struct irq_desc *cdesc = irq_to_desc(cascade_irq);
226 226
227 generic_handle_irq(cascade_irq); 227 generic_handle_irq(cascade_irq);
228 cdesc->chip->eoi(cascade_irq); 228 cdesc->chip->eoi(cascade_irq);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 04a8061045c4..d1663db7810f 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -86,6 +86,11 @@ config RTAS_ERROR_LOGGING
86 depends on PPC_RTAS 86 depends on PPC_RTAS
87 default n 87 default n
88 88
89config PPC_RTAS_DAEMON
90 bool
91 depends on PPC_RTAS
92 default n
93
89config RTAS_PROC 94config RTAS_PROC
90 bool "Proc interface to RTAS" 95 bool "Proc interface to RTAS"
91 depends on PPC_RTAS 96 depends on PPC_RTAS
@@ -255,7 +260,7 @@ config QE_GPIO
255 260
256config CPM2 261config CPM2
257 bool "Enable support for the CPM2 (Communications Processor Module)" 262 bool "Enable support for the CPM2 (Communications Processor Module)"
258 depends on MPC85xx || 8260 263 depends on (FSL_SOC_BOOKE && PPC32) || 8260
259 select CPM 264 select CPM
260 select PPC_LIB_RHEAP 265 select PPC_LIB_RHEAP
261 select PPC_PCI_CHOICE 266 select PPC_PCI_CHOICE
@@ -300,7 +305,7 @@ source "arch/powerpc/sysdev/bestcomm/Kconfig"
300 305
301config MPC8xxx_GPIO 306config MPC8xxx_GPIO
302 bool "MPC8xxx GPIO support" 307 bool "MPC8xxx GPIO support"
303 depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || PPC_85xx || PPC_86xx 308 depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
304 select GENERIC_GPIO 309 select GENERIC_GPIO
305 select ARCH_REQUIRE_GPIOLIB 310 select ARCH_REQUIRE_GPIOLIB
306 help 311 help
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e382cae678b8..2eab27a94cc9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -28,8 +28,6 @@ config PPC_BOOK3S_32
28config PPC_85xx 28config PPC_85xx
29 bool "Freescale 85xx" 29 bool "Freescale 85xx"
30 select E500 30 select E500
31 select FSL_SOC
32 select MPC85xx
33 31
34config PPC_8xx 32config PPC_8xx
35 bool "Freescale 8xx" 33 bool "Freescale 8xx"
@@ -138,6 +136,14 @@ config PPC_FPU
138 bool 136 bool
139 default y if PPC64 137 default y if PPC64
140 138
139config FSL_EMB_PERFMON
140 bool "Freescale Embedded Perfmon"
141 depends on E500 || PPC_83xx
142 help
143 This is the Performance Monitor support found on the e500 core
144 and some e300 cores (c3 and c4). Select this only if your
145 core supports the Embedded Performance Monitor APU
146
141config 4xx 147config 4xx
142 bool 148 bool
143 depends on 40x || 44x 149 depends on 40x || 44x
@@ -153,13 +159,6 @@ config FSL_BOOKE
153 depends on E200 || E500 159 depends on E200 || E500
154 default y 160 default y
155 161
156config FSL_EMB_PERFMON
157 bool "Freescale Embedded Perfmon"
158 depends on E500 || PPC_83xx
159 help
160 This is the Performance Monitor support found on the e500 core
161 and some e300 cores (c3 and c4). Select this only if your
162 core supports the Embedded Performance Monitor APU
163 162
164config PTE_64BIT 163config PTE_64BIT
165 bool 164 bool
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index a6812ee00100..fdb9f0b0d7a8 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_PPC_MPC52xx) += 52xx/
12obj-$(CONFIG_PPC_8xx) += 8xx/ 12obj-$(CONFIG_PPC_8xx) += 8xx/
13obj-$(CONFIG_PPC_82xx) += 82xx/ 13obj-$(CONFIG_PPC_82xx) += 82xx/
14obj-$(CONFIG_PPC_83xx) += 83xx/ 14obj-$(CONFIG_PPC_83xx) += 83xx/
15obj-$(CONFIG_PPC_85xx) += 85xx/ 15obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
16obj-$(CONFIG_PPC_86xx) += 86xx/ 16obj-$(CONFIG_PPC_86xx) += 86xx/
17obj-$(CONFIG_PPC_PSERIES) += pseries/ 17obj-$(CONFIG_PPC_PSERIES) += pseries/
18obj-$(CONFIG_PPC_ISERIES) += iseries/ 18obj-$(CONFIG_PPC_ISERIES) += iseries/
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index a86c34b3bb84..96fe896f6df3 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -312,7 +312,7 @@ static struct irq_chip msic_irq_chip = {
312 .mask = mask_msi_irq, 312 .mask = mask_msi_irq,
313 .unmask = unmask_msi_irq, 313 .unmask = unmask_msi_irq,
314 .shutdown = unmask_msi_irq, 314 .shutdown = unmask_msi_irq,
315 .typename = "AXON-MSI", 315 .name = "AXON-MSI",
316}; 316};
317 317
318static int msic_host_map(struct irq_host *h, unsigned int virq, 318static int msic_host_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 72254848a228..36052a9ebcda 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -110,7 +110,7 @@ static void beatic_end_irq(unsigned int irq_plug)
110} 110}
111 111
112static struct irq_chip beatic_pic = { 112static struct irq_chip beatic_pic = {
113 .typename = " CELL-BEAT ", 113 .name = " CELL-BEAT ",
114 .unmask = beatic_unmask_irq, 114 .unmask = beatic_unmask_irq,
115 .mask = beatic_mask_irq, 115 .mask = beatic_mask_irq,
116 .eoi = beatic_end_irq, 116 .eoi = beatic_end_irq,
@@ -136,7 +136,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
136static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, 136static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
137 irq_hw_number_t hw) 137 irq_hw_number_t hw)
138{ 138{
139 struct irq_desc *desc = get_irq_desc(virq); 139 struct irq_desc *desc = irq_to_desc(virq);
140 int64_t err; 140 int64_t err;
141 141
142 err = beat_construct_and_connect_irq_plug(virq, hw); 142 err = beat_construct_and_connect_irq_plug(virq, hw);
@@ -166,11 +166,11 @@ static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq,
166 * Note: We have only 1 entry to translate. 166 * Note: We have only 1 entry to translate.
167 */ 167 */
168static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, 168static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
169 u32 *intspec, unsigned int intsize, 169 const u32 *intspec, unsigned int intsize,
170 irq_hw_number_t *out_hwirq, 170 irq_hw_number_t *out_hwirq,
171 unsigned int *out_flags) 171 unsigned int *out_flags)
172{ 172{
173 u64 *intspec2 = (u64 *)intspec; 173 const u64 *intspec2 = (const u64 *)intspec;
174 174
175 *out_hwirq = *intspec2; 175 *out_hwirq = *intspec2;
176 *out_flags |= IRQ_TYPE_LEVEL_LOW; 176 *out_flags |= IRQ_TYPE_LEVEL_LOW;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 882e47080e74..f9dbf76a763f 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -88,7 +88,7 @@ static void iic_eoi(unsigned int irq)
88} 88}
89 89
90static struct irq_chip iic_chip = { 90static struct irq_chip iic_chip = {
91 .typename = " CELL-IIC ", 91 .name = " CELL-IIC ",
92 .mask = iic_mask, 92 .mask = iic_mask,
93 .unmask = iic_unmask, 93 .unmask = iic_unmask,
94 .eoi = iic_eoi, 94 .eoi = iic_eoi,
@@ -133,7 +133,7 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
133 133
134 134
135static struct irq_chip iic_ioexc_chip = { 135static struct irq_chip iic_ioexc_chip = {
136 .typename = " CELL-IOEX", 136 .name = " CELL-IOEX",
137 .mask = iic_mask, 137 .mask = iic_mask,
138 .unmask = iic_unmask, 138 .unmask = iic_unmask,
139 .eoi = iic_ioexc_eoi, 139 .eoi = iic_ioexc_eoi,
@@ -297,7 +297,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
297} 297}
298 298
299static int iic_host_xlate(struct irq_host *h, struct device_node *ct, 299static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
300 u32 *intspec, unsigned int intsize, 300 const u32 *intspec, unsigned int intsize,
301 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 301 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
302 302
303{ 303{
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 4e5655624ae8..01244f254a11 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -102,7 +102,7 @@ static void spider_ack_irq(unsigned int virq)
102 102
103 /* Reset edge detection logic if necessary 103 /* Reset edge detection logic if necessary
104 */ 104 */
105 if (get_irq_desc(virq)->status & IRQ_LEVEL) 105 if (irq_to_desc(virq)->status & IRQ_LEVEL)
106 return; 106 return;
107 107
108 /* Only interrupts 47 to 50 can be set to edge */ 108 /* Only interrupts 47 to 50 can be set to edge */
@@ -119,7 +119,7 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type)
119 struct spider_pic *pic = spider_virq_to_pic(virq); 119 struct spider_pic *pic = spider_virq_to_pic(virq);
120 unsigned int hw = irq_map[virq].hwirq; 120 unsigned int hw = irq_map[virq].hwirq;
121 void __iomem *cfg = spider_get_irq_config(pic, hw); 121 void __iomem *cfg = spider_get_irq_config(pic, hw);
122 struct irq_desc *desc = get_irq_desc(virq); 122 struct irq_desc *desc = irq_to_desc(virq);
123 u32 old_mask; 123 u32 old_mask;
124 u32 ic; 124 u32 ic;
125 125
@@ -168,7 +168,7 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type)
168} 168}
169 169
170static struct irq_chip spider_pic = { 170static struct irq_chip spider_pic = {
171 .typename = " SPIDER ", 171 .name = " SPIDER ",
172 .unmask = spider_unmask_irq, 172 .unmask = spider_unmask_irq,
173 .mask = spider_mask_irq, 173 .mask = spider_mask_irq,
174 .ack = spider_ack_irq, 174 .ack = spider_ack_irq,
@@ -187,7 +187,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq,
187} 187}
188 188
189static int spider_host_xlate(struct irq_host *h, struct device_node *ct, 189static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
190 u32 *intspec, unsigned int intsize, 190 const u32 *intspec, unsigned int intsize,
191 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 191 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
192 192
193{ 193{
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 884e8bcec499..64a4c2d85f7c 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2494,7 +2494,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2494 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2494 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2495 int error = 0, cnt = 0; 2495 int error = 0, cnt = 0;
2496 2496
2497 if (!buf || len < 0) 2497 if (!buf)
2498 return -EINVAL; 2498 return -EINVAL;
2499 2499
2500 error = spu_acquire(ctx); 2500 error = spu_acquire(ctx);
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index 37d438bd5b7a..bc0b0efdc5fe 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -5,6 +5,8 @@ config PPC_CHRP
5 select PPC_I8259 5 select PPC_I8259
6 select PPC_INDIRECT_PCI 6 select PPC_INDIRECT_PCI
7 select PPC_RTAS 7 select PPC_RTAS
8 select PPC_RTAS_DAEMON
9 select RTAS_ERROR_LOGGING
8 select PPC_MPC106 10 select PPC_MPC106
9 select PPC_UDBG_16550 11 select PPC_UDBG_16550
10 select PPC_NATIVE 12 select PPC_NATIVE
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index cd4ad9aea760..52f3df3b4ca0 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -364,19 +364,6 @@ void __init chrp_setup_arch(void)
364 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); 364 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
365} 365}
366 366
367void
368chrp_event_scan(unsigned long unused)
369{
370 unsigned char log[1024];
371 int ret = 0;
372
373 /* XXX: we should loop until the hardware says no more error logs -- Cort */
374 rtas_call(rtas_token("event-scan"), 4, 1, &ret, 0xffffffff, 0,
375 __pa(log), 1024);
376 mod_timer(&__get_cpu_var(heartbeat_timer),
377 jiffies + event_scan_interval);
378}
379
380static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) 367static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
381{ 368{
382 unsigned int cascade_irq = i8259_irq(); 369 unsigned int cascade_irq = i8259_irq();
@@ -568,9 +555,6 @@ void __init chrp_init_IRQ(void)
568void __init 555void __init
569chrp_init2(void) 556chrp_init2(void)
570{ 557{
571 struct device_node *device;
572 const unsigned int *p = NULL;
573
574#ifdef CONFIG_NVRAM 558#ifdef CONFIG_NVRAM
575 chrp_nvram_init(); 559 chrp_nvram_init();
576#endif 560#endif
@@ -582,40 +566,6 @@ chrp_init2(void)
582 request_region(0x80,0x10,"dma page reg"); 566 request_region(0x80,0x10,"dma page reg");
583 request_region(0xc0,0x20,"dma2"); 567 request_region(0xc0,0x20,"dma2");
584 568
585 /* Get the event scan rate for the rtas so we know how
586 * often it expects a heartbeat. -- Cort
587 */
588 device = of_find_node_by_name(NULL, "rtas");
589 if (device)
590 p = of_get_property(device, "rtas-event-scan-rate", NULL);
591 if (p && *p) {
592 /*
593 * Arrange to call chrp_event_scan at least *p times
594 * per minute. We use 59 rather than 60 here so that
595 * the rate will be slightly higher than the minimum.
596 * This all assumes we don't do hotplug CPU on any
597 * machine that needs the event scans done.
598 */
599 unsigned long interval, offset;
600 int cpu, ncpus;
601 struct timer_list *timer;
602
603 interval = HZ * 59 / *p;
604 offset = HZ;
605 ncpus = num_online_cpus();
606 event_scan_interval = ncpus * interval;
607 for (cpu = 0; cpu < ncpus; ++cpu) {
608 timer = &per_cpu(heartbeat_timer, cpu);
609 setup_timer(timer, chrp_event_scan, 0);
610 timer->expires = jiffies + offset;
611 add_timer_on(timer, cpu);
612 offset += interval;
613 }
614 printk("RTAS Event Scan Rate: %u (%lu jiffies)\n",
615 *p, interval);
616 }
617 of_node_put(device);
618
619 if (ppc_md.progress) 569 if (ppc_md.progress)
620 ppc_md.progress(" Have fun! ", 0x7777); 570 ppc_md.progress(" Have fun! ", 0x7777);
621} 571}
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index f99c6c4b6985..3ae66ab9d5e7 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -19,8 +19,7 @@
19 19
20#include "call_hpt.h" 20#include "call_hpt.h"
21 21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = 22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
23 { [0 ... 63] = SPIN_LOCK_UNLOCKED};
24 23
25/* 24/*
26 * Very primitive algorithm for picking up a lock 25 * Very primitive algorithm for picking up a lock
@@ -245,6 +244,11 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
245 244
246void __init hpte_init_iSeries(void) 245void __init hpte_init_iSeries(void)
247{ 246{
247 int i;
248
249 for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
250 spin_lock_init(&iSeries_hlocks[i]);
251
248 ppc_md.hpte_invalidate = iSeries_hpte_invalidate; 252 ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
249 ppc_md.hpte_updatepp = iSeries_hpte_updatepp; 253 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
250 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp; 254 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 94f444758836..07762259c60a 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -214,7 +214,7 @@ void __init iSeries_activate_IRQs()
214 unsigned long flags; 214 unsigned long flags;
215 215
216 for_each_irq (irq) { 216 for_each_irq (irq) {
217 struct irq_desc *desc = get_irq_desc(irq); 217 struct irq_desc *desc = irq_to_desc(irq);
218 218
219 if (desc && desc->chip && desc->chip->startup) { 219 if (desc && desc->chip && desc->chip->startup) {
220 spin_lock_irqsave(&desc->lock, flags); 220 spin_lock_irqsave(&desc->lock, flags);
@@ -273,7 +273,7 @@ static void iseries_end_IRQ(unsigned int irq)
273} 273}
274 274
275static struct irq_chip iseries_pic = { 275static struct irq_chip iseries_pic = {
276 .typename = "iSeries irq controller", 276 .name = "iSeries irq controller",
277 .startup = iseries_startup_IRQ, 277 .startup = iseries_startup_IRQ,
278 .shutdown = iseries_shutdown_IRQ, 278 .shutdown = iseries_shutdown_IRQ,
279 .unmask = iseries_enable_IRQ, 279 .unmask = iseries_enable_IRQ,
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index d212006a5b3c..09e827296276 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -152,12 +152,12 @@ static unsigned int pmac_startup_irq(unsigned int virq)
152 unsigned long bit = 1UL << (src & 0x1f); 152 unsigned long bit = 1UL << (src & 0x1f);
153 int i = src >> 5; 153 int i = src >> 5;
154 154
155 spin_lock_irqsave(&pmac_pic_lock, flags); 155 spin_lock_irqsave(&pmac_pic_lock, flags);
156 if ((irq_desc[virq].status & IRQ_LEVEL) == 0) 156 if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0)
157 out_le32(&pmac_irq_hw[i]->ack, bit); 157 out_le32(&pmac_irq_hw[i]->ack, bit);
158 __set_bit(src, ppc_cached_irq_mask); 158 __set_bit(src, ppc_cached_irq_mask);
159 __pmac_set_irq_mask(src, 0); 159 __pmac_set_irq_mask(src, 0);
160 spin_unlock_irqrestore(&pmac_pic_lock, flags); 160 spin_unlock_irqrestore(&pmac_pic_lock, flags);
161 161
162 return 0; 162 return 0;
163} 163}
@@ -195,7 +195,7 @@ static int pmac_retrigger(unsigned int virq)
195} 195}
196 196
197static struct irq_chip pmac_pic = { 197static struct irq_chip pmac_pic = {
198 .typename = " PMAC-PIC ", 198 .name = " PMAC-PIC ",
199 .startup = pmac_startup_irq, 199 .startup = pmac_startup_irq,
200 .mask = pmac_mask_irq, 200 .mask = pmac_mask_irq,
201 .ack = pmac_ack_irq, 201 .ack = pmac_ack_irq,
@@ -285,7 +285,7 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
285static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, 285static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
286 irq_hw_number_t hw) 286 irq_hw_number_t hw)
287{ 287{
288 struct irq_desc *desc = get_irq_desc(virq); 288 struct irq_desc *desc = irq_to_desc(virq);
289 int level; 289 int level;
290 290
291 if (hw >= max_irqs) 291 if (hw >= max_irqs)
@@ -303,7 +303,7 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
303} 303}
304 304
305static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, 305static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
306 u32 *intspec, unsigned int intsize, 306 const u32 *intspec, unsigned int intsize,
307 irq_hw_number_t *out_hwirq, 307 irq_hw_number_t *out_hwirq,
308 unsigned int *out_flags) 308 unsigned int *out_flags)
309 309
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 8ec5ccf76b19..59d9712d7364 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -152,7 +152,7 @@ static void ps3_chip_eoi(unsigned int virq)
152 */ 152 */
153 153
154static struct irq_chip ps3_irq_chip = { 154static struct irq_chip ps3_irq_chip = {
155 .typename = "ps3", 155 .name = "ps3",
156 .mask = ps3_chip_mask, 156 .mask = ps3_chip_mask,
157 .unmask = ps3_chip_unmask, 157 .unmask = ps3_chip_unmask,
158 .eoi = ps3_chip_eoi, 158 .eoi = ps3_chip_eoi,
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 189a25b80735..e81b028a2a48 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -34,7 +34,7 @@
34#if defined(DEBUG) 34#if defined(DEBUG)
35#define DBG udbg_printf 35#define DBG udbg_printf
36#else 36#else
37#define DBG pr_debug 37#define DBG pr_devel
38#endif 38#endif
39 39
40enum { 40enum {
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index f0e6f28427bd..27554c807fd5 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -4,6 +4,7 @@ config PPC_PSERIES
4 select MPIC 4 select MPIC
5 select PPC_I8259 5 select PPC_I8259
6 select PPC_RTAS 6 select PPC_RTAS
7 select PPC_RTAS_DAEMON
7 select RTAS_ERROR_LOGGING 8 select RTAS_ERROR_LOGGING
8 select PPC_UDBG_16550 9 select PPC_UDBG_16550
9 select PPC_NATIVE 10 select PPC_NATIVE
@@ -59,7 +60,7 @@ config PPC_SMLPAR
59 60
60config CMM 61config CMM
61 tristate "Collaborative memory management" 62 tristate "Collaborative memory management"
62 depends on PPC_SMLPAR && !CRASH_DUMP 63 depends on PPC_SMLPAR
63 default y 64 default y
64 help 65 help
65 Select this option, if you want to enable the kernel interface 66 Select this option, if you want to enable the kernel interface
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 790c0b872d4f..0ff5174ae4f5 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,8 +7,8 @@ EXTRA_CFLAGS += -DDEBUG
7endif 7endif
8 8
9obj-y := lpar.o hvCall.o nvram.o reconfig.o \ 9obj-y := lpar.o hvCall.o nvram.o reconfig.o \
10 setup.o iommu.o ras.o rtasd.o \ 10 setup.o iommu.o ras.o \
11 firmware.o power.o 11 firmware.o power.o dlpar.o
12obj-$(CONFIG_SMP) += smp.o 12obj-$(CONFIG_SMP) += smp.o
13obj-$(CONFIG_XICS) += xics.o 13obj-$(CONFIG_XICS) += xics.o
14obj-$(CONFIG_SCANLOG) += scanlog.o 14obj-$(CONFIG_SCANLOG) += scanlog.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 6567439fe78d..bcdcf0ccc8d7 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -229,8 +229,9 @@ static void cmm_get_mpp(void)
229{ 229{
230 int rc; 230 int rc;
231 struct hvcall_mpp_data mpp_data; 231 struct hvcall_mpp_data mpp_data;
232 unsigned long active_pages_target; 232 signed long active_pages_target, page_loan_request, target;
233 signed long page_loan_request; 233 signed long total_pages = totalram_pages + loaned_pages;
234 signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
234 235
235 rc = h_get_mpp(&mpp_data); 236 rc = h_get_mpp(&mpp_data);
236 237
@@ -238,17 +239,25 @@ static void cmm_get_mpp(void)
238 return; 239 return;
239 240
240 page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE); 241 page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
241 loaned_pages_target = page_loan_request + loaned_pages; 242 target = page_loan_request + (signed long)loaned_pages;
242 if (loaned_pages_target > oom_freed_pages) 243
243 loaned_pages_target -= oom_freed_pages; 244 if (target < 0 || total_pages < min_mem_pages)
245 target = 0;
246
247 if (target > oom_freed_pages)
248 target -= oom_freed_pages;
244 else 249 else
245 loaned_pages_target = 0; 250 target = 0;
251
252 active_pages_target = total_pages - target;
253
254 if (min_mem_pages > active_pages_target)
255 target = total_pages - min_mem_pages;
246 256
247 active_pages_target = totalram_pages + loaned_pages - loaned_pages_target; 257 if (target < 0)
258 target = 0;
248 259
249 if ((min_mem_mb * 1024 * 1024) > (active_pages_target * PAGE_SIZE)) 260 loaned_pages_target = target;
250 loaned_pages_target = totalram_pages + loaned_pages -
251 ((min_mem_mb * 1024 * 1024) / PAGE_SIZE);
252 261
253 cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n", 262 cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
254 page_loan_request, loaned_pages, loaned_pages_target, 263 page_loan_request, loaned_pages, loaned_pages_target,
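
The cmm.c hunk above reworks the loan-target arithmetic with signed types so the target can never go negative and never pulls the partition below min_mem_mb. A stand-alone model of that clamping logic, with invented page counts, is sketched below; the function name and the sample values are illustrative only.

    /* User-space model of the reworked CMM loan-target clamping. */
    #include <stdio.h>

    #define PAGE_SIZE 4096L

    static long compute_loan_target(long loan_request_bytes,
                                    long loaned_pages, long totalram_pages,
                                    long oom_freed_pages, long min_mem_mb)
    {
        long target, active_pages_target;
        long total_pages = totalram_pages + loaned_pages;
        long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
        long page_loan_request = loan_request_bytes / PAGE_SIZE;

        target = page_loan_request + loaned_pages;

        if (target < 0 || total_pages < min_mem_pages)
            target = 0;                     /* never loan when already too small */

        if (target > oom_freed_pages)
            target -= oom_freed_pages;      /* credit pages freed by the OOM notifier */
        else
            target = 0;

        active_pages_target = total_pages - target;

        if (min_mem_pages > active_pages_target)
            target = total_pages - min_mem_pages;   /* clamp to the min_mem floor */

        if (target < 0)
            target = 0;

        return target;
    }

    int main(void)
    {
        /* Hypothetical: 1 GiB partition, hypervisor requests 512 MiB back. */
        printf("loan target = %ld pages\n",
               compute_loan_target(512L << 20, 0, (1L << 30) / PAGE_SIZE, 0, 256));
        return 0;
    }
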
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
new file mode 100644
index 000000000000..12df9e8812a9
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -0,0 +1,558 @@
1/*
2 * Support for dynamic reconfiguration for PCI, Memory, and CPU
3 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
4 *
5 * Copyright (C) 2009 Nathan Fontenot
6 * Copyright (C) 2009 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/kref.h>
15#include <linux/notifier.h>
16#include <linux/proc_fs.h>
17#include <linux/spinlock.h>
18#include <linux/cpu.h>
19#include "offline_states.h"
20
21#include <asm/prom.h>
22#include <asm/machdep.h>
23#include <asm/uaccess.h>
24#include <asm/rtas.h>
25#include <asm/pSeries_reconfig.h>
26
27struct cc_workarea {
28 u32 drc_index;
29 u32 zero;
30 u32 name_offset;
31 u32 prop_length;
32 u32 prop_offset;
33};
34
35static void dlpar_free_cc_property(struct property *prop)
36{
37 kfree(prop->name);
38 kfree(prop->value);
39 kfree(prop);
40}
41
42static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
43{
44 struct property *prop;
45 char *name;
46 char *value;
47
48 prop = kzalloc(sizeof(*prop), GFP_KERNEL);
49 if (!prop)
50 return NULL;
51
52 name = (char *)ccwa + ccwa->name_offset;
53 prop->name = kstrdup(name, GFP_KERNEL);
54
55 prop->length = ccwa->prop_length;
56 value = (char *)ccwa + ccwa->prop_offset;
57 prop->value = kzalloc(prop->length, GFP_KERNEL);
58 if (!prop->value) {
59 dlpar_free_cc_property(prop);
60 return NULL;
61 }
62
63 memcpy(prop->value, value, prop->length);
64 return prop;
65}
66
67static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
68{
69 struct device_node *dn;
70 char *name;
71
72 dn = kzalloc(sizeof(*dn), GFP_KERNEL);
73 if (!dn)
74 return NULL;
75
76 /* The configure connector reported name does not contain a
77 * preceding '/', so we allocate a buffer large enough to
78 * prepend this to the full_name.
79 */
80 name = (char *)ccwa + ccwa->name_offset;
81 dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
82 if (!dn->full_name) {
83 kfree(dn);
84 return NULL;
85 }
86
87 sprintf(dn->full_name, "/%s", name);
88 return dn;
89}
90
91static void dlpar_free_one_cc_node(struct device_node *dn)
92{
93 struct property *prop;
94
95 while (dn->properties) {
96 prop = dn->properties;
97 dn->properties = prop->next;
98 dlpar_free_cc_property(prop);
99 }
100
101 kfree(dn->full_name);
102 kfree(dn);
103}
104
105static void dlpar_free_cc_nodes(struct device_node *dn)
106{
107 if (dn->child)
108 dlpar_free_cc_nodes(dn->child);
109
110 if (dn->sibling)
111 dlpar_free_cc_nodes(dn->sibling);
112
113 dlpar_free_one_cc_node(dn);
114}
115
116#define NEXT_SIBLING 1
117#define NEXT_CHILD 2
118#define NEXT_PROPERTY 3
119#define PREV_PARENT 4
120#define MORE_MEMORY 5
121#define CALL_AGAIN -2
122#define ERR_CFG_USE -9003
123
124struct device_node *dlpar_configure_connector(u32 drc_index)
125{
126 struct device_node *dn;
127 struct device_node *first_dn = NULL;
128 struct device_node *last_dn = NULL;
129 struct property *property;
130 struct property *last_property = NULL;
131 struct cc_workarea *ccwa;
132 int cc_token;
133 int rc;
134
135 cc_token = rtas_token("ibm,configure-connector");
136 if (cc_token == RTAS_UNKNOWN_SERVICE)
137 return NULL;
138
139 spin_lock(&rtas_data_buf_lock);
140 ccwa = (struct cc_workarea *)&rtas_data_buf[0];
141 ccwa->drc_index = drc_index;
142 ccwa->zero = 0;
143
144 rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
145 while (rc) {
146 switch (rc) {
147 case NEXT_SIBLING:
148 dn = dlpar_parse_cc_node(ccwa);
149 if (!dn)
150 goto cc_error;
151
152 dn->parent = last_dn->parent;
153 last_dn->sibling = dn;
154 last_dn = dn;
155 break;
156
157 case NEXT_CHILD:
158 dn = dlpar_parse_cc_node(ccwa);
159 if (!dn)
160 goto cc_error;
161
162 if (!first_dn)
163 first_dn = dn;
164 else {
165 dn->parent = last_dn;
166 if (last_dn)
167 last_dn->child = dn;
168 }
169
170 last_dn = dn;
171 break;
172
173 case NEXT_PROPERTY:
174 property = dlpar_parse_cc_property(ccwa);
175 if (!property)
176 goto cc_error;
177
178 if (!last_dn->properties)
179 last_dn->properties = property;
180 else
181 last_property->next = property;
182
183 last_property = property;
184 break;
185
186 case PREV_PARENT:
187 last_dn = last_dn->parent;
188 break;
189
190 case CALL_AGAIN:
191 break;
192
193 case MORE_MEMORY:
194 case ERR_CFG_USE:
195 default:
196 printk(KERN_ERR "Unexpected Error (%d) "
197 "returned from configure-connector\n", rc);
198 goto cc_error;
199 }
200
201 rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
202 }
203
204 spin_unlock(&rtas_data_buf_lock);
205 return first_dn;
206
207cc_error:
208 if (first_dn)
209 dlpar_free_cc_nodes(first_dn);
210 spin_unlock(&rtas_data_buf_lock);
211 return NULL;
212}
213
214static struct device_node *derive_parent(const char *path)
215{
216 struct device_node *parent;
217 char *last_slash;
218
219 last_slash = strrchr(path, '/');
220 if (last_slash == path) {
221 parent = of_find_node_by_path("/");
222 } else {
223 char *parent_path;
224 int parent_path_len = last_slash - path + 1;
225 parent_path = kmalloc(parent_path_len, GFP_KERNEL);
226 if (!parent_path)
227 return NULL;
228
229 strlcpy(parent_path, path, parent_path_len);
230 parent = of_find_node_by_path(parent_path);
231 kfree(parent_path);
232 }
233
234 return parent;
235}
236
237int dlpar_attach_node(struct device_node *dn)
238{
239 struct proc_dir_entry *ent;
240 int rc;
241
242 of_node_set_flag(dn, OF_DYNAMIC);
243 kref_init(&dn->kref);
244 dn->parent = derive_parent(dn->full_name);
245 if (!dn->parent)
246 return -ENOMEM;
247
248 rc = blocking_notifier_call_chain(&pSeries_reconfig_chain,
249 PSERIES_RECONFIG_ADD, dn);
250 if (rc == NOTIFY_BAD) {
251 printk(KERN_ERR "Failed to add device node %s\n",
252 dn->full_name);
253 return -ENOMEM; /* For now, safe to assume kmalloc failure */
254 }
255
256 of_attach_node(dn);
257
258#ifdef CONFIG_PROC_DEVICETREE
259 ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
260 if (ent)
261 proc_device_tree_add_node(dn, ent);
262#endif
263
264 of_node_put(dn->parent);
265 return 0;
266}
267
268int dlpar_detach_node(struct device_node *dn)
269{
270 struct device_node *parent = dn->parent;
271 struct property *prop = dn->properties;
272
273#ifdef CONFIG_PROC_DEVICETREE
274 while (prop) {
275 remove_proc_entry(prop->name, dn->pde);
276 prop = prop->next;
277 }
278
279 if (dn->pde)
280 remove_proc_entry(dn->pde->name, parent->pde);
281#endif
282
283 blocking_notifier_call_chain(&pSeries_reconfig_chain,
284 PSERIES_RECONFIG_REMOVE, dn);
285 of_detach_node(dn);
286 of_node_put(dn); /* Must decrement the refcount */
287
288 return 0;
289}
290
291#define DR_ENTITY_SENSE 9003
292#define DR_ENTITY_PRESENT 1
293#define DR_ENTITY_UNUSABLE 2
294#define ALLOCATION_STATE 9003
295#define ALLOC_UNUSABLE 0
296#define ALLOC_USABLE 1
297#define ISOLATION_STATE 9001
298#define ISOLATE 0
299#define UNISOLATE 1
300
301int dlpar_acquire_drc(u32 drc_index)
302{
303 int dr_status, rc;
304
305 rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
306 DR_ENTITY_SENSE, drc_index);
307 if (rc || dr_status != DR_ENTITY_UNUSABLE)
308 return -1;
309
310 rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
311 if (rc)
312 return rc;
313
314 rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
315 if (rc) {
316 rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
317 return rc;
318 }
319
320 return 0;
321}
322
323int dlpar_release_drc(u32 drc_index)
324{
325 int dr_status, rc;
326
327 rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
328 DR_ENTITY_SENSE, drc_index);
329 if (rc || dr_status != DR_ENTITY_PRESENT)
330 return -1;
331
332 rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
333 if (rc)
334 return rc;
335
336 rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
337 if (rc) {
338 rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
339 return rc;
340 }
341
342 return 0;
343}
344
345#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
346
347static DEFINE_MUTEX(pseries_cpu_hotplug_mutex);
348
349void cpu_hotplug_driver_lock()
350{
351 mutex_lock(&pseries_cpu_hotplug_mutex);
352}
353
354void cpu_hotplug_driver_unlock()
355{
356 mutex_unlock(&pseries_cpu_hotplug_mutex);
357}
358
359static int dlpar_online_cpu(struct device_node *dn)
360{
361 int rc = 0;
362 unsigned int cpu;
363 int len, nthreads, i;
364 const u32 *intserv;
365
366 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
367 if (!intserv)
368 return -EINVAL;
369
370 nthreads = len / sizeof(u32);
371
372 cpu_maps_update_begin();
373 for (i = 0; i < nthreads; i++) {
374 for_each_present_cpu(cpu) {
375 if (get_hard_smp_processor_id(cpu) != intserv[i])
376 continue;
377 BUG_ON(get_cpu_current_state(cpu)
378 != CPU_STATE_OFFLINE);
379 cpu_maps_update_done();
380 rc = cpu_up(cpu);
381 if (rc)
382 goto out;
383 cpu_maps_update_begin();
384
385 break;
386 }
387 if (cpu == num_possible_cpus())
388 printk(KERN_WARNING "Could not find cpu to online "
389 "with physical id 0x%x\n", intserv[i]);
390 }
391 cpu_maps_update_done();
392
393out:
394 return rc;
395
396}
397
398static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
399{
400 struct device_node *dn;
401 unsigned long drc_index;
402 char *cpu_name;
403 int rc;
404
405 cpu_hotplug_driver_lock();
406 rc = strict_strtoul(buf, 0, &drc_index);
407 if (rc) {
408 rc = -EINVAL;
409 goto out;
410 }
411
412 dn = dlpar_configure_connector(drc_index);
413 if (!dn) {
414 rc = -EINVAL;
415 goto out;
416 }
417
418 /* configure-connector reports cpus as living in the base
419 * directory of the device tree. CPUs actually live in the
420 * cpus directory so we need to fixup the full_name.
421 */
422 cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
423 GFP_KERNEL);
424 if (!cpu_name) {
425 dlpar_free_cc_nodes(dn);
426 rc = -ENOMEM;
427 goto out;
428 }
429
430 sprintf(cpu_name, "/cpus%s", dn->full_name);
431 kfree(dn->full_name);
432 dn->full_name = cpu_name;
433
434 rc = dlpar_acquire_drc(drc_index);
435 if (rc) {
436 dlpar_free_cc_nodes(dn);
437 rc = -EINVAL;
438 goto out;
439 }
440
441 rc = dlpar_attach_node(dn);
442 if (rc) {
443 dlpar_release_drc(drc_index);
444 dlpar_free_cc_nodes(dn);
445 }
446
447 rc = dlpar_online_cpu(dn);
448out:
449 cpu_hotplug_driver_unlock();
450
451 return rc ? rc : count;
452}
453
454static int dlpar_offline_cpu(struct device_node *dn)
455{
456 int rc = 0;
457 unsigned int cpu;
458 int len, nthreads, i;
459 const u32 *intserv;
460
461 intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
462 if (!intserv)
463 return -EINVAL;
464
465 nthreads = len / sizeof(u32);
466
467 cpu_maps_update_begin();
468 for (i = 0; i < nthreads; i++) {
469 for_each_present_cpu(cpu) {
470 if (get_hard_smp_processor_id(cpu) != intserv[i])
471 continue;
472
473 if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
474 break;
475
476 if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
477 cpu_maps_update_done();
478 rc = cpu_down(cpu);
479 if (rc)
480 goto out;
481 cpu_maps_update_begin();
482 break;
483
484 }
485
486 /*
487 * The cpu is in CPU_STATE_INACTIVE.
488 * Upgrade its state to CPU_STATE_OFFLINE.
489 */
490 set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
491 BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
492 != H_SUCCESS);
493 __cpu_die(cpu);
494 break;
495 }
496 if (cpu == num_possible_cpus())
497 printk(KERN_WARNING "Could not find cpu to offline "
498 "with physical id 0x%x\n", intserv[i]);
499 }
500 cpu_maps_update_done();
501
502out:
503 return rc;
504
505}
506
507static ssize_t dlpar_cpu_release(const char *buf, size_t count)
508{
509 struct device_node *dn;
510 const u32 *drc_index;
511 int rc;
512
513 dn = of_find_node_by_path(buf);
514 if (!dn)
515 return -EINVAL;
516
517 drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
518 if (!drc_index) {
519 of_node_put(dn);
520 return -EINVAL;
521 }
522
523 cpu_hotplug_driver_lock();
524 rc = dlpar_offline_cpu(dn);
525 if (rc) {
526 of_node_put(dn);
527 rc = -EINVAL;
528 goto out;
529 }
530
531 rc = dlpar_release_drc(*drc_index);
532 if (rc) {
533 of_node_put(dn);
534 goto out;
535 }
536
537 rc = dlpar_detach_node(dn);
538 if (rc) {
539 dlpar_acquire_drc(*drc_index);
540 goto out;
541 }
542
543 of_node_put(dn);
544out:
545 cpu_hotplug_driver_unlock();
546 return rc ? rc : count;
547}
548
549static int __init pseries_dlpar_init(void)
550{
551 ppc_md.cpu_probe = dlpar_cpu_probe;
552 ppc_md.cpu_release = dlpar_cpu_release;
553
554 return 0;
555}
556machine_device_initcall(pseries, pseries_dlpar_init);
557
558#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
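
dlpar_configure_connector() above drives repeated ibm,configure-connector RTAS calls, and each return code tells the kernel how the next item relates to the one just built: a sibling, a first child, a property, or a pop back to the parent. The fragment below models that tree-building walk in plain user-space C over a canned sequence of codes; the opcode stream and node names are invented for illustration and carry no device-tree meaning.

    /* User-space model of the configure-connector tree walk. */
    #include <stdio.h>
    #include <stdlib.h>

    enum cc_rc { CC_DONE = 0, CC_NEXT_SIBLING = 1, CC_NEXT_CHILD = 2, CC_PREV_PARENT = 4 };

    struct node {
        char name[32];
        struct node *parent, *child, *sibling;
    };

    static struct node *new_node(const char *name)
    {
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
            exit(EXIT_FAILURE);
        snprintf(n->name, sizeof(n->name), "%s", name);
        return n;
    }

    static void print_tree(const struct node *n, int depth)
    {
        for (; n; n = n->sibling) {
            printf("%*s%s\n", depth * 2, "", n->name);
            print_tree(n->child, depth + 1);
        }
    }

    int main(void)
    {
        /* Invented stream standing in for successive RTAS return codes. */
        const enum cc_rc stream[] = { CC_NEXT_CHILD, CC_NEXT_CHILD, CC_NEXT_SIBLING,
                                      CC_PREV_PARENT, CC_NEXT_SIBLING, CC_DONE };
        const char *names[] = { "cpus", "cpu@0", "cpu@2", "", "memory@0", "" };
        struct node *first = NULL, *last = NULL, *n;
        int i;

        for (i = 0; stream[i] != CC_DONE; i++) {
            switch (stream[i]) {
            case CC_NEXT_SIBLING:           /* same level as the last node */
                n = new_node(names[i]);
                n->parent = last->parent;
                last->sibling = n;
                last = n;
                break;
            case CC_NEXT_CHILD:             /* one level below the last node */
                n = new_node(names[i]);
                if (!first)
                    first = n;
                else {
                    n->parent = last;
                    last->child = n;
                }
                last = n;
                break;
            case CC_PREV_PARENT:            /* pop back up one level */
                last = last->parent;
                break;
            default:
                break;
            }
        }
        print_tree(first, 0);
        return 0;
    }
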
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 0e8db6771252..ef8e45448480 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -63,22 +63,6 @@ static void print_device_node_tree(struct pci_dn *pdn, int dent)
63} 63}
64#endif 64#endif
65 65
66/**
67 * irq_in_use - return true if this irq is being used
68 */
69static int irq_in_use(unsigned int irq)
70{
71 int rc = 0;
72 unsigned long flags;
73 struct irq_desc *desc = irq_desc + irq;
74
75 spin_lock_irqsave(&desc->lock, flags);
76 if (desc->action)
77 rc = 1;
78 spin_unlock_irqrestore(&desc->lock, flags);
79 return rc;
80}
81
82/** 66/**
83 * eeh_disable_irq - disable interrupt for the recovering device 67 * eeh_disable_irq - disable interrupt for the recovering device
84 */ 68 */
@@ -93,7 +77,7 @@ static void eeh_disable_irq(struct pci_dev *dev)
93 if (dev->msi_enabled || dev->msix_enabled) 77 if (dev->msi_enabled || dev->msix_enabled)
94 return; 78 return;
95 79
96 if (!irq_in_use(dev->irq)) 80 if (!irq_has_action(dev->irq))
97 return; 81 return;
98 82
99 PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED; 83 PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ebff6d9a4e39..6ea4698d9176 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -30,6 +30,7 @@
30#include <asm/pSeries_reconfig.h> 30#include <asm/pSeries_reconfig.h>
31#include "xics.h" 31#include "xics.h"
32#include "plpar_wrappers.h" 32#include "plpar_wrappers.h"
33#include "offline_states.h"
33 34
34/* This version can't take the spinlock, because it never returns */ 35/* This version can't take the spinlock, because it never returns */
35static struct rtas_args rtas_stop_self_args = { 36static struct rtas_args rtas_stop_self_args = {
@@ -39,6 +40,55 @@ static struct rtas_args rtas_stop_self_args = {
39 .rets = &rtas_stop_self_args.args[0], 40 .rets = &rtas_stop_self_args.args[0],
40}; 41};
41 42
43static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
44 CPU_STATE_OFFLINE;
45static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;
46
47static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;
48
49static int cede_offline_enabled __read_mostly = 1;
50
51/*
52 * Enable/disable cede_offline when available.
53 */
54static int __init setup_cede_offline(char *str)
55{
56 if (!strcmp(str, "off"))
57 cede_offline_enabled = 0;
58 else if (!strcmp(str, "on"))
59 cede_offline_enabled = 1;
60 else
61 return 0;
62 return 1;
63}
64
65__setup("cede_offline=", setup_cede_offline);
66
67enum cpu_state_vals get_cpu_current_state(int cpu)
68{
69 return per_cpu(current_state, cpu);
70}
71
72void set_cpu_current_state(int cpu, enum cpu_state_vals state)
73{
74 per_cpu(current_state, cpu) = state;
75}
76
77enum cpu_state_vals get_preferred_offline_state(int cpu)
78{
79 return per_cpu(preferred_offline_state, cpu);
80}
81
82void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
83{
84 per_cpu(preferred_offline_state, cpu) = state;
85}
86
87void set_default_offline_state(int cpu)
88{
89 per_cpu(preferred_offline_state, cpu) = default_offline_state;
90}
91
42static void rtas_stop_self(void) 92static void rtas_stop_self(void)
43{ 93{
44 struct rtas_args *args = &rtas_stop_self_args; 94 struct rtas_args *args = &rtas_stop_self_args;
@@ -56,11 +106,61 @@ static void rtas_stop_self(void)
56 106
57static void pseries_mach_cpu_die(void) 107static void pseries_mach_cpu_die(void)
58{ 108{
109 unsigned int cpu = smp_processor_id();
110 unsigned int hwcpu = hard_smp_processor_id();
111 u8 cede_latency_hint = 0;
112
59 local_irq_disable(); 113 local_irq_disable();
60 idle_task_exit(); 114 idle_task_exit();
61 xics_teardown_cpu(); 115 xics_teardown_cpu();
62 unregister_slb_shadow(hard_smp_processor_id(), __pa(get_slb_shadow())); 116
63 rtas_stop_self(); 117 if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
118 set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
119 cede_latency_hint = 2;
120
121 get_lppaca()->idle = 1;
122 if (!get_lppaca()->shared_proc)
123 get_lppaca()->donate_dedicated_cpu = 1;
124
125 printk(KERN_INFO
126 "cpu %u (hwid %u) ceding for offline with hint %d\n",
127 cpu, hwcpu, cede_latency_hint);
128 while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
129 extended_cede_processor(cede_latency_hint);
130 printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n",
131 cpu, hwcpu);
132 printk(KERN_INFO
133 "Decrementer value = %x Timebase value = %llx\n",
134 get_dec(), get_tb());
135 }
136
137 printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n",
138 cpu, hwcpu);
139
140 if (!get_lppaca()->shared_proc)
141 get_lppaca()->donate_dedicated_cpu = 0;
142 get_lppaca()->idle = 0;
143 }
144
145 if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
146 unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
147
148 /*
149 * NOTE: Calling start_secondary() here for now to
150 * start new context.
151 * However, need to do it cleanly by resetting the
152 * stack pointer.
153 */
154 start_secondary();
155
156 } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
157
158 set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
159 unregister_slb_shadow(hard_smp_processor_id(),
160 __pa(get_slb_shadow()));
161 rtas_stop_self();
162 }
163
64 /* Should never get here... */ 164 /* Should never get here... */
65 BUG(); 165 BUG();
66 for(;;); 166 for(;;);
@@ -106,18 +206,43 @@ static int pseries_cpu_disable(void)
106 return 0; 206 return 0;
107} 207}
108 208
209/*
210 * pseries_cpu_die: Wait for the cpu to die.
211 * @cpu: logical processor id of the CPU whose death we're awaiting.
212 *
213 * This function is called from the context of the thread which is performing
214 * the cpu-offline. Here we wait for long enough to allow the cpu in question
215 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
216 * notifications.
217 *
218 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
219 * self-destruct.
220 */
109static void pseries_cpu_die(unsigned int cpu) 221static void pseries_cpu_die(unsigned int cpu)
110{ 222{
111 int tries; 223 int tries;
112 int cpu_status; 224 int cpu_status = 1;
113 unsigned int pcpu = get_hard_smp_processor_id(cpu); 225 unsigned int pcpu = get_hard_smp_processor_id(cpu);
114 226
115 for (tries = 0; tries < 25; tries++) { 227 if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
116 cpu_status = query_cpu_stopped(pcpu); 228 cpu_status = 1;
117 if (cpu_status == 0 || cpu_status == -1) 229 for (tries = 0; tries < 1000; tries++) {
118 break; 230 if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
119 cpu_relax(); 231 cpu_status = 0;
232 break;
233 }
234 cpu_relax();
235 }
236 } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
237
238 for (tries = 0; tries < 25; tries++) {
239 cpu_status = query_cpu_stopped(pcpu);
240 if (cpu_status == 0 || cpu_status == -1)
241 break;
242 cpu_relax();
243 }
120 } 244 }
245
121 if (cpu_status != 0) { 246 if (cpu_status != 0) {
122 printk("Querying DEAD? cpu %i (%i) shows %i\n", 247 printk("Querying DEAD? cpu %i (%i) shows %i\n",
123 cpu, pcpu, cpu_status); 248 cpu, pcpu, cpu_status);
@@ -252,10 +377,41 @@ static struct notifier_block pseries_smp_nb = {
252 .notifier_call = pseries_smp_notifier, 377 .notifier_call = pseries_smp_notifier,
253}; 378};
254 379
380#define MAX_CEDE_LATENCY_LEVELS 4
381#define CEDE_LATENCY_PARAM_LENGTH 10
382#define CEDE_LATENCY_PARAM_MAX_LENGTH \
383 (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
384#define CEDE_LATENCY_TOKEN 45
385
386static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];
387
388static int parse_cede_parameters(void)
389{
390 int call_status;
391
392 memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
393 call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
394 NULL,
395 CEDE_LATENCY_TOKEN,
396 __pa(cede_parameters),
397 CEDE_LATENCY_PARAM_MAX_LENGTH);
398
399 if (call_status != 0)
400 printk(KERN_INFO "CEDE_LATENCY: \
401 %s %s Error calling get-system-parameter(0x%x)\n",
402 __FILE__, __func__, call_status);
403 else
404 printk(KERN_INFO "CEDE_LATENCY: \
405 get-system-parameter successful.\n");
406
407 return call_status;
408}
409
255static int __init pseries_cpu_hotplug_init(void) 410static int __init pseries_cpu_hotplug_init(void)
256{ 411{
257 struct device_node *np; 412 struct device_node *np;
258 const char *typep; 413 const char *typep;
414 int cpu;
259 415
260 for_each_node_by_name(np, "interrupt-controller") { 416 for_each_node_by_name(np, "interrupt-controller") {
261 typep = of_get_property(np, "compatible", NULL); 417 typep = of_get_property(np, "compatible", NULL);
@@ -283,8 +439,16 @@ static int __init pseries_cpu_hotplug_init(void)
283 smp_ops->cpu_die = pseries_cpu_die; 439 smp_ops->cpu_die = pseries_cpu_die;
284 440
285 /* Processors can be added/removed only on LPAR */ 441 /* Processors can be added/removed only on LPAR */
286 if (firmware_has_feature(FW_FEATURE_LPAR)) 442 if (firmware_has_feature(FW_FEATURE_LPAR)) {
287 pSeries_reconfig_notifier_register(&pseries_smp_nb); 443 pSeries_reconfig_notifier_register(&pseries_smp_nb);
444 cpu_maps_update_begin();
445 if (cede_offline_enabled && parse_cede_parameters() == 0) {
446 default_offline_state = CPU_STATE_INACTIVE;
447 for_each_online_cpu(cpu)
448 set_default_offline_state(cpu);
449 }
450 cpu_maps_update_done();
451 }
288 452
289 return 0; 453 return 0;
290} 454}
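
The hotplug-cpu.c changes above introduce two per-cpu variables, the preferred offline state requested by the hotplug path and the current state the dying CPU reports back, so an offlined CPU either cedes to the hypervisor (and can be prodded back without a full start-cpu) or stops itself via RTAS. The sketch below models that handshake in user space; the array sizes, function names and printed messages are illustrative stand-ins, not kernel interfaces.

    /* User-space model of the preferred/current offline-state handshake. */
    #include <stdio.h>

    enum cpu_state { STATE_OFFLINE, STATE_INACTIVE, STATE_ONLINE };

    #define MODEL_NR_CPUS 4

    static enum cpu_state preferred[MODEL_NR_CPUS];      /* set by the hotplug path */
    static enum cpu_state current_state[MODEL_NR_CPUS];  /* reported by the dying cpu */

    /* Modelled on pseries_mach_cpu_die(): a dying cpu either cedes (reusable
     * later) or stops itself for good, depending on the preferred state. */
    static void cpu_die_model(int cpu)
    {
        if (preferred[cpu] == STATE_INACTIVE) {
            current_state[cpu] = STATE_INACTIVE;
            printf("cpu %d: extended cede, can be prodded back later\n", cpu);
        } else {
            current_state[cpu] = STATE_OFFLINE;
            printf("cpu %d: rtas stop-self, fully offline\n", cpu);
        }
    }

    /* Modelled on pseries_cpu_die(): the offlining thread waits for the dying
     * cpu to report the state it asked for before declaring it dead. */
    static int cpu_wait_dead_model(int cpu)
    {
        enum cpu_state want = (preferred[cpu] == STATE_INACTIVE) ? STATE_INACTIVE
                                                                 : STATE_OFFLINE;
        return current_state[cpu] == want ? 0 : -1;
    }

    int main(void)
    {
        preferred[1] = STATE_INACTIVE;   /* cede_offline=on behaviour */
        preferred[2] = STATE_OFFLINE;    /* cede_offline=off behaviour */

        cpu_die_model(1);
        cpu_die_model(2);
        printf("cpu 1 dead? %s\n", cpu_wait_dead_model(1) == 0 ? "yes" : "no");
        printf("cpu 2 dead? %s\n", cpu_wait_dead_model(2) == 0 ? "yes" : "no");
        return 0;
    }
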
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
new file mode 100644
index 000000000000..22574e0d9d91
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -0,0 +1,18 @@
1#ifndef _OFFLINE_STATES_H_
2#define _OFFLINE_STATES_H_
3
4/* Cpu offline states go here */
5enum cpu_state_vals {
6 CPU_STATE_OFFLINE,
7 CPU_STATE_INACTIVE,
8 CPU_STATE_ONLINE,
9 CPU_MAX_OFFLINE_STATES
10};
11
12extern enum cpu_state_vals get_cpu_current_state(int cpu);
13extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
14extern enum cpu_state_vals get_preferred_offline_state(int cpu);
15extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
16extern void set_default_offline_state(int cpu);
17extern int start_secondary(void);
18#endif
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a24a6b2333b2..0603c91538ae 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -9,11 +9,33 @@ static inline long poll_pending(void)
9 return plpar_hcall_norets(H_POLL_PENDING); 9 return plpar_hcall_norets(H_POLL_PENDING);
10} 10}
11 11
12static inline u8 get_cede_latency_hint(void)
13{
14 return get_lppaca()->gpr5_dword.fields.cede_latency_hint;
15}
16
17static inline void set_cede_latency_hint(u8 latency_hint)
18{
19 get_lppaca()->gpr5_dword.fields.cede_latency_hint = latency_hint;
20}
21
12static inline long cede_processor(void) 22static inline long cede_processor(void)
13{ 23{
14 return plpar_hcall_norets(H_CEDE); 24 return plpar_hcall_norets(H_CEDE);
15} 25}
16 26
27static inline long extended_cede_processor(unsigned long latency_hint)
28{
29 long rc;
30 u8 old_latency_hint = get_cede_latency_hint();
31
32 set_cede_latency_hint(latency_hint);
33 rc = cede_processor();
34 set_cede_latency_hint(old_latency_hint);
35
36 return rc;
37}
38
17static inline long vpa_call(unsigned long flags, unsigned long cpu, 39static inline long vpa_call(unsigned long flags, unsigned long cpu,
18 unsigned long vpa) 40 unsigned long vpa)
19{ 41{
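
extended_cede_processor() above is a thin wrapper: it saves the current cede latency hint from the lppaca, installs the caller's hint, issues the plain H_CEDE, then restores the old hint so the ambient setting is unchanged. A generic user-space rendering of that save/override/restore pattern is shown below; the setting and the stand-in "cede" call are placeholders, not hypercalls.

    /* User-space model of the save/override/restore wrapper pattern. */
    #include <stdio.h>

    static unsigned char latency_hint;              /* stands in for the lppaca field */

    static unsigned char get_hint(void)   { return latency_hint; }
    static void set_hint(unsigned char h) { latency_hint = h; }

    static long plain_cede(void)                    /* stands in for H_CEDE */
    {
        printf("ceding with latency hint %u\n", get_hint());
        return 0;
    }

    static long extended_cede(unsigned char hint)
    {
        unsigned char old = get_hint();
        long rc;

        set_hint(hint);
        rc = plain_cede();
        set_hint(old);          /* callers see the hint they started with */
        return rc;
    }

    int main(void)
    {
        set_hint(0);
        extended_cede(2);       /* a deeper cede, as used for offlined cpus */
        printf("hint restored to %u\n", get_hint());
        return 0;
    }
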
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 2e2bbe120b90..a2305d29bbbd 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -96,7 +96,7 @@ static struct device_node *derive_parent(const char *path)
96 return parent; 96 return parent;
97} 97}
98 98
99static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); 99BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
100 100
101int pSeries_reconfig_notifier_register(struct notifier_block *nb) 101int pSeries_reconfig_notifier_register(struct notifier_block *nb)
102{ 102{
@@ -184,7 +184,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
184} 184}
185 185
186/* 186/*
187 * /proc/ppc64/ofdt - yucky binary interface for adding and removing 187 * /proc/powerpc/ofdt - yucky binary interface for adding and removing
188 * OF device nodes. Should be deprecated as soon as we get an 188 * OF device nodes. Should be deprecated as soon as we get an
189 * in-kernel wrapper for the RTAS ibm,configure-connector call. 189 * in-kernel wrapper for the RTAS ibm,configure-connector call.
190 */ 190 */
@@ -543,7 +543,7 @@ static const struct file_operations ofdt_fops = {
543 .write = ofdt_write 543 .write = ofdt_write
544}; 544};
545 545
546/* create /proc/ppc64/ofdt write-only by root */ 546/* create /proc/powerpc/ofdt write-only by root */
547static int proc_ppc64_create_ofdt(void) 547static int proc_ppc64_create_ofdt(void)
548{ 548{
549 struct proc_dir_entry *ent; 549 struct proc_dir_entry *ent;
@@ -551,7 +551,7 @@ static int proc_ppc64_create_ofdt(void)
551 if (!machine_is(pseries)) 551 if (!machine_is(pseries))
552 return 0; 552 return 0;
553 553
554 ent = proc_create("ppc64/ofdt", S_IWUSR, NULL, &ofdt_fops); 554 ent = proc_create("powerpc/ofdt", S_IWUSR, NULL, &ofdt_fops);
555 if (ent) 555 if (ent)
556 ent->size = 0; 556 ent->size = 0;
557 557
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 417eca79df69..1b45c458f952 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -13,7 +13,7 @@
13 * of this data using this driver. A dump exists if the device-tree 13 * of this data using this driver. A dump exists if the device-tree
14 * /chosen/ibm,scan-log-data property exists. 14 * /chosen/ibm,scan-log-data property exists.
15 * 15 *
16 * This driver exports /proc/ppc64/scan-log-dump which can be read. 16 * This driver exports /proc/powerpc/scan-log-dump which can be read.
17 * The driver supports only sequential reads. 17 * The driver supports only sequential reads.
18 * 18 *
19 * The driver looks at a write to the driver for the single word "reset". 19 * The driver looks at a write to the driver for the single word "reset".
@@ -186,7 +186,7 @@ static int __init scanlog_init(void)
186 if (!data) 186 if (!data)
187 goto err; 187 goto err;
188 188
189 ent = proc_create_data("ppc64/rtas/scan-log-dump", S_IRUSR, NULL, 189 ent = proc_create_data("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
190 &scanlog_fops, data); 190 &scanlog_fops, data);
191 if (!ent) 191 if (!ent)
192 goto err; 192 goto err;
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 440000cc7130..8868c012268a 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -48,6 +48,7 @@
48#include "plpar_wrappers.h" 48#include "plpar_wrappers.h"
49#include "pseries.h" 49#include "pseries.h"
50#include "xics.h" 50#include "xics.h"
51#include "offline_states.h"
51 52
52 53
53/* 54/*
@@ -84,6 +85,9 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
84 /* Fixup atomic count: it exited inside IRQ handler. */ 85 /* Fixup atomic count: it exited inside IRQ handler. */
85 task_thread_info(paca[lcpu].__current)->preempt_count = 0; 86 task_thread_info(paca[lcpu].__current)->preempt_count = 0;
86 87
88 if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE)
89 goto out;
90
87 /* 91 /*
88 * If the RTAS start-cpu token does not exist then presume the 92 * If the RTAS start-cpu token does not exist then presume the
89 * cpu is already spinning. 93 * cpu is already spinning.
@@ -98,6 +102,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
98 return 0; 102 return 0;
99 } 103 }
100 104
105out:
101 return 1; 106 return 1;
102} 107}
103 108
@@ -111,12 +116,16 @@ static void __devinit smp_xics_setup_cpu(int cpu)
111 vpa_init(cpu); 116 vpa_init(cpu);
112 117
113 cpu_clear(cpu, of_spin_map); 118 cpu_clear(cpu, of_spin_map);
119 set_cpu_current_state(cpu, CPU_STATE_ONLINE);
120 set_default_offline_state(cpu);
114 121
115} 122}
116#endif /* CONFIG_XICS */ 123#endif /* CONFIG_XICS */
117 124
118static void __devinit smp_pSeries_kick_cpu(int nr) 125static void __devinit smp_pSeries_kick_cpu(int nr)
119{ 126{
127 long rc;
128 unsigned long hcpuid;
120 BUG_ON(nr < 0 || nr >= NR_CPUS); 129 BUG_ON(nr < 0 || nr >= NR_CPUS);
121 130
122 if (!smp_startup_cpu(nr)) 131 if (!smp_startup_cpu(nr))
@@ -128,6 +137,16 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
128 * the processor will continue on to secondary_start 137 * the processor will continue on to secondary_start
129 */ 138 */
130 paca[nr].cpu_start = 1; 139 paca[nr].cpu_start = 1;
140
141 set_preferred_offline_state(nr, CPU_STATE_ONLINE);
142
143 if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) {
144 hcpuid = get_hard_smp_processor_id(nr);
145 rc = plpar_hcall_norets(H_PROD, hcpuid);
146 if (rc != H_SUCCESS)
147 panic("Error: Prod to wake up processor %d Ret= %ld\n",
148 nr, rc);
149 }
131} 150}
132 151
133static int smp_pSeries_cpu_bootable(unsigned int nr) 152static int smp_pSeries_cpu_bootable(unsigned int nr)
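
smp_pSeries_kick_cpu() above now distinguishes a CPU that was fully stopped, which still needs the RTAS start-cpu path, from one that merely ceded while inactive, which only needs an H_PROD hypercall to wake it. A small user-space model of that branch follows; the wake/start helpers are stand-ins, not real hypercalls or RTAS calls.

    /* User-space model of the kick-path branch added above. */
    #include <stdio.h>

    enum cpu_state { STATE_OFFLINE, STATE_INACTIVE, STATE_ONLINE };

    static int prod_model(int hwcpu)        /* stands in for plpar_hcall_norets(H_PROD, ...) */
    {
        printf("prodding ceded hw cpu %d awake\n", hwcpu);
        return 0;
    }

    static void start_cpu_model(int hwcpu)  /* stands in for the RTAS start-cpu path */
    {
        printf("starting stopped hw cpu %d via start-cpu\n", hwcpu);
    }

    static void kick_cpu_model(int hwcpu, enum cpu_state cur)
    {
        if (cur == STATE_INACTIVE) {
            if (prod_model(hwcpu) != 0)
                printf("prod of hw cpu %d failed\n", hwcpu);
        } else {
            start_cpu_model(hwcpu);
        }
    }

    int main(void)
    {
        kick_cpu_model(4, STATE_INACTIVE);  /* was ceded: just prod it */
        kick_cpu_model(6, STATE_OFFLINE);   /* was stopped: spin it up again */
        return 0;
    }
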
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index b9bf0eedccf2..7d01b58f3989 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -20,6 +20,7 @@
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/msi.h> 21#include <linux/msi.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/percpu.h>
23 24
24#include <asm/firmware.h> 25#include <asm/firmware.h>
25#include <asm/io.h> 26#include <asm/io.h>
@@ -46,6 +47,12 @@ static struct irq_host *xics_host;
46 */ 47 */
47#define IPI_PRIORITY 4 48#define IPI_PRIORITY 4
48 49
50/* The least favored priority */
51#define LOWEST_PRIORITY 0xFF
52
53/* The number of priorities defined above */
54#define MAX_NUM_PRIORITIES 3
55
49static unsigned int default_server = 0xFF; 56static unsigned int default_server = 0xFF;
50static unsigned int default_distrib_server = 0; 57static unsigned int default_distrib_server = 0;
51static unsigned int interrupt_server_size = 8; 58static unsigned int interrupt_server_size = 8;
@@ -56,6 +63,12 @@ static int ibm_set_xive;
56static int ibm_int_on; 63static int ibm_int_on;
57static int ibm_int_off; 64static int ibm_int_off;
58 65
66struct xics_cppr {
67 unsigned char stack[MAX_NUM_PRIORITIES];
68 int index;
69};
70
71static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
59 72
60/* Direct hardware low level accessors */ 73/* Direct hardware low level accessors */
61 74
@@ -157,7 +170,7 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
157 cpumask_t cpumask; 170 cpumask_t cpumask;
158 cpumask_t tmp = CPU_MASK_NONE; 171 cpumask_t tmp = CPU_MASK_NONE;
159 172
160 cpumask_copy(&cpumask, irq_desc[virq].affinity); 173 cpumask_copy(&cpumask, irq_to_desc(virq)->affinity);
161 if (!distribute_irqs) 174 if (!distribute_irqs)
162 return default_server; 175 return default_server;
163 176
@@ -284,6 +297,19 @@ static inline unsigned int xics_xirr_vector(unsigned int xirr)
284 return xirr & 0x00ffffff; 297 return xirr & 0x00ffffff;
285} 298}
286 299
300static void push_cppr(unsigned int vec)
301{
302 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
303
304 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
305 return;
306
307 if (vec == XICS_IPI)
308 os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
309 else
310 os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
311}
312
287static unsigned int xics_get_irq_direct(void) 313static unsigned int xics_get_irq_direct(void)
288{ 314{
289 unsigned int xirr = direct_xirr_info_get(); 315 unsigned int xirr = direct_xirr_info_get();
@@ -294,8 +320,10 @@ static unsigned int xics_get_irq_direct(void)
294 return NO_IRQ; 320 return NO_IRQ;
295 321
296 irq = irq_radix_revmap_lookup(xics_host, vec); 322 irq = irq_radix_revmap_lookup(xics_host, vec);
297 if (likely(irq != NO_IRQ)) 323 if (likely(irq != NO_IRQ)) {
324 push_cppr(vec);
298 return irq; 325 return irq;
326 }
299 327
300 /* We don't have a linux mapping, so have rtas mask it. */ 328 /* We don't have a linux mapping, so have rtas mask it. */
301 xics_mask_unknown_vec(vec); 329 xics_mask_unknown_vec(vec);
@@ -315,8 +343,10 @@ static unsigned int xics_get_irq_lpar(void)
315 return NO_IRQ; 343 return NO_IRQ;
316 344
317 irq = irq_radix_revmap_lookup(xics_host, vec); 345 irq = irq_radix_revmap_lookup(xics_host, vec);
318 if (likely(irq != NO_IRQ)) 346 if (likely(irq != NO_IRQ)) {
347 push_cppr(vec);
319 return irq; 348 return irq;
349 }
320 350
321 /* We don't have a linux mapping, so have RTAS mask it. */ 351 /* We don't have a linux mapping, so have RTAS mask it. */
322 xics_mask_unknown_vec(vec); 352 xics_mask_unknown_vec(vec);
@@ -326,12 +356,22 @@ static unsigned int xics_get_irq_lpar(void)
326 return NO_IRQ; 356 return NO_IRQ;
327} 357}
328 358
359static unsigned char pop_cppr(void)
360{
361 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
362
363 if (WARN_ON(os_cppr->index < 1))
364 return LOWEST_PRIORITY;
365
366 return os_cppr->stack[--os_cppr->index];
367}
368
329static void xics_eoi_direct(unsigned int virq) 369static void xics_eoi_direct(unsigned int virq)
330{ 370{
331 unsigned int irq = (unsigned int)irq_map[virq].hwirq; 371 unsigned int irq = (unsigned int)irq_map[virq].hwirq;
332 372
333 iosync(); 373 iosync();
334 direct_xirr_info_set((0xff << 24) | irq); 374 direct_xirr_info_set((pop_cppr() << 24) | irq);
335} 375}
336 376
337static void xics_eoi_lpar(unsigned int virq) 377static void xics_eoi_lpar(unsigned int virq)
@@ -339,7 +379,7 @@ static void xics_eoi_lpar(unsigned int virq)
339 unsigned int irq = (unsigned int)irq_map[virq].hwirq; 379 unsigned int irq = (unsigned int)irq_map[virq].hwirq;
340 380
341 iosync(); 381 iosync();
342 lpar_xirr_info_set((0xff << 24) | irq); 382 lpar_xirr_info_set((pop_cppr() << 24) | irq);
343} 383}
344 384
345static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) 385static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
@@ -388,7 +428,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
388} 428}
389 429
390static struct irq_chip xics_pic_direct = { 430static struct irq_chip xics_pic_direct = {
391 .typename = " XICS ", 431 .name = " XICS ",
392 .startup = xics_startup, 432 .startup = xics_startup,
393 .mask = xics_mask_irq, 433 .mask = xics_mask_irq,
394 .unmask = xics_unmask_irq, 434 .unmask = xics_unmask_irq,
@@ -397,7 +437,7 @@ static struct irq_chip xics_pic_direct = {
397}; 437};
398 438
399static struct irq_chip xics_pic_lpar = { 439static struct irq_chip xics_pic_lpar = {
400 .typename = " XICS ", 440 .name = " XICS ",
401 .startup = xics_startup, 441 .startup = xics_startup,
402 .mask = xics_mask_irq, 442 .mask = xics_mask_irq,
403 .unmask = xics_unmask_irq, 443 .unmask = xics_unmask_irq,
@@ -428,13 +468,13 @@ static int xics_host_map(struct irq_host *h, unsigned int virq,
428 /* Insert the interrupt mapping into the radix tree for fast lookup */ 468 /* Insert the interrupt mapping into the radix tree for fast lookup */
429 irq_radix_revmap_insert(xics_host, virq, hw); 469 irq_radix_revmap_insert(xics_host, virq, hw);
430 470
431 get_irq_desc(virq)->status |= IRQ_LEVEL; 471 irq_to_desc(virq)->status |= IRQ_LEVEL;
432 set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); 472 set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
433 return 0; 473 return 0;
434} 474}
435 475
436static int xics_host_xlate(struct irq_host *h, struct device_node *ct, 476static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
437 u32 *intspec, unsigned int intsize, 477 const u32 *intspec, unsigned int intsize,
438 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 478 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
439 479
440{ 480{
@@ -746,6 +786,12 @@ void __init xics_init_IRQ(void)
746 786
747static void xics_set_cpu_priority(unsigned char cppr) 787static void xics_set_cpu_priority(unsigned char cppr)
748{ 788{
789 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
790
791 BUG_ON(os_cppr->index != 0);
792
793 os_cppr->stack[os_cppr->index] = cppr;
794
749 if (firmware_has_feature(FW_FEATURE_LPAR)) 795 if (firmware_has_feature(FW_FEATURE_LPAR))
750 lpar_cppr_info(cppr); 796 lpar_cppr_info(cppr);
751 else 797 else
@@ -772,7 +818,7 @@ static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
772 818
773void xics_setup_cpu(void) 819void xics_setup_cpu(void)
774{ 820{
775 xics_set_cpu_priority(0xff); 821 xics_set_cpu_priority(LOWEST_PRIORITY);
776 822
777 xics_set_cpu_giq(default_distrib_server, 1); 823 xics_set_cpu_giq(default_distrib_server, 1);
778} 824}
@@ -852,7 +898,7 @@ void xics_migrate_irqs_away(void)
852 /* We need to get IPIs still. */ 898 /* We need to get IPIs still. */
853 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) 899 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
854 continue; 900 continue;
855 desc = get_irq_desc(virq); 901 desc = irq_to_desc(virq);
856 902
857 /* We only need to migrate enabled IRQS */ 903 /* We only need to migrate enabled IRQS */
858 if (desc == NULL || desc->chip == NULL 904 if (desc == NULL || desc->chip == NULL
@@ -881,7 +927,7 @@ void xics_migrate_irqs_away(void)
881 virq, cpu); 927 virq, cpu);
882 928
883 /* Reset affinity to all cpus */ 929 /* Reset affinity to all cpus */
884 cpumask_setall(irq_desc[virq].affinity); 930 cpumask_setall(irq_to_desc(virq)->affinity);
885 desc->chip->set_affinity(virq, cpu_all_mask); 931 desc->chip->set_affinity(virq, cpu_all_mask);
886unlock: 932unlock:
887 spin_unlock_irqrestore(&desc->lock, flags); 933 spin_unlock_irqrestore(&desc->lock, flags);
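
The xics.c changes above stop writing a hard-coded 0xff priority at EOI time: each accepted interrupt pushes its effective priority onto a small per-cpu stack, and EOI pops back the priority that was in force before it, so a nested IPI no longer drops the CPPR below an interrupted device interrupt. The stand-alone sketch below mirrors push_cppr()/pop_cppr() with a fake nesting scenario; the single global stack and the demo values are illustrative only.

    /* User-space model of the per-cpu CPPR stack. */
    #include <stdio.h>

    #define MAX_NUM_PRIORITIES 3
    #define LOWEST_PRIORITY    0xFF
    #define DEFAULT_PRIORITY   5
    #define IPI_PRIORITY       4

    struct cppr_stack {
        unsigned char stack[MAX_NUM_PRIORITIES];
        int index;
    };

    static struct cppr_stack cppr = { .stack = { LOWEST_PRIORITY }, .index = 0 };

    static void push_cppr(int is_ipi)
    {
        if (cppr.index >= MAX_NUM_PRIORITIES - 1)
            return;                             /* mirrors the WARN_ON guard */
        cppr.stack[++cppr.index] = is_ipi ? IPI_PRIORITY : DEFAULT_PRIORITY;
    }

    static unsigned char pop_cppr(void)
    {
        if (cppr.index < 1)
            return LOWEST_PRIORITY;             /* mirrors the WARN_ON guard */
        return cppr.stack[--cppr.index];        /* priority in force before this irq */
    }

    int main(void)
    {
        push_cppr(0);                           /* device interrupt accepted */
        push_cppr(1);                           /* IPI nests on top of it */
        printf("EOI of IPI restores CPPR %#x\n", pop_cppr());
        printf("EOI of device irq restores CPPR %#x\n", pop_cppr());
        return 0;
    }
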
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 9d4b17462f13..5642924fb9fb 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_U3_DART) += dart_iommu.o
16obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o 16obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
17obj-$(CONFIG_FSL_SOC) += fsl_soc.o 17obj-$(CONFIG_FSL_SOC) += fsl_soc.o
18obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y) 18obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
19obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
19obj-$(CONFIG_FSL_LBC) += fsl_lbc.o 20obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
20obj-$(CONFIG_FSL_GTM) += fsl_gtm.o 21obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
21obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o 22obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 82424cd7e128..a4b41dbde128 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -77,7 +77,7 @@ static void cpm_end_irq(unsigned int irq)
77} 77}
78 78
79static struct irq_chip cpm_pic = { 79static struct irq_chip cpm_pic = {
80 .typename = " CPM PIC ", 80 .name = " CPM PIC ",
81 .mask = cpm_mask_irq, 81 .mask = cpm_mask_irq,
82 .unmask = cpm_unmask_irq, 82 .unmask = cpm_unmask_irq,
83 .eoi = cpm_end_irq, 83 .eoi = cpm_end_irq,
@@ -102,7 +102,7 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
102{ 102{
103 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); 103 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
104 104
105 get_irq_desc(virq)->status |= IRQ_LEVEL; 105 irq_to_desc(virq)->status |= IRQ_LEVEL;
106 set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); 106 set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
107 return 0; 107 return 0;
108} 108}
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 78f1f7cca0a0..971483f0dfac 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -115,11 +115,13 @@ static void cpm2_ack(unsigned int virq)
115 115
116static void cpm2_end_irq(unsigned int virq) 116static void cpm2_end_irq(unsigned int virq)
117{ 117{
118 struct irq_desc *desc;
118 int bit, word; 119 int bit, word;
119 unsigned int irq_nr = virq_to_hw(virq); 120 unsigned int irq_nr = virq_to_hw(virq);
120 121
121 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) 122 desc = irq_to_desc(irq_nr);
122 && irq_desc[irq_nr].action) { 123 if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))
124 && desc->action) {
123 125
124 bit = irq_to_siubit[irq_nr]; 126 bit = irq_to_siubit[irq_nr];
125 word = irq_to_siureg[irq_nr]; 127 word = irq_to_siureg[irq_nr];
@@ -138,7 +140,7 @@ static void cpm2_end_irq(unsigned int virq)
138static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) 140static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
139{ 141{
140 unsigned int src = virq_to_hw(virq); 142 unsigned int src = virq_to_hw(virq);
141 struct irq_desc *desc = get_irq_desc(virq); 143 struct irq_desc *desc = irq_to_desc(virq);
142 unsigned int vold, vnew, edibit; 144 unsigned int vold, vnew, edibit;
143 145
144 if (flow_type == IRQ_TYPE_NONE) 146 if (flow_type == IRQ_TYPE_NONE)
@@ -182,7 +184,7 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
182} 184}
183 185
184static struct irq_chip cpm2_pic = { 186static struct irq_chip cpm2_pic = {
185 .typename = " CPM2 SIU ", 187 .name = " CPM2 SIU ",
186 .mask = cpm2_mask_irq, 188 .mask = cpm2_mask_irq,
187 .unmask = cpm2_unmask_irq, 189 .unmask = cpm2_unmask_irq,
188 .ack = cpm2_ack, 190 .ack = cpm2_ack,
@@ -210,13 +212,13 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
210{ 212{
211 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); 213 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
212 214
213 get_irq_desc(virq)->status |= IRQ_LEVEL; 215 irq_to_desc(virq)->status |= IRQ_LEVEL;
214 set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq); 216 set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
215 return 0; 217 return 0;
216} 218}
217 219
218static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct, 220static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct,
219 u32 *intspec, unsigned int intsize, 221 const u32 *intspec, unsigned int intsize,
220 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 222 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
221{ 223{
222 *out_hwirq = intspec[0]; 224 *out_hwirq = intspec[0];
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index e4b6d66d93de..9de72c96e6d1 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -72,7 +72,7 @@ static phys_addr_t muram_pbase;
72/* Max address size we deal with */ 72/* Max address size we deal with */
73#define OF_MAX_ADDR_CELLS 4 73#define OF_MAX_ADDR_CELLS 4
74 74
75int __init cpm_muram_init(void) 75int cpm_muram_init(void)
76{ 76{
77 struct device_node *np; 77 struct device_node *np;
78 struct resource r; 78 struct resource r;
@@ -81,6 +81,9 @@ int __init cpm_muram_init(void)
81 int i = 0; 81 int i = 0;
82 int ret = 0; 82 int ret = 0;
83 83
84 if (muram_pbase)
85 return 0;
86
84 spin_lock_init(&cpm_muram_lock); 87 spin_lock_init(&cpm_muram_lock);
85 /* initialize the info header */ 88 /* initialize the info header */
86 rh_init(&cpm_muram_info, 1, 89 rh_init(&cpm_muram_info, 1,
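
cpm_muram_init() above loses its __init annotation and gains an early return when the MURAM base is already known, so it can be called lazily from more than one path without redoing the setup. That is the usual idempotent-init guard; a tiny user-space rendering is below, with an invented base address.

    /* User-space model of the idempotent-init guard added above. */
    #include <stdio.h>

    static unsigned long muram_base;    /* nonzero once initialised */

    static int muram_init_model(void)
    {
        if (muram_base)
            return 0;                   /* later callers are no-ops */

        muram_base = 0xf0010000UL;      /* invented base, stands in for the probe */
        printf("muram initialised at %#lx\n", muram_base);
        return 0;
    }

    int main(void)
    {
        muram_init_model();             /* first user triggers the setup */
        muram_init_model();             /* second user: nothing to do */
        return 0;
    }
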
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index da38a1ff97bb..62e50258cdef 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -47,7 +47,7 @@ static struct irq_chip fsl_msi_chip = {
47 .mask = mask_msi_irq, 47 .mask = mask_msi_irq,
48 .unmask = unmask_msi_irq, 48 .unmask = unmask_msi_irq,
49 .ack = fsl_msi_end_irq, 49 .ack = fsl_msi_end_irq,
50 .typename = " FSL-MSI ", 50 .name = " FSL-MSI ",
51}; 51};
52 52
53static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, 53static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
@@ -55,7 +55,7 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
55{ 55{
56 struct irq_chip *chip = &fsl_msi_chip; 56 struct irq_chip *chip = &fsl_msi_chip;
57 57
58 get_irq_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING; 58 irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
59 59
60 set_irq_chip_and_handler(virq, chip, handle_edge_irq); 60 set_irq_chip_and_handler(virq, chip, handle_edge_irq);
61 61
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index ae88b1448018..4e3a3e345ab3 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -56,7 +56,7 @@ static int __init fsl_pcie_check_link(struct pci_controller *hose)
56 return 0; 56 return 0;
57} 57}
58 58
59#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx) 59#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
60static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, 60static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
61 unsigned int index, const struct resource *res, 61 unsigned int index, const struct resource *res,
62 resource_size_t offset) 62 resource_size_t offset)
@@ -392,9 +392,23 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header);
392DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header); 392DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
393DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header); 393DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
394DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header); 394DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
395DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header);
396DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header);
397DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
398DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
399DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
400DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
401DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
402DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
403DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
404DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
395DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); 405DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
396DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); 406DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
397#endif /* CONFIG_PPC_85xx || CONFIG_PPC_86xx */ 407DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
408DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
409DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
410DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
411#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
398 412
399#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) 413#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
400DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header); 414DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header);
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
new file mode 100644
index 000000000000..a7635a993dca
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -0,0 +1,88 @@
1/*
2 * Suspend/resume support
3 *
4 * Copyright 2009 MontaVista Software, Inc.
5 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/init.h>
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/suspend.h>
18#include <linux/delay.h>
19#include <linux/device.h>
20#include <linux/of_platform.h>
21
22struct pmc_regs {
23 __be32 devdisr;
24 __be32 devdisr2;
25 __be32 :32;
26 __be32 :32;
27 __be32 pmcsr;
28#define PMCSR_SLP (1 << 17)
29};
30
31static struct device *pmc_dev;
32static struct pmc_regs __iomem *pmc_regs;
33
34static int pmc_suspend_enter(suspend_state_t state)
35{
36 int ret;
37
38 setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
39 /* At this point, the CPU is asleep. */
40
41 /* Upon resume, wait for SLP bit to be clear. */
42 ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
43 10000, 10) ? 0 : -ETIMEDOUT;
44 if (ret)
45 dev_err(pmc_dev, "tired waiting for SLP bit to clear\n");
46 return ret;
47}
48
49static int pmc_suspend_valid(suspend_state_t state)
50{
51 if (state != PM_SUSPEND_STANDBY)
52 return 0;
53 return 1;
54}
55
56static struct platform_suspend_ops pmc_suspend_ops = {
57 .valid = pmc_suspend_valid,
58 .enter = pmc_suspend_enter,
59};
60
61static int pmc_probe(struct of_device *ofdev, const struct of_device_id *id)
62{
63 pmc_regs = of_iomap(ofdev->node, 0);
64 if (!pmc_regs)
65 return -ENOMEM;
66
67 pmc_dev = &ofdev->dev;
68 suspend_set_ops(&pmc_suspend_ops);
69 return 0;
70}
71
72static const struct of_device_id pmc_ids[] = {
73 { .compatible = "fsl,mpc8548-pmc", },
74 { .compatible = "fsl,mpc8641d-pmc", },
75 { },
76};
77
78static struct of_platform_driver pmc_driver = {
79 .driver.name = "fsl-pmc",
80 .match_table = pmc_ids,
81 .probe = pmc_probe,
82};
83
84static int __init pmc_init(void)
85{
86 return of_register_platform_driver(&pmc_driver);
87}
88device_initcall(pmc_init);
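
The new fsl_pmc.c driver above enters standby by setting the SLP bit and, on resume, polls for the hardware to clear it again, bounded by a timeout via spin_event_timeout(). A plain user-space sketch of the same bounded-poll idea follows; the fake register that clears itself after a few reads is invented for the demo.

    /* User-space sketch of a bounded poll-for-bit-clear loop. */
    #include <stdio.h>

    #define SLP_BIT (1u << 17)

    static unsigned int fake_pmcsr = SLP_BIT;   /* pretend hardware register */

    static unsigned int read_pmcsr(void)
    {
        static int reads;

        if (++reads >= 3)                       /* "hardware" clears SLP on the 3rd read */
            fake_pmcsr &= ~SLP_BIT;
        return fake_pmcsr;
    }

    /* Returns 0 when the bit cleared in time, -1 on timeout. */
    static int wait_slp_clear(int max_polls)
    {
        int i;

        for (i = 0; i < max_polls; i++) {
            if ((read_pmcsr() & SLP_BIT) == 0)
                return 0;
            /* a real driver would delay or relax the cpu here */
        }
        return -1;
    }

    int main(void)
    {
        printf("SLP cleared: %s\n", wait_slp_clear(10) == 0 ? "yes" : "timed out");
        return 0;
    }
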
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index adca4affcf1f..b91f7acdda6f 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -372,7 +372,7 @@ err:
372 372
373arch_initcall(fsl_usb_of_init); 373arch_initcall(fsl_usb_of_init);
374 374
375#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx) 375#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
376static __be32 __iomem *rstcr; 376static __be32 __iomem *rstcr;
377 377
378static int __init setup_rstcr(void) 378static int __init setup_rstcr(void)
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index a96584ab33dd..0a55db8a5a29 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -135,7 +135,7 @@ static void i8259_unmask_irq(unsigned int irq_nr)
135} 135}
136 136
137static struct irq_chip i8259_pic = { 137static struct irq_chip i8259_pic = {
138 .typename = " i8259 ", 138 .name = " i8259 ",
139 .mask = i8259_mask_irq, 139 .mask = i8259_mask_irq,
140 .disable = i8259_mask_irq, 140 .disable = i8259_mask_irq,
141 .unmask = i8259_unmask_irq, 141 .unmask = i8259_unmask_irq,
@@ -175,12 +175,12 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
175 175
176 /* We block the internal cascade */ 176 /* We block the internal cascade */
177 if (hw == 2) 177 if (hw == 2)
178 get_irq_desc(virq)->status |= IRQ_NOREQUEST; 178 irq_to_desc(virq)->status |= IRQ_NOREQUEST;
179 179
180 /* We use the level handler only for now, we might want to 180 /* We use the level handler only for now, we might want to
181 * be more cautious here but that works for now 181 * be more cautious here but that works for now
182 */ 182 */
183 get_irq_desc(virq)->status |= IRQ_LEVEL; 183 irq_to_desc(virq)->status |= IRQ_LEVEL;
184 set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); 184 set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
185 return 0; 185 return 0;
186} 186}
@@ -198,7 +198,7 @@ static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
198} 198}
199 199
200static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 200static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
201 u32 *intspec, unsigned int intsize, 201 const u32 *intspec, unsigned int intsize,
202 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 202 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
203{ 203{
204 static unsigned char map_isa_senses[4] = { 204 static unsigned char map_isa_senses[4] = {
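
The i8259 hunks above show the two conversions that repeat through the rest of these sysdev changes: irq_chip.typename gives way to .name, and descriptor accesses go through irq_to_desc() instead of get_irq_desc() or direct irq_desc[] indexing, so descriptors no longer have to live in a flat array. A minimal sketch of the new-style access; mark_level_irq() is an illustrative name rather than anything from the patch.

#include <linux/irq.h>

static void mark_level_irq(unsigned int virq)
{
	/* Always look the descriptor up through the accessor ... */
	struct irq_desc *desc = irq_to_desc(virq);

	/* ... then manipulate it as before (status flags, chip data, etc.). */
	desc->status |= IRQ_LEVEL;
}
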
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index cb7689c4bfbd..28cdddd2f89e 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -605,7 +605,7 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
605{ 605{
606 struct ipic *ipic = ipic_from_irq(virq); 606 struct ipic *ipic = ipic_from_irq(virq);
607 unsigned int src = ipic_irq_to_hw(virq); 607 unsigned int src = ipic_irq_to_hw(virq);
608 struct irq_desc *desc = get_irq_desc(virq); 608 struct irq_desc *desc = irq_to_desc(virq);
609 unsigned int vold, vnew, edibit; 609 unsigned int vold, vnew, edibit;
610 610
611 if (flow_type == IRQ_TYPE_NONE) 611 if (flow_type == IRQ_TYPE_NONE)
@@ -660,7 +660,7 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
660 660
661/* level interrupts and edge interrupts have different ack operations */ 661/* level interrupts and edge interrupts have different ack operations */
662static struct irq_chip ipic_level_irq_chip = { 662static struct irq_chip ipic_level_irq_chip = {
663 .typename = " IPIC ", 663 .name = " IPIC ",
664 .unmask = ipic_unmask_irq, 664 .unmask = ipic_unmask_irq,
665 .mask = ipic_mask_irq, 665 .mask = ipic_mask_irq,
666 .mask_ack = ipic_mask_irq, 666 .mask_ack = ipic_mask_irq,
@@ -668,7 +668,7 @@ static struct irq_chip ipic_level_irq_chip = {
668}; 668};
669 669
670static struct irq_chip ipic_edge_irq_chip = { 670static struct irq_chip ipic_edge_irq_chip = {
671 .typename = " IPIC ", 671 .name = " IPIC ",
672 .unmask = ipic_unmask_irq, 672 .unmask = ipic_unmask_irq,
673 .mask = ipic_mask_irq, 673 .mask = ipic_mask_irq,
674 .mask_ack = ipic_mask_irq_and_ack, 674 .mask_ack = ipic_mask_irq_and_ack,
@@ -697,7 +697,7 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
697} 697}
698 698
699static int ipic_host_xlate(struct irq_host *h, struct device_node *ct, 699static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
700 u32 *intspec, unsigned int intsize, 700 const u32 *intspec, unsigned int intsize,
701 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 701 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
702 702
703{ 703{
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 5d2d5522ef41..69bd6f4dff83 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -72,7 +72,7 @@ static void mpc8xx_end_irq(unsigned int virq)
72 72
73static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type) 73static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
74{ 74{
75 struct irq_desc *desc = get_irq_desc(virq); 75 struct irq_desc *desc = irq_to_desc(virq);
76 76
77 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 77 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
78 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 78 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
@@ -94,7 +94,7 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
94} 94}
95 95
96static struct irq_chip mpc8xx_pic = { 96static struct irq_chip mpc8xx_pic = {
97 .typename = " MPC8XX SIU ", 97 .name = " MPC8XX SIU ",
98 .unmask = mpc8xx_unmask_irq, 98 .unmask = mpc8xx_unmask_irq,
99 .mask = mpc8xx_mask_irq, 99 .mask = mpc8xx_mask_irq,
100 .ack = mpc8xx_ack, 100 .ack = mpc8xx_ack,
@@ -130,7 +130,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
130 130
131 131
132static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, 132static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
133 u32 *intspec, unsigned int intsize, 133 const u32 *intspec, unsigned int intsize,
134 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 134 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
135{ 135{
136 static unsigned char map_pic_senses[4] = { 136 static unsigned char map_pic_senses[4] = {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 30c44e6b0413..aa9d06e5925b 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -572,7 +572,7 @@ static int irq_choose_cpu(unsigned int virt_irq)
572 cpumask_t mask; 572 cpumask_t mask;
573 int cpuid; 573 int cpuid;
574 574
575 cpumask_copy(&mask, irq_desc[virt_irq].affinity); 575 cpumask_copy(&mask, irq_to_desc(virt_irq)->affinity);
576 if (cpus_equal(mask, CPU_MASK_ALL)) { 576 if (cpus_equal(mask, CPU_MASK_ALL)) {
577 static int irq_rover; 577 static int irq_rover;
578 static DEFINE_SPINLOCK(irq_rover_lock); 578 static DEFINE_SPINLOCK(irq_rover_lock);
@@ -621,7 +621,7 @@ static struct mpic *mpic_find(unsigned int irq)
621 if (irq < NUM_ISA_INTERRUPTS) 621 if (irq < NUM_ISA_INTERRUPTS)
622 return NULL; 622 return NULL;
623 623
624 return irq_desc[irq].chip_data; 624 return irq_to_desc(irq)->chip_data;
625} 625}
626 626
627/* Determine if the linux irq is an IPI */ 627/* Determine if the linux irq is an IPI */
@@ -648,14 +648,14 @@ static inline u32 mpic_physmask(u32 cpumask)
648/* Get the mpic structure from the IPI number */ 648/* Get the mpic structure from the IPI number */
649static inline struct mpic * mpic_from_ipi(unsigned int ipi) 649static inline struct mpic * mpic_from_ipi(unsigned int ipi)
650{ 650{
651 return irq_desc[ipi].chip_data; 651 return irq_to_desc(ipi)->chip_data;
652} 652}
653#endif 653#endif
654 654
655/* Get the mpic structure from the irq number */ 655/* Get the mpic structure from the irq number */
656static inline struct mpic * mpic_from_irq(unsigned int irq) 656static inline struct mpic * mpic_from_irq(unsigned int irq)
657{ 657{
658 return irq_desc[irq].chip_data; 658 return irq_to_desc(irq)->chip_data;
659} 659}
660 660
661/* Send an EOI */ 661/* Send an EOI */
@@ -735,7 +735,7 @@ static void mpic_unmask_ht_irq(unsigned int irq)
735 735
736 mpic_unmask_irq(irq); 736 mpic_unmask_irq(irq);
737 737
738 if (irq_desc[irq].status & IRQ_LEVEL) 738 if (irq_to_desc(irq)->status & IRQ_LEVEL)
739 mpic_ht_end_irq(mpic, src); 739 mpic_ht_end_irq(mpic, src);
740} 740}
741 741
@@ -745,7 +745,7 @@ static unsigned int mpic_startup_ht_irq(unsigned int irq)
745 unsigned int src = mpic_irq_to_hw(irq); 745 unsigned int src = mpic_irq_to_hw(irq);
746 746
747 mpic_unmask_irq(irq); 747 mpic_unmask_irq(irq);
748 mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); 748 mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
749 749
750 return 0; 750 return 0;
751} 751}
@@ -755,7 +755,7 @@ static void mpic_shutdown_ht_irq(unsigned int irq)
755 struct mpic *mpic = mpic_from_irq(irq); 755 struct mpic *mpic = mpic_from_irq(irq);
756 unsigned int src = mpic_irq_to_hw(irq); 756 unsigned int src = mpic_irq_to_hw(irq);
757 757
758 mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); 758 mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
759 mpic_mask_irq(irq); 759 mpic_mask_irq(irq);
760} 760}
761 761
@@ -772,7 +772,7 @@ static void mpic_end_ht_irq(unsigned int irq)
772 * latched another edge interrupt coming in anyway 772 * latched another edge interrupt coming in anyway
773 */ 773 */
774 774
775 if (irq_desc[irq].status & IRQ_LEVEL) 775 if (irq_to_desc(irq)->status & IRQ_LEVEL)
776 mpic_ht_end_irq(mpic, src); 776 mpic_ht_end_irq(mpic, src);
777 mpic_eoi(mpic); 777 mpic_eoi(mpic);
778} 778}
@@ -856,7 +856,7 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
856{ 856{
857 struct mpic *mpic = mpic_from_irq(virq); 857 struct mpic *mpic = mpic_from_irq(virq);
858 unsigned int src = mpic_irq_to_hw(virq); 858 unsigned int src = mpic_irq_to_hw(virq);
859 struct irq_desc *desc = get_irq_desc(virq); 859 struct irq_desc *desc = irq_to_desc(virq);
860 unsigned int vecpri, vold, vnew; 860 unsigned int vecpri, vold, vnew;
861 861
862 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", 862 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
@@ -994,7 +994,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
994} 994}
995 995
996static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, 996static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
997 u32 *intspec, unsigned int intsize, 997 const u32 *intspec, unsigned int intsize,
998 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 998 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
999 999
1000{ 1000{
@@ -1062,19 +1062,19 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1062 mpic->name = name; 1062 mpic->name = name;
1063 1063
1064 mpic->hc_irq = mpic_irq_chip; 1064 mpic->hc_irq = mpic_irq_chip;
1065 mpic->hc_irq.typename = name; 1065 mpic->hc_irq.name = name;
1066 if (flags & MPIC_PRIMARY) 1066 if (flags & MPIC_PRIMARY)
1067 mpic->hc_irq.set_affinity = mpic_set_affinity; 1067 mpic->hc_irq.set_affinity = mpic_set_affinity;
1068#ifdef CONFIG_MPIC_U3_HT_IRQS 1068#ifdef CONFIG_MPIC_U3_HT_IRQS
1069 mpic->hc_ht_irq = mpic_irq_ht_chip; 1069 mpic->hc_ht_irq = mpic_irq_ht_chip;
1070 mpic->hc_ht_irq.typename = name; 1070 mpic->hc_ht_irq.name = name;
1071 if (flags & MPIC_PRIMARY) 1071 if (flags & MPIC_PRIMARY)
1072 mpic->hc_ht_irq.set_affinity = mpic_set_affinity; 1072 mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
1073#endif /* CONFIG_MPIC_U3_HT_IRQS */ 1073#endif /* CONFIG_MPIC_U3_HT_IRQS */
1074 1074
1075#ifdef CONFIG_SMP 1075#ifdef CONFIG_SMP
1076 mpic->hc_ipi = mpic_ipi_chip; 1076 mpic->hc_ipi = mpic_ipi_chip;
1077 mpic->hc_ipi.typename = name; 1077 mpic->hc_ipi.name = name;
1078#endif /* CONFIG_SMP */ 1078#endif /* CONFIG_SMP */
1079 1079
1080 mpic->flags = flags; 1080 mpic->flags = flags;
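
The mpic helpers above (mpic_find(), mpic_from_ipi(), mpic_from_irq()) show the other half of the same cleanup: a per-interrupt back-pointer that used to be read as irq_desc[irq].chip_data is now recovered through irq_to_desc(). A sketch of the stash/retrieve pairing, where struct example_pic and the function names are illustrative only:

#include <linux/irq.h>

struct example_pic {
	int id;				/* placeholder controller state */
};

static void example_map(unsigned int virq, struct example_pic *pic)
{
	/* Stash the controller pointer at map time ... */
	set_irq_chip_data(virq, pic);
}

static struct example_pic *example_from_irq(unsigned int virq)
{
	/* ... and recover it through the descriptor accessor later. */
	return irq_to_desc(virq)->chip_data;
}
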
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 656cb772b691..0f6ab06f8474 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -60,7 +60,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
60 .eoi = mpic_end_irq, 60 .eoi = mpic_end_irq,
61 .set_type = mpic_set_irq_type, 61 .set_type = mpic_set_irq_type,
62 .set_affinity = mpic_set_affinity, 62 .set_affinity = mpic_set_affinity,
63 .typename = "PASEMI-MSI ", 63 .name = "PASEMI-MSI ",
64}; 64};
65 65
66static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) 66static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 0a8f5a9e87c9..d3caf23e6312 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -42,7 +42,7 @@ static struct irq_chip mpic_u3msi_chip = {
42 .eoi = mpic_end_irq, 42 .eoi = mpic_end_irq,
43 .set_type = mpic_set_irq_type, 43 .set_type = mpic_set_irq_type,
44 .set_affinity = mpic_set_affinity, 44 .set_affinity = mpic_set_affinity,
45 .typename = "MPIC-U3MSI", 45 .name = "MPIC-U3MSI",
46}; 46};
47 47
48static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) 48static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 2aa4ed066db1..485b92477d7c 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -213,7 +213,7 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
213{ 213{
214 int level1; 214 int level1;
215 215
216 get_irq_desc(virq)->status |= IRQ_LEVEL; 216 irq_to_desc(virq)->status |= IRQ_LEVEL;
217 217
218 level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; 218 level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
219 BUG_ON(level1 > MV64x60_LEVEL1_GPP); 219 BUG_ON(level1 > MV64x60_LEVEL1_GPP);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 464271bea6c9..149393c02c3f 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -27,6 +27,8 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/ioport.h> 28#include <linux/ioport.h>
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/mod_devicetable.h>
31#include <linux/of_platform.h>
30#include <asm/irq.h> 32#include <asm/irq.h>
31#include <asm/page.h> 33#include <asm/page.h>
32#include <asm/pgtable.h> 34#include <asm/pgtable.h>
@@ -65,19 +67,6 @@ static unsigned int qe_num_of_snum;
65 67
66static phys_addr_t qebase = -1; 68static phys_addr_t qebase = -1;
67 69
68int qe_alive_during_sleep(void)
69{
70 static int ret = -1;
71
72 if (ret != -1)
73 return ret;
74
75 ret = !of_find_compatible_node(NULL, NULL, "fsl,mpc8569-pmc");
76
77 return ret;
78}
79EXPORT_SYMBOL(qe_alive_during_sleep);
80
81phys_addr_t get_qe_base(void) 70phys_addr_t get_qe_base(void)
82{ 71{
83 struct device_node *qe; 72 struct device_node *qe;
@@ -104,7 +93,7 @@ phys_addr_t get_qe_base(void)
104 93
105EXPORT_SYMBOL(get_qe_base); 94EXPORT_SYMBOL(get_qe_base);
106 95
107void __init qe_reset(void) 96void qe_reset(void)
108{ 97{
109 if (qe_immr == NULL) 98 if (qe_immr == NULL)
110 qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE); 99 qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
@@ -330,16 +319,18 @@ EXPORT_SYMBOL(qe_put_snum);
330static int qe_sdma_init(void) 319static int qe_sdma_init(void)
331{ 320{
332 struct sdma __iomem *sdma = &qe_immr->sdma; 321 struct sdma __iomem *sdma = &qe_immr->sdma;
333 unsigned long sdma_buf_offset; 322 static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
334 323
335 if (!sdma) 324 if (!sdma)
336 return -ENODEV; 325 return -ENODEV;
337 326
338 /* allocate 2 internal temporary buffers (512 bytes size each) for 327 /* allocate 2 internal temporary buffers (512 bytes size each) for
339 * the SDMA */ 328 * the SDMA */
340 sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); 329 if (IS_ERR_VALUE(sdma_buf_offset)) {
341 if (IS_ERR_VALUE(sdma_buf_offset)) 330 sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
342 return -ENOMEM; 331 if (IS_ERR_VALUE(sdma_buf_offset))
332 return -ENOMEM;
333 }
343 334
344 out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); 335 out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
345 out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | 336 out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
@@ -349,7 +340,7 @@ static int qe_sdma_init(void)
349} 340}
350 341
351/* The maximum number of RISCs we support */ 342/* The maximum number of RISCs we support */
352#define MAX_QE_RISC 2 343#define MAX_QE_RISC 4
353 344
354/* Firmware information stored here for qe_get_firmware_info() */ 345/* Firmware information stored here for qe_get_firmware_info() */
355static struct qe_firmware_info qe_firmware_info; 346static struct qe_firmware_info qe_firmware_info;
@@ -658,3 +649,35 @@ unsigned int qe_get_num_of_snums(void)
658 return num_of_snums; 649 return num_of_snums;
659} 650}
660EXPORT_SYMBOL(qe_get_num_of_snums); 651EXPORT_SYMBOL(qe_get_num_of_snums);
652
653#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
654static int qe_resume(struct of_device *ofdev)
655{
656 if (!qe_alive_during_sleep())
657 qe_reset();
658 return 0;
659}
660
661static int qe_probe(struct of_device *ofdev, const struct of_device_id *id)
662{
663 return 0;
664}
665
666static const struct of_device_id qe_ids[] = {
667 { .compatible = "fsl,qe", },
668 { },
669};
670
671static struct of_platform_driver qe_driver = {
672 .driver.name = "fsl-qe",
673 .match_table = qe_ids,
674 .probe = qe_probe,
675 .resume = qe_resume,
676};
677
678static int __init qe_drv_init(void)
679{
680 return of_register_platform_driver(&qe_driver);
681}
682device_initcall(qe_drv_init);
683#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
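
Two related changes meet in the qe.c hunks above: qe_reset() loses its __init annotation so the new fsl-qe of_platform driver can call it from its resume hook, and qe_sdma_init() caches the MURAM offset in a static variable so the SDMA buffers are allocated only once and merely reprogrammed after each reset. A sketch of that allocate-once, reprogram-on-reset pattern; example_sdma_reinit() is an illustrative name, while qe_muram_alloc() and IS_ERR_VALUE() are the same helpers the patch uses.

#include <linux/err.h>
#include <linux/errno.h>
#include <asm/qe.h>

static int example_sdma_reinit(void)
{
	/* Survives repeated calls (e.g. a reset on resume from standby). */
	static unsigned long buf_offset = (unsigned long)-ENOMEM;

	if (IS_ERR_VALUE(buf_offset)) {
		/* First call only: grab 2 x 512 bytes of MURAM, 4K aligned. */
		buf_offset = qe_muram_alloc(512 * 2, 4096);
		if (IS_ERR_VALUE(buf_offset))
			return -ENOMEM;
	}

	/* Every call: rewrite the hardware registers using buf_offset. */
	return 0;
}
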
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 3faa42e03a85..2acc928d1920 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -189,7 +189,7 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg
189 189
190static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) 190static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
191{ 191{
192 return irq_desc[virq].chip_data; 192 return irq_to_desc(virq)->chip_data;
193} 193}
194 194
195#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) 195#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
@@ -237,7 +237,7 @@ static void qe_ic_mask_irq(unsigned int virq)
237} 237}
238 238
239static struct irq_chip qe_ic_irq_chip = { 239static struct irq_chip qe_ic_irq_chip = {
240 .typename = " QEIC ", 240 .name = " QEIC ",
241 .unmask = qe_ic_unmask_irq, 241 .unmask = qe_ic_unmask_irq,
242 .mask = qe_ic_mask_irq, 242 .mask = qe_ic_mask_irq,
243 .mask_ack = qe_ic_mask_irq, 243 .mask_ack = qe_ic_mask_irq,
@@ -263,7 +263,7 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
263 chip = &qe_ic->hc_irq; 263 chip = &qe_ic->hc_irq;
264 264
265 set_irq_chip_data(virq, qe_ic); 265 set_irq_chip_data(virq, qe_ic);
266 get_irq_desc(virq)->status |= IRQ_LEVEL; 266 irq_to_desc(virq)->status |= IRQ_LEVEL;
267 267
268 set_irq_chip_and_handler(virq, chip, handle_level_irq); 268 set_irq_chip_and_handler(virq, chip, handle_level_irq);
269 269
@@ -271,7 +271,7 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
271} 271}
272 272
273static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct, 273static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
274 u32 * intspec, unsigned int intsize, 274 const u32 * intspec, unsigned int intsize,
275 irq_hw_number_t * out_hwirq, 275 irq_hw_number_t * out_hwirq,
276 unsigned int *out_flags) 276 unsigned int *out_flags)
277{ 277{
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index cf244a419e96..595034cfb85a 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -376,7 +376,7 @@ static void tsi108_pci_irq_end(u_int irq)
376 */ 376 */
377 377
378static struct irq_chip tsi108_pci_irq = { 378static struct irq_chip tsi108_pci_irq = {
379 .typename = "tsi108_PCI_int", 379 .name = "tsi108_PCI_int",
380 .mask = tsi108_pci_irq_disable, 380 .mask = tsi108_pci_irq_disable,
381 .ack = tsi108_pci_irq_ack, 381 .ack = tsi108_pci_irq_ack,
382 .end = tsi108_pci_irq_end, 382 .end = tsi108_pci_irq_end,
@@ -384,7 +384,7 @@ static struct irq_chip tsi108_pci_irq = {
384}; 384};
385 385
386static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, 386static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
387 u32 *intspec, unsigned int intsize, 387 const u32 *intspec, unsigned int intsize,
388 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 388 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
389{ 389{
390 *out_hwirq = intspec[0]; 390 *out_hwirq = intspec[0];
@@ -398,7 +398,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
398 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); 398 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
399 if ((virq >= 1) && (virq <= 4)){ 399 if ((virq >= 1) && (virq <= 4)){
400 irq = virq + IRQ_PCI_INTAD_BASE - 1; 400 irq = virq + IRQ_PCI_INTAD_BASE - 1;
401 get_irq_desc(irq)->status |= IRQ_LEVEL; 401 irq_to_desc(irq)->status |= IRQ_LEVEL;
402 set_irq_chip(irq, &tsi108_pci_irq); 402 set_irq_chip(irq, &tsi108_pci_irq);
403 } 403 }
404 return 0; 404 return 0;
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 466ce9ace127..7d10074b3304 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -57,7 +57,7 @@ struct uic {
57 57
58static void uic_unmask_irq(unsigned int virq) 58static void uic_unmask_irq(unsigned int virq)
59{ 59{
60 struct irq_desc *desc = get_irq_desc(virq); 60 struct irq_desc *desc = irq_to_desc(virq);
61 struct uic *uic = get_irq_chip_data(virq); 61 struct uic *uic = get_irq_chip_data(virq);
62 unsigned int src = uic_irq_to_hw(virq); 62 unsigned int src = uic_irq_to_hw(virq);
63 unsigned long flags; 63 unsigned long flags;
@@ -101,7 +101,7 @@ static void uic_ack_irq(unsigned int virq)
101 101
102static void uic_mask_ack_irq(unsigned int virq) 102static void uic_mask_ack_irq(unsigned int virq)
103{ 103{
104 struct irq_desc *desc = get_irq_desc(virq); 104 struct irq_desc *desc = irq_to_desc(virq);
105 struct uic *uic = get_irq_chip_data(virq); 105 struct uic *uic = get_irq_chip_data(virq);
106 unsigned int src = uic_irq_to_hw(virq); 106 unsigned int src = uic_irq_to_hw(virq);
107 unsigned long flags; 107 unsigned long flags;
@@ -129,7 +129,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
129{ 129{
130 struct uic *uic = get_irq_chip_data(virq); 130 struct uic *uic = get_irq_chip_data(virq);
131 unsigned int src = uic_irq_to_hw(virq); 131 unsigned int src = uic_irq_to_hw(virq);
132 struct irq_desc *desc = get_irq_desc(virq); 132 struct irq_desc *desc = irq_to_desc(virq);
133 unsigned long flags; 133 unsigned long flags;
134 int trigger, polarity; 134 int trigger, polarity;
135 u32 tr, pr, mask; 135 u32 tr, pr, mask;
@@ -177,7 +177,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
177} 177}
178 178
179static struct irq_chip uic_irq_chip = { 179static struct irq_chip uic_irq_chip = {
180 .typename = " UIC ", 180 .name = " UIC ",
181 .unmask = uic_unmask_irq, 181 .unmask = uic_unmask_irq,
182 .mask = uic_mask_irq, 182 .mask = uic_mask_irq,
183 .mask_ack = uic_mask_ack_irq, 183 .mask_ack = uic_mask_ack_irq,
@@ -202,7 +202,7 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
202} 202}
203 203
204static int uic_host_xlate(struct irq_host *h, struct device_node *ct, 204static int uic_host_xlate(struct irq_host *h, struct device_node *ct,
205 u32 *intspec, unsigned int intsize, 205 const u32 *intspec, unsigned int intsize,
206 irq_hw_number_t *out_hwirq, unsigned int *out_type) 206 irq_hw_number_t *out_hwirq, unsigned int *out_type)
207 207
208{ 208{
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 40edad520770..1e0ccfaf403e 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -79,7 +79,7 @@ static void xilinx_intc_mask(unsigned int virq)
79 79
80static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type) 80static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
81{ 81{
82 struct irq_desc *desc = get_irq_desc(virq); 82 struct irq_desc *desc = irq_to_desc(virq);
83 83
84 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); 84 desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
85 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; 85 desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
@@ -106,7 +106,7 @@ static void xilinx_intc_level_unmask(unsigned int virq)
106} 106}
107 107
108static struct irq_chip xilinx_intc_level_irqchip = { 108static struct irq_chip xilinx_intc_level_irqchip = {
109 .typename = "Xilinx Level INTC", 109 .name = "Xilinx Level INTC",
110 .mask = xilinx_intc_mask, 110 .mask = xilinx_intc_mask,
111 .mask_ack = xilinx_intc_mask, 111 .mask_ack = xilinx_intc_mask,
112 .unmask = xilinx_intc_level_unmask, 112 .unmask = xilinx_intc_level_unmask,
@@ -133,7 +133,7 @@ static void xilinx_intc_edge_ack(unsigned int virq)
133} 133}
134 134
135static struct irq_chip xilinx_intc_edge_irqchip = { 135static struct irq_chip xilinx_intc_edge_irqchip = {
136 .typename = "Xilinx Edge INTC", 136 .name = "Xilinx Edge INTC",
137 .mask = xilinx_intc_mask, 137 .mask = xilinx_intc_mask,
138 .unmask = xilinx_intc_edge_unmask, 138 .unmask = xilinx_intc_edge_unmask,
139 .ack = xilinx_intc_edge_ack, 139 .ack = xilinx_intc_edge_ack,
@@ -148,7 +148,7 @@ static struct irq_chip xilinx_intc_edge_irqchip = {
148 * xilinx_intc_xlate - translate virq# from device tree interrupts property 148 * xilinx_intc_xlate - translate virq# from device tree interrupts property
149 */ 149 */
150static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, 150static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
151 u32 *intspec, unsigned int intsize, 151 const u32 *intspec, unsigned int intsize,
152 irq_hw_number_t *out_hwirq, 152 irq_hw_number_t *out_hwirq,
153 unsigned int *out_flags) 153 unsigned int *out_flags)
154{ 154{
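
The last of the xlate() changes appears above; every irq_host_ops.xlate implementation in this series now takes a const-qualified interrupt specifier, matching the "Make 'intspec' pointers in irq_host->xlate() const" commit from the merge summary. A sketch of the updated callback shape; example_xlate() and its one-cell decoding are illustrative, not lifted from any of the drivers above.

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/irq.h>		/* struct irq_host, irq_hw_number_t */

static int example_xlate(struct irq_host *h, struct device_node *ct,
			 const u32 *intspec, unsigned int intsize,
			 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	if (intsize < 1)
		return -EINVAL;

	/* Read-only access to the device tree cells is all that is needed. */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_NONE;
	return 0;
}
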
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index bdbe96c8a7e4..4e6152c13764 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1641,7 +1641,8 @@ static void super_regs(void)
1641 ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1); 1641 ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
1642 printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n", 1642 printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
1643 ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4); 1643 ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
1644 printf(" Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5); 1644 printf(" Saved Gpr5=%.16lx \n",
1645 ptrLpPaca->gpr5_dword.saved_gpr5);
1645 } 1646 }
1646#endif 1647#endif
1647 1648
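
The xmon hunk reflects a paca layout change: saved_gpr5 now sits inside a gpr5_dword union, so the access path (and the printf line) changes even though the storage does not. A hypothetical illustration of that kind of accessor change; example_paca and read_gpr5() are made-up names, not the real lppaca layout.

struct example_paca {
	union {
		unsigned long saved_gpr5;	/* old field name, same storage */
		unsigned long dword;		/* alternate view of the slot */
	} gpr5_dword;
};

static unsigned long read_gpr5(const struct example_paca *p)
{
	/* Field access now goes through the union member. */
	return p->gpr5_dword.saved_gpr5;
}
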
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 676f08b004b3..85844d053846 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -790,5 +790,15 @@ config PATA_BF54X
790 790
791 If unsure, say N. 791 If unsure, say N.
792 792
793config PATA_MACIO
794 tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
795 depends on PPC_PMAC
796 help
797 Most IDE capable PowerMacs have IDE busses driven by a variant
798 of this controller which is part of the Apple chipset used on
799 most PowerMac models. Some models have multiple busses using
800 different chipsets, though generally, MacIO is one of them.
801
802
793endif # ATA_SFF 803endif # ATA_SFF
794endif # ATA 804endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d909435e9d81..fc936d4471d6 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SATA_MV) += sata_mv.o
18obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o 18obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
19obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 19obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
20obj-$(CONFIG_SATA_FSL) += sata_fsl.o 20obj-$(CONFIG_SATA_FSL) += sata_fsl.o
21obj-$(CONFIG_PATA_MACIO) += pata_macio.o
21 22
22obj-$(CONFIG_PATA_ALI) += pata_ali.o 23obj-$(CONFIG_PATA_ALI) += pata_ali.o
23obj-$(CONFIG_PATA_AMD) += pata_amd.o 24obj-$(CONFIG_PATA_AMD) += pata_amd.o
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
new file mode 100644
index 000000000000..4cc7bbd10ec2
--- /dev/null
+++ b/drivers/ata/pata_macio.c
@@ -0,0 +1,1427 @@
1/*
2 * Libata based driver for Apple "macio" family of PATA controllers
3 *
4 * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
5 * <benh@kernel.crashing.org>
6 *
7 * Some bits and pieces from drivers/ide/ppc/pmac.c
8 *
9 */
10
11#undef DEBUG
12#undef DEBUG_DMA
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/blkdev.h>
18#include <linux/ata.h>
19#include <linux/libata.h>
20#include <linux/adb.h>
21#include <linux/pmu.h>
22#include <linux/scatterlist.h>
23#include <linux/of.h>
24
25#include <scsi/scsi.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_device.h>
28
29#include <asm/macio.h>
30#include <asm/io.h>
31#include <asm/dbdma.h>
32#include <asm/pci-bridge.h>
33#include <asm/machdep.h>
34#include <asm/pmac_feature.h>
35#include <asm/mediabay.h>
36
37#ifdef DEBUG_DMA
38#define dev_dbgdma(dev, format, arg...) \
39 dev_printk(KERN_DEBUG , dev , format , ## arg)
40#else
41#define dev_dbgdma(dev, format, arg...) \
42 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
43#endif
44
45#define DRV_NAME "pata_macio"
46#define DRV_VERSION "0.9"
47
48/* Models of macio ATA controller */
49enum {
50 controller_ohare, /* OHare based */
51 controller_heathrow, /* Heathrow/Paddington */
52 controller_kl_ata3, /* KeyLargo ATA-3 */
53 controller_kl_ata4, /* KeyLargo ATA-4 */
54 controller_un_ata6, /* UniNorth2 ATA-6 */
55 controller_k2_ata6, /* K2 ATA-6 */
56 controller_sh_ata6, /* Shasta ATA-6 */
57};
58
59static const char* macio_ata_names[] = {
60 "OHare ATA", /* OHare based */
61 "Heathrow ATA", /* Heathrow/Paddington */
62 "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
63 "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
64 "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
65 "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
66 "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
67};
68
69/*
70 * Extra registers, both 32-bit little-endian
71 */
72#define IDE_TIMING_CONFIG 0x200
73#define IDE_INTERRUPT 0x300
74
75/* Kauai (U2) ATA has different register setup */
76#define IDE_KAUAI_PIO_CONFIG 0x200
77#define IDE_KAUAI_ULTRA_CONFIG 0x210
78#define IDE_KAUAI_POLL_CONFIG 0x220
79
80/*
81 * Timing configuration register definitions
82 */
83
84/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
85#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
86#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
87#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
88#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
89
90/* 133Mhz cell, found in shasta.
91 * See comments about 100 Mhz Uninorth 2...
92 * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
93 * weird and I don't know why... at this stage
94 */
95#define TR_133_PIOREG_PIO_MASK 0xff000fff
96#define TR_133_PIOREG_MDMA_MASK 0x00fff800
97#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
98#define TR_133_UDMAREG_UDMA_EN 0x00000001
99
100/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
101 * (106b/0033) on uninorth or K2 internal PCI bus and its clock is
102 * controlled like gem or fw. It appears to be an evolution of keylargo
103 * ATA4 with a timing register extended to 2x32bit registers (one
104 * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel.
105 * It has its own local feature control register as well.
106 *
107 * After scratching my mind over the timing values, at least for PIO
108 * and MDMA, I think I've figured the format of the timing register,
109 * though I use pre-calculated tables for UDMA as usual...
110 */
111#define TR_100_PIO_ADDRSETUP_MASK 0xff000000 /* Size of field unknown */
112#define TR_100_PIO_ADDRSETUP_SHIFT 24
113#define TR_100_MDMA_MASK 0x00fff000
114#define TR_100_MDMA_RECOVERY_MASK 0x00fc0000
115#define TR_100_MDMA_RECOVERY_SHIFT 18
116#define TR_100_MDMA_ACCESS_MASK 0x0003f000
117#define TR_100_MDMA_ACCESS_SHIFT 12
118#define TR_100_PIO_MASK 0xff000fff
119#define TR_100_PIO_RECOVERY_MASK 0x00000fc0
120#define TR_100_PIO_RECOVERY_SHIFT 6
121#define TR_100_PIO_ACCESS_MASK 0x0000003f
122#define TR_100_PIO_ACCESS_SHIFT 0
123
124#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
125#define TR_100_UDMAREG_UDMA_EN 0x00000001
126
127
128/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
129 * 40 connector cable and to 4 on 80 connector one.
130 * Clock unit is 15ns (66Mhz)
131 *
132 * 3 Values can be programmed:
133 * - Write data setup, which appears to match the cycle time. They
134 * also call it DIOW setup.
135 * - Ready to pause time (from spec)
136 * - Address setup. That one is weird. I don't see where exactly
137 * it fits in UDMA cycles; I got its name from an obscure piece
138 * of commented out code in Darwin. They leave it at 0, we do as
139 * well, despite a comment that would lead one to think it has a
140 * min value of 45ns.
141 * Apple also adds 60ns to the write data setup (or cycle time ?) on
142 * reads.
143 */
144#define TR_66_UDMA_MASK 0xfff00000
145#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
146#define TR_66_PIO_ADDRSETUP_MASK 0xe0000000 /* Address setup */
147#define TR_66_PIO_ADDRSETUP_SHIFT 29
148#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
149#define TR_66_UDMA_RDY2PAUS_SHIFT 25
150#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
151#define TR_66_UDMA_WRDATASETUP_SHIFT 21
152#define TR_66_MDMA_MASK 0x000ffc00
153#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
154#define TR_66_MDMA_RECOVERY_SHIFT 15
155#define TR_66_MDMA_ACCESS_MASK 0x00007c00
156#define TR_66_MDMA_ACCESS_SHIFT 10
157#define TR_66_PIO_MASK 0xe00003ff
158#define TR_66_PIO_RECOVERY_MASK 0x000003e0
159#define TR_66_PIO_RECOVERY_SHIFT 5
160#define TR_66_PIO_ACCESS_MASK 0x0000001f
161#define TR_66_PIO_ACCESS_SHIFT 0
162
163/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
164 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
165 *
166 * The access time and recovery time can be programmed. Some older
167 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
168 * the same here for safety against broken old hardware ;)
169 * The HalfTick bit, when set, adds half a clock (15ns) to the access
170 * time and removes one from recovery. It's not supported on KeyLargo
171 * implementation afaik. The E bit appears to be set for PIO mode 0 and
172 * is used to reach long timings used in this mode.
173 */
174#define TR_33_MDMA_MASK 0x003ff800
175#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
176#define TR_33_MDMA_RECOVERY_SHIFT 16
177#define TR_33_MDMA_ACCESS_MASK 0x0000f800
178#define TR_33_MDMA_ACCESS_SHIFT 11
179#define TR_33_MDMA_HALFTICK 0x00200000
180#define TR_33_PIO_MASK 0x000007ff
181#define TR_33_PIO_E 0x00000400
182#define TR_33_PIO_RECOVERY_MASK 0x000003e0
183#define TR_33_PIO_RECOVERY_SHIFT 5
184#define TR_33_PIO_ACCESS_MASK 0x0000001f
185#define TR_33_PIO_ACCESS_SHIFT 0
186
187/*
188 * Interrupt register definitions. Only present on newer cells
189 * (Keylargo and later afaik) so we don't use it.
190 */
191#define IDE_INTR_DMA 0x80000000
192#define IDE_INTR_DEVICE 0x40000000
193
194/*
195 * FCR Register on Kauai. Not sure what bit 0x4 is ...
196 */
197#define KAUAI_FCR_UATA_MAGIC 0x00000004
198#define KAUAI_FCR_UATA_RESET_N 0x00000002
199#define KAUAI_FCR_UATA_ENABLE 0x00000001
200
201
202/* Allow up to 256 DBDMA commands per xfer */
203#define MAX_DCMDS 256
204
205/* Don't let a DMA segment go all the way to 64K */
206#define MAX_DBDMA_SEG 0xff00
207
208
209/*
210 * Wait 1s for disk to answer on IDE bus after a hard reset
211 * of the device (via GPIO/FCR).
212 *
213 * Some devices seem to "pollute" the bus even after dropping
214 * the BSY bit (typically some combo drives slave on the UDMA
215 * bus) after a hard reset. Since we hard reset all drives on
216 * KeyLargo ATA66, we have to keep that delay around. I may end
217 * up not hard resetting anymore on these and keep the delay only
218 * for older interfaces instead (we have to reset when coming
219 * from MacOS...) --BenH.
220 */
221#define IDE_WAKEUP_DELAY_MS 1000
222
223struct pata_macio_timing;
224
225struct pata_macio_priv {
226 int kind;
227 int aapl_bus_id;
228 int mediabay : 1;
229 struct device_node *node;
230 struct macio_dev *mdev;
231 struct pci_dev *pdev;
232 struct device *dev;
233 int irq;
234 u32 treg[2][2];
235 void __iomem *tfregs;
236 void __iomem *kauai_fcr;
237 struct dbdma_cmd * dma_table_cpu;
238 dma_addr_t dma_table_dma;
239 struct ata_host *host;
240 const struct pata_macio_timing *timings;
241};
242
243/* Previous variants of this driver used to calculate timings
244 * for various variants of the chip and use tables for others.
245 *
246 * Not only was this confusing, but in addition, it isn't clear
247 * whether our calculation code was correct. It didn't entirely
248 * match the darwin code and whatever documentation I could find
249 * on these cells
250 *
251 * I decided to entirely rely on a table instead for this version
252 * of the driver. Also, because I don't really care about derated
253 * modes and really old HW other than making it work, I'm not going
254 * to calculate / snoop timing values for anything other than the
255 * standard modes.
256 */
257struct pata_macio_timing {
258 int mode;
259 u32 reg1; /* Bits to set in first timing reg */
260 u32 reg2; /* Bits to set in second timing reg */
261};
262
263static const struct pata_macio_timing pata_macio_ohare_timings[] = {
264 { XFER_PIO_0, 0x00000526, 0, },
265 { XFER_PIO_1, 0x00000085, 0, },
266 { XFER_PIO_2, 0x00000025, 0, },
267 { XFER_PIO_3, 0x00000025, 0, },
268 { XFER_PIO_4, 0x00000025, 0, },
269 { XFER_MW_DMA_0, 0x00074000, 0, },
270 { XFER_MW_DMA_1, 0x00221000, 0, },
271 { XFER_MW_DMA_2, 0x00211000, 0, },
272 { -1, 0, 0 }
273};
274
275static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
276 { XFER_PIO_0, 0x00000526, 0, },
277 { XFER_PIO_1, 0x00000085, 0, },
278 { XFER_PIO_2, 0x00000025, 0, },
279 { XFER_PIO_3, 0x00000025, 0, },
280 { XFER_PIO_4, 0x00000025, 0, },
281 { XFER_MW_DMA_0, 0x00074000, 0, },
282 { XFER_MW_DMA_1, 0x00221000, 0, },
283 { XFER_MW_DMA_2, 0x00211000, 0, },
284 { -1, 0, 0 }
285};
286
287static const struct pata_macio_timing pata_macio_kl33_timings[] = {
288 { XFER_PIO_0, 0x00000526, 0, },
289 { XFER_PIO_1, 0x00000085, 0, },
290 { XFER_PIO_2, 0x00000025, 0, },
291 { XFER_PIO_3, 0x00000025, 0, },
292 { XFER_PIO_4, 0x00000025, 0, },
293 { XFER_MW_DMA_0, 0x00084000, 0, },
294 { XFER_MW_DMA_1, 0x00021800, 0, },
295 { XFER_MW_DMA_2, 0x00011800, 0, },
296 { -1, 0, 0 }
297};
298
299static const struct pata_macio_timing pata_macio_kl66_timings[] = {
300 { XFER_PIO_0, 0x0000038c, 0, },
301 { XFER_PIO_1, 0x0000020a, 0, },
302 { XFER_PIO_2, 0x00000127, 0, },
303 { XFER_PIO_3, 0x000000c6, 0, },
304 { XFER_PIO_4, 0x00000065, 0, },
305 { XFER_MW_DMA_0, 0x00084000, 0, },
306 { XFER_MW_DMA_1, 0x00029800, 0, },
307 { XFER_MW_DMA_2, 0x00019400, 0, },
308 { XFER_UDMA_0, 0x19100000, 0, },
309 { XFER_UDMA_1, 0x14d00000, 0, },
310 { XFER_UDMA_2, 0x10900000, 0, },
311 { XFER_UDMA_3, 0x0c700000, 0, },
312 { XFER_UDMA_4, 0x0c500000, 0, },
313 { -1, 0, 0 }
314};
315
316static const struct pata_macio_timing pata_macio_kauai_timings[] = {
317 { XFER_PIO_0, 0x08000a92, 0, },
318 { XFER_PIO_1, 0x0800060f, 0, },
319 { XFER_PIO_2, 0x0800038b, 0, },
320 { XFER_PIO_3, 0x05000249, 0, },
321 { XFER_PIO_4, 0x04000148, 0, },
322 { XFER_MW_DMA_0, 0x00618000, 0, },
323 { XFER_MW_DMA_1, 0x00209000, 0, },
324 { XFER_MW_DMA_2, 0x00148000, 0, },
325 { XFER_UDMA_0, 0, 0x000070c1, },
326 { XFER_UDMA_1, 0, 0x00005d81, },
327 { XFER_UDMA_2, 0, 0x00004a61, },
328 { XFER_UDMA_3, 0, 0x00003a51, },
329 { XFER_UDMA_4, 0, 0x00002a31, },
330 { XFER_UDMA_5, 0, 0x00002921, },
331 { -1, 0, 0 }
332};
333
334static const struct pata_macio_timing pata_macio_shasta_timings[] = {
335 { XFER_PIO_0, 0x0a000c97, 0, },
336 { XFER_PIO_1, 0x07000712, 0, },
337 { XFER_PIO_2, 0x040003cd, 0, },
338 { XFER_PIO_3, 0x0500028b, 0, },
339 { XFER_PIO_4, 0x0400010a, 0, },
340 { XFER_MW_DMA_0, 0x00820800, 0, },
341 { XFER_MW_DMA_1, 0x0028b000, 0, },
342 { XFER_MW_DMA_2, 0x001ca000, 0, },
343 { XFER_UDMA_0, 0, 0x00035901, },
344 { XFER_UDMA_1, 0, 0x000348b1, },
345 { XFER_UDMA_2, 0, 0x00033881, },
346 { XFER_UDMA_3, 0, 0x00033861, },
347 { XFER_UDMA_4, 0, 0x00033841, },
348 { XFER_UDMA_5, 0, 0x00033031, },
349 { XFER_UDMA_6, 0, 0x00033021, },
350 { -1, 0, 0 }
351};
352
353static const struct pata_macio_timing *pata_macio_find_timing(
354 struct pata_macio_priv *priv,
355 int mode)
356{
357 int i;
358
359 for (i = 0; priv->timings[i].mode > 0; i++) {
360 if (priv->timings[i].mode == mode)
361 return &priv->timings[i];
362 }
363 return NULL;
364}
365
366
367static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
368{
369 struct pata_macio_priv *priv = ap->private_data;
370 void __iomem *rbase = ap->ioaddr.cmd_addr;
371
372 if (priv->kind == controller_sh_ata6 ||
373 priv->kind == controller_un_ata6 ||
374 priv->kind == controller_k2_ata6) {
375 writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
376 writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
377 } else
378 writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
379}
380
381static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
382{
383 ata_sff_dev_select(ap, device);
384
385 /* Apply timings */
386 pata_macio_apply_timings(ap, device);
387}
388
389static void pata_macio_set_timings(struct ata_port *ap,
390 struct ata_device *adev)
391{
392 struct pata_macio_priv *priv = ap->private_data;
393 const struct pata_macio_timing *t;
394
395 dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
396 adev->devno,
397 adev->pio_mode,
398 ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
399 adev->dma_mode,
400 ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
401
402 /* First clear timings */
403 priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
404
405 /* Now get the PIO timings */
406 t = pata_macio_find_timing(priv, adev->pio_mode);
407 if (t == NULL) {
408 dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
409 adev->pio_mode);
410 t = pata_macio_find_timing(priv, XFER_PIO_0);
411 }
412 BUG_ON(t == NULL);
413
414 /* PIO timings only ever use the first treg */
415 priv->treg[adev->devno][0] |= t->reg1;
416
417 /* Now get DMA timings */
418 t = pata_macio_find_timing(priv, adev->dma_mode);
419 if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
420 dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
421 t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
422 }
423 BUG_ON(t == NULL);
424
425 /* DMA timings can use both tregs */
426 priv->treg[adev->devno][0] |= t->reg1;
427 priv->treg[adev->devno][1] |= t->reg2;
428
429 dev_dbg(priv->dev, " -> %08x %08x\n",
430 priv->treg[adev->devno][0],
431 priv->treg[adev->devno][1]);
432
433 /* Apply to hardware */
434 pata_macio_apply_timings(ap, adev->devno);
435}
436
437/*
438 * Blast some well known "safe" values to the timing registers at init or
439 * wakeup from sleep time, before we do real calculation
440 */
441static void pata_macio_default_timings(struct pata_macio_priv *priv)
442{
443 unsigned int value, value2 = 0;
444
445 switch(priv->kind) {
446 case controller_sh_ata6:
447 value = 0x0a820c97;
448 value2 = 0x00033031;
449 break;
450 case controller_un_ata6:
451 case controller_k2_ata6:
452 value = 0x08618a92;
453 value2 = 0x00002921;
454 break;
455 case controller_kl_ata4:
456 value = 0x0008438c;
457 break;
458 case controller_kl_ata3:
459 value = 0x00084526;
460 break;
461 case controller_heathrow:
462 case controller_ohare:
463 default:
464 value = 0x00074526;
465 break;
466 }
467 priv->treg[0][0] = priv->treg[1][0] = value;
468 priv->treg[0][1] = priv->treg[1][1] = value2;
469}
470
471static int pata_macio_cable_detect(struct ata_port *ap)
472{
473 struct pata_macio_priv *priv = ap->private_data;
474
475 /* Get cable type from device-tree */
476 if (priv->kind == controller_kl_ata4 ||
477 priv->kind == controller_un_ata6 ||
478 priv->kind == controller_k2_ata6 ||
479 priv->kind == controller_sh_ata6) {
480 const char* cable = of_get_property(priv->node, "cable-type",
481 NULL);
482 struct device_node *root = of_find_node_by_path("/");
483 const char *model = of_get_property(root, "model", NULL);
484
485 if (cable && !strncmp(cable, "80-", 3)) {
486 /* Some drives fail to detect the 80c cable in PowerBooks.
487 * These machines use a proprietary short IDE cable
488 * anyway
489 */
490 if (!strncmp(model, "PowerBook", 9))
491 return ATA_CBL_PATA40_SHORT;
492 else
493 return ATA_CBL_PATA80;
494 }
495 }
496
497 /* G5s seem to have an incorrect cable type in the device-tree.
498 * Let's assume they always have an 80 conductor cable; this seems to
499 * always be the case unless the user mucked around
500 */
501 if (of_device_is_compatible(priv->node, "K2-UATA") ||
502 of_device_is_compatible(priv->node, "shasta-ata"))
503 return ATA_CBL_PATA80;
504
505 /* Anything else is 40 connectors */
506 return ATA_CBL_PATA40;
507}
508
509static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
510{
511 unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
512 struct ata_port *ap = qc->ap;
513 struct pata_macio_priv *priv = ap->private_data;
514 struct scatterlist *sg;
515 struct dbdma_cmd *table;
516 unsigned int si, pi;
517
518 dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
519 __func__, qc, qc->flags, write, qc->dev->devno);
520
521 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
522 return;
523
524 table = (struct dbdma_cmd *) priv->dma_table_cpu;
525
526 pi = 0;
527 for_each_sg(qc->sg, sg, qc->n_elem, si) {
528 u32 addr, sg_len, len;
529
530 /* determine if physical DMA addr spans 64K boundary.
531 * Note h/w doesn't support 64-bit, so we unconditionally
532 * truncate dma_addr_t to u32.
533 */
534 addr = (u32) sg_dma_address(sg);
535 sg_len = sg_dma_len(sg);
536
537 while (sg_len) {
538 /* table overflow should never happen */
539 BUG_ON (pi++ >= MAX_DCMDS);
540
541 len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
542 st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE);
543 st_le16(&table->req_count, len);
544 st_le32(&table->phy_addr, addr);
545 table->cmd_dep = 0;
546 table->xfer_status = 0;
547 table->res_count = 0;
548 addr += len;
549 sg_len -= len;
550 ++table;
551 }
552 }
553
554 /* Should never happen according to Tejun */
555 BUG_ON(!pi);
556
557 /* Convert the last command to an input/output */
558 table--;
559 st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST);
560 table++;
561
562 /* Add the stop command to the end of the list */
563 memset(table, 0, sizeof(struct dbdma_cmd));
564 st_le16(&table->command, DBDMA_STOP);
565
566 dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
567}
568
569
570static void pata_macio_freeze(struct ata_port *ap)
571{
572 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
573
574 if (dma_regs) {
575 unsigned int timeout = 1000000;
576
577 /* Make sure DMA controller is stopped */
578 writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
579 while (--timeout && (readl(&dma_regs->status) & RUN))
580 udelay(1);
581 }
582
583 ata_sff_freeze(ap);
584}
585
586
587static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
588{
589 struct ata_port *ap = qc->ap;
590 struct pata_macio_priv *priv = ap->private_data;
591 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
592 int dev = qc->dev->devno;
593
594 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
595
596 /* Make sure DMA commands updates are visible */
597 writel(priv->dma_table_dma, &dma_regs->cmdptr);
598
599 /* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
600 * UDMA reads
601 */
602 if (priv->kind == controller_kl_ata4 &&
603 (priv->treg[dev][0] & TR_66_UDMA_EN)) {
604 void __iomem *rbase = ap->ioaddr.cmd_addr;
605 u32 reg = priv->treg[dev][0];
606
607 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
608 reg += 0x00800000;
609 writel(reg, rbase + IDE_TIMING_CONFIG);
610 }
611
612 /* issue r/w command */
613 ap->ops->sff_exec_command(ap, &qc->tf);
614}
615
616static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
617{
618 struct ata_port *ap = qc->ap;
619 struct pata_macio_priv *priv = ap->private_data;
620 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
621
622 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
623
624 writel((RUN << 16) | RUN, &dma_regs->control);
625 /* Make sure it gets to the controller right now */
626 (void)readl(&dma_regs->control);
627}
628
629static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
630{
631 struct ata_port *ap = qc->ap;
632 struct pata_macio_priv *priv = ap->private_data;
633 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
634 unsigned int timeout = 1000000;
635
636 dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
637
638 /* Stop the DMA engine and wait for it to fully halt */
639 writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
640 while (--timeout && (readl(&dma_regs->status) & RUN))
641 udelay(1);
642}
643
644static u8 pata_macio_bmdma_status(struct ata_port *ap)
645{
646 struct pata_macio_priv *priv = ap->private_data;
647 struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
648 u32 dstat, rstat = ATA_DMA_INTR;
649 unsigned long timeout = 0;
650
651 dstat = readl(&dma_regs->status);
652
653 dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
654
655 /* We have two things to deal with here:
656 *
657 * - The dbdma won't stop if the command was started
658 * but completed with an error without transferring all
659 * the data. This happens when bad blocks are met during
660 * a multi-block transfer.
661 *
662 * - The dbdma fifo hasn't yet finished flushing to
663 * system memory when the disk interrupt occurs.
664 *
665 */
666
667 /* First check for errors */
668 if ((dstat & (RUN|DEAD)) != RUN)
669 rstat |= ATA_DMA_ERR;
670
671 /* If ACTIVE is cleared, the STOP command has been hit and
672 * the transfer is complete. If not, we have to flush the
673 * channel.
674 */
675 if ((dstat & ACTIVE) == 0)
676 return rstat;
677
678 dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
679
680 /* If dbdma didn't execute the STOP command yet, the
681 * active bit is still set. We consider that we aren't
682 * sharing interrupts (which is hopefully the case with
683 * those controllers) and so we just try to flush the
684 * channel for pending data in the fifo
685 */
686 udelay(1);
687 writel((FLUSH << 16) | FLUSH, &dma_regs->control);
688 for (;;) {
689 udelay(1);
690 dstat = readl(&dma_regs->status);
691 if ((dstat & FLUSH) == 0)
692 break;
693 if (++timeout > 1000) {
694 dev_warn(priv->dev, "timeout flushing DMA\n");
695 rstat |= ATA_DMA_ERR;
696 break;
697 }
698 }
699 return rstat;
700}
701
702/* port_start is when we allocate the DMA command list */
703static int pata_macio_port_start(struct ata_port *ap)
704{
705 struct pata_macio_priv *priv = ap->private_data;
706
707 if (ap->ioaddr.bmdma_addr == NULL)
708 return 0;
709
710 /* Allocate space for the DBDMA commands.
711 *
712 * The +2 is +1 for the stop command and +1 to allow for
713 * aligning the start address to a multiple of 16 bytes.
714 */
715 priv->dma_table_cpu =
716 dmam_alloc_coherent(priv->dev,
717 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
718 &priv->dma_table_dma, GFP_KERNEL);
719 if (priv->dma_table_cpu == NULL) {
720 dev_err(priv->dev, "Unable to allocate DMA command list\n");
721 ap->ioaddr.bmdma_addr = NULL;
722 }
723 return 0;
724}
725
726static void pata_macio_irq_clear(struct ata_port *ap)
727{
728 struct pata_macio_priv *priv = ap->private_data;
729
730 /* Nothing to do here */
731
732 dev_dbgdma(priv->dev, "%s\n", __func__);
733}
734
735static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
736{
737 dev_dbg(priv->dev, "Enabling & resetting... \n");
738
739 if (priv->mediabay)
740 return;
741
742 if (priv->kind == controller_ohare && !resume) {
743 /* The code below is having trouble on some ohare machines
744 * (timing related ?). Until I can put my hand on one of these
745 * units, I keep the old way
746 */
747 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
748 } else {
749 int rc;
750
751 /* Reset and enable controller */
752 rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
753 priv->node, priv->aapl_bus_id, 1);
754 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
755 priv->node, priv->aapl_bus_id, 1);
756 msleep(10);
757 /* Only bother waiting if there's a reset control */
758 if (rc == 0) {
759 ppc_md.feature_call(PMAC_FTR_IDE_RESET,
760 priv->node, priv->aapl_bus_id, 0);
761 msleep(IDE_WAKEUP_DELAY_MS);
762 }
763 }
764
765 /* If resuming a PCI device, restore the config space here */
766 if (priv->pdev && resume) {
767 int rc;
768
769 pci_restore_state(priv->pdev);
770 rc = pcim_enable_device(priv->pdev);
771 if (rc)
772 dev_printk(KERN_ERR, &priv->pdev->dev,
773 "Failed to enable device after resume (%d)\n", rc);
774 else
775 pci_set_master(priv->pdev);
776 }
777
778 /* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
779 * really seem necessary, and skipping it speeds up the boot process
780 */
781 if (priv->kauai_fcr)
782 writel(KAUAI_FCR_UATA_MAGIC |
783 KAUAI_FCR_UATA_RESET_N |
784 KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
785}
786
787/* Hook the standard slave config to fixup some HW related alignment
788 * restrictions
789 */
790static int pata_macio_slave_config(struct scsi_device *sdev)
791{
792 struct ata_port *ap = ata_shost_to_port(sdev->host);
793 struct pata_macio_priv *priv = ap->private_data;
794 struct ata_device *dev;
795 u16 cmd;
796 int rc;
797
798 /* First call original */
799 rc = ata_scsi_slave_config(sdev);
800 if (rc)
801 return rc;
802
803 /* This is lifted from sata_nv */
804 dev = &ap->link.device[sdev->id];
805
806 /* OHare has issues with non cache aligned DMA on some chipsets */
807 if (priv->kind == controller_ohare) {
808 blk_queue_update_dma_alignment(sdev->request_queue, 31);
809 blk_queue_update_dma_pad(sdev->request_queue, 31);
810
811 /* Tell the world about it */
812 ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n");
813 return 0;
814 }
815
816 /* We only have issues with ATAPI */
817 if (dev->class != ATA_DEV_ATAPI)
818 return 0;
819
820 /* Shasta and K2 seem to have "issues" with reads ... */
821 if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
822 /* All right, these are bad; apply restrictions */
823 blk_queue_update_dma_alignment(sdev->request_queue, 15);
824 blk_queue_update_dma_pad(sdev->request_queue, 15);
825
826 /* We enable MWI and hack the cache line size directly here; this
827 * is specific to this chipset and not normal values. We happen
828 * to somewhat know what we are doing here (which is basically
829 * to do the same as Apple does and pray they did not get it wrong :-)
830 */
831 BUG_ON(!priv->pdev);
832 pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
833 pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
834 pci_write_config_word(priv->pdev, PCI_COMMAND,
835 cmd | PCI_COMMAND_INVALIDATE);
836
837 /* Tell the world about it */
838 ata_dev_printk(dev, KERN_INFO,
839 "K2/Shasta alignment limits applied\n");
840 }
841
842 return 0;
843}
844
845#ifdef CONFIG_PM
846
847static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
848{
849 int rc;
850
851 /* First, core libata suspend to do most of the work */
852 rc = ata_host_suspend(priv->host, mesg);
853 if (rc)
854 return rc;
855
856 /* Restore to default timings */
857 pata_macio_default_timings(priv);
858
859 /* Mask interrupt. Not strictly necessary but old driver did
860 * it and I'd rather not change that here */
861 disable_irq(priv->irq);
862
863 /* The media bay will handle itself just fine */
864 if (priv->mediabay)
865 return 0;
866
867 /* Kauai has bus control FCRs directly here */
868 if (priv->kauai_fcr) {
869 u32 fcr = readl(priv->kauai_fcr);
870 fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
871 writel(fcr, priv->kauai_fcr);
872 }
873
874 /* For PCI, save state and disable DMA. No need to call
875 * pci_set_power_state(), the HW doesn't do D states that
876 * way, the platform code will take care of suspending the
877 * ASIC properly
878 */
879 if (priv->pdev) {
880 pci_save_state(priv->pdev);
881 pci_disable_device(priv->pdev);
882 }
883
884 /* Disable the bus on older machines and the cell on kauai */
885 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
886 priv->aapl_bus_id, 0);
887
888 return 0;
889}
890
891static int pata_macio_do_resume(struct pata_macio_priv *priv)
892{
893 /* Reset and re-enable the HW */
894 pata_macio_reset_hw(priv, 1);
895
896 /* Sanitize drive timings */
897 pata_macio_apply_timings(priv->host->ports[0], 0);
898
899 /* We want our IRQ back ! */
900 enable_irq(priv->irq);
901
902 /* Let the libata core take it from there */
903 ata_host_resume(priv->host);
904
905 return 0;
906}
907
908#endif /* CONFIG_PM */
909
910static struct scsi_host_template pata_macio_sht = {
911 ATA_BASE_SHT(DRV_NAME),
912 .sg_tablesize = MAX_DCMDS,
913 /* We may not need that strict one */
914 .dma_boundary = ATA_DMA_BOUNDARY,
915 .slave_configure = pata_macio_slave_config,
916};
917
918static struct ata_port_operations pata_macio_ops = {
919 .inherits = &ata_sff_port_ops,
920
921 .freeze = pata_macio_freeze,
922 .set_piomode = pata_macio_set_timings,
923 .set_dmamode = pata_macio_set_timings,
924 .cable_detect = pata_macio_cable_detect,
925 .sff_dev_select = pata_macio_dev_select,
926 .qc_prep = pata_macio_qc_prep,
927 .mode_filter = ata_bmdma_mode_filter,
928 .bmdma_setup = pata_macio_bmdma_setup,
929 .bmdma_start = pata_macio_bmdma_start,
930 .bmdma_stop = pata_macio_bmdma_stop,
931 .bmdma_status = pata_macio_bmdma_status,
932 .port_start = pata_macio_port_start,
933 .sff_irq_clear = pata_macio_irq_clear,
934};
935
936static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
937{
938 const int *bidp;
939
940 /* Identify the type of controller */
941 if (of_device_is_compatible(priv->node, "shasta-ata")) {
942 priv->kind = controller_sh_ata6;
943 priv->timings = pata_macio_shasta_timings;
944 } else if (of_device_is_compatible(priv->node, "kauai-ata")) {
945 priv->kind = controller_un_ata6;
946 priv->timings = pata_macio_kauai_timings;
947 } else if (of_device_is_compatible(priv->node, "K2-UATA")) {
948 priv->kind = controller_k2_ata6;
949 priv->timings = pata_macio_kauai_timings;
950 } else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
951 if (strcmp(priv->node->name, "ata-4") == 0) {
952 priv->kind = controller_kl_ata4;
953 priv->timings = pata_macio_kl66_timings;
954 } else {
955 priv->kind = controller_kl_ata3;
956 priv->timings = pata_macio_kl33_timings;
957 }
958 } else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
959 priv->kind = controller_heathrow;
960 priv->timings = pata_macio_heathrow_timings;
961 } else {
962 priv->kind = controller_ohare;
963 priv->timings = pata_macio_ohare_timings;
964 }
965
966 /* XXX FIXME --- setup priv->mediabay here */
967
968 /* Get Apple bus ID (for clock and ASIC control) */
969 bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
970 priv->aapl_bus_id = bidp ? *bidp : 0;
971
972 /* Fixup missing Apple bus ID in case of media-bay */
973 if (priv->mediabay && bidp == 0)
974 priv->aapl_bus_id = 1;
975}
976
977static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
978 void __iomem * base,
979 void __iomem * dma)
980{
981 /* cmd_addr is the base of regs for that port */
982 ioaddr->cmd_addr = base;
983
984 /* taskfile registers */
985 ioaddr->data_addr = base + (ATA_REG_DATA << 4);
986 ioaddr->error_addr = base + (ATA_REG_ERR << 4);
987 ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4);
988 ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4);
989 ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4);
990 ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4);
991 ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4);
992 ioaddr->device_addr = base + (ATA_REG_DEVICE << 4);
993 ioaddr->status_addr = base + (ATA_REG_STATUS << 4);
994 ioaddr->command_addr = base + (ATA_REG_CMD << 4);
995 ioaddr->altstatus_addr = base + 0x160;
996 ioaddr->ctl_addr = base + 0x160;
997 ioaddr->bmdma_addr = dma;
998}
999
1000static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
1001 struct ata_port_info *pinfo)
1002{
1003 int i = 0;
1004
1005 pinfo->pio_mask = 0;
1006 pinfo->mwdma_mask = 0;
1007 pinfo->udma_mask = 0;
1008
1009 while (priv->timings[i].mode > 0) {
1010 unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
1011 switch(priv->timings[i].mode & 0xf0) {
1012 case 0x00: /* PIO */
1013 pinfo->pio_mask |= (mask >> 8);
1014 break;
1015 case 0x20: /* MWDMA */
1016 pinfo->mwdma_mask |= mask;
1017 break;
1018 case 0x40: /* UDMA */
1019 pinfo->udma_mask |= mask;
1020 break;
1021 }
1022 i++;
1023 }
1024 dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
1025 pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
1026}
1027
1028static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
1029 resource_size_t tfregs,
1030 resource_size_t dmaregs,
1031 resource_size_t fcregs,
1032 unsigned long irq)
1033{
1034 struct ata_port_info pinfo;
1035 const struct ata_port_info *ppi[] = { &pinfo, NULL };
1036 void __iomem *dma_regs = NULL;
1037
1038 /* Fill up privates with various invariants collected from the
1039 * device-tree
1040 */
1041 pata_macio_invariants(priv);
1042
1043 /* Make sure we have sane initial timings in the cache */
1044 pata_macio_default_timings(priv);
1045
1046 /* Not sure what the real max is but we know it's less than 64K, let's
1047 * use 64K minus 256
1048 */
1049 dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
1050
1051 /* Allocate libata host for 1 port */
1052 memset(&pinfo, 0, sizeof(struct ata_port_info));
1053 pmac_macio_calc_timing_masks(priv, &pinfo);
1054 pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO |
1055 ATA_FLAG_NO_LEGACY;
1056 pinfo.port_ops = &pata_macio_ops;
1057 pinfo.private_data = priv;
1058
1059 priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
1060 if (priv->host == NULL) {
1061 dev_err(priv->dev, "Failed to allocate ATA port structure\n");
1062 return -ENOMEM;
1063 }
1064
1065 /* Setup the private data in host too */
1066 priv->host->private_data = priv;
1067
1068 /* Map base registers */
1069 priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
1070 if (priv->tfregs == NULL) {
1071 dev_err(priv->dev, "Failed to map ATA ports\n");
1072 return -ENOMEM;
1073 }
1074 priv->host->iomap = &priv->tfregs;
1075
1076 /* Map DMA regs */
1077 if (dmaregs != 0) {
1078 dma_regs = devm_ioremap(priv->dev, dmaregs,
1079 sizeof(struct dbdma_regs));
1080 if (dma_regs == NULL)
1081 dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
1082 }
1083
1084 /* If chip has local feature control, map those regs too */
1085 if (fcregs != 0) {
1086 priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
1087 if (priv->kauai_fcr == NULL) {
1088 dev_err(priv->dev, "Failed to map ATA FCR register\n");
1089 return -ENOMEM;
1090 }
1091 }
1092
1093 /* Setup port data structure */
1094 pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
1095 priv->tfregs, dma_regs);
1096 priv->host->ports[0]->private_data = priv;
1097
1098 /* hard-reset the controller */
1099 pata_macio_reset_hw(priv, 0);
1100 pata_macio_apply_timings(priv->host->ports[0], 0);
1101
1102 /* Enable bus master if necessary */
1103 if (priv->pdev && dma_regs)
1104 pci_set_master(priv->pdev);
1105
1106 dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
1107 macio_ata_names[priv->kind], priv->aapl_bus_id);
1108
1109 /* Start it up */
1110 priv->irq = irq;
1111 return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0,
1112 &pata_macio_sht);
1113}
1114
1115static int __devinit pata_macio_attach(struct macio_dev *mdev,
1116 const struct of_device_id *match)
1117{
1118 struct pata_macio_priv *priv;
1119 resource_size_t tfregs, dmaregs = 0;
1120 unsigned long irq;
1121 int rc;
1122
1123 /* Check for broken device-trees */
1124 if (macio_resource_count(mdev) == 0) {
1125 dev_err(&mdev->ofdev.dev,
1126 "No addresses for controller\n");
1127 return -ENXIO;
1128 }
1129
1130 /* Enable managed resources */
1131 macio_enable_devres(mdev);
1132
1133 /* Allocate and init private data structure */
1134 priv = devm_kzalloc(&mdev->ofdev.dev,
1135 sizeof(struct pata_macio_priv), GFP_KERNEL);
1136 if (priv == NULL) {
1137 dev_err(&mdev->ofdev.dev,
1138 "Failed to allocate private memory\n");
1139 return -ENOMEM;
1140 }
1141 priv->node = of_node_get(mdev->ofdev.node);
1142 priv->mdev = mdev;
1143 priv->dev = &mdev->ofdev.dev;
1144
1145 /* Request memory resource for taskfile registers */
1146 if (macio_request_resource(mdev, 0, "pata-macio")) {
1147 dev_err(&mdev->ofdev.dev,
1148 "Cannot obtain taskfile resource\n");
1149 return -EBUSY;
1150 }
1151 tfregs = macio_resource_start(mdev, 0);
1152
1153 /* Request resources for DMA registers if any */
1154 if (macio_resource_count(mdev) >= 2) {
1155 if (macio_request_resource(mdev, 1, "pata-macio-dma"))
1156 dev_err(&mdev->ofdev.dev,
1157 "Cannot obtain DMA resource\n");
1158 else
1159 dmaregs = macio_resource_start(mdev, 1);
1160 }
1161
1162 /*
1163 * Fixup missing IRQ for some old implementations with broken
1164 * device-trees.
1165 *
1166 * This is a bit bogus, it should be fixed in the device-tree itself,
1167 * via the existing macio fixups, based on the type of interrupt
1168 * controller in the machine. However, I have no test HW for this case,
1169 * and this trick works well enough on those old machines...
1170 */
1171 if (macio_irq_count(mdev) == 0) {
1172 dev_warn(&mdev->ofdev.dev,
1173 "No interrupts for controller, using 13\n");
1174 irq = irq_create_mapping(NULL, 13);
1175 } else
1176 irq = macio_irq(mdev, 0);
1177
 1178	/* Prevent media bay callbacks until fully registered */
1179 lock_media_bay(priv->mdev->media_bay);
1180
1181 /* Get register addresses and call common initialization */
1182 rc = pata_macio_common_init(priv,
1183 tfregs, /* Taskfile regs */
1184 dmaregs, /* DBDMA regs */
1185 0, /* Feature control */
1186 irq);
1187 unlock_media_bay(priv->mdev->media_bay);
1188
1189 return rc;
1190}
1191
1192static int __devexit pata_macio_detach(struct macio_dev *mdev)
1193{
1194 struct ata_host *host = macio_get_drvdata(mdev);
1195 struct pata_macio_priv *priv = host->private_data;
1196
1197 lock_media_bay(priv->mdev->media_bay);
1198
1199 /* Make sure the mediabay callback doesn't try to access
1200 * dead stuff
1201 */
1202 priv->host->private_data = NULL;
1203
1204 ata_host_detach(host);
1205
1206 unlock_media_bay(priv->mdev->media_bay);
1207
1208 return 0;
1209}
1210
1211#ifdef CONFIG_PM
1212
1213static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1214{
1215 struct ata_host *host = macio_get_drvdata(mdev);
1216
1217 return pata_macio_do_suspend(host->private_data, mesg);
1218}
1219
1220static int pata_macio_resume(struct macio_dev *mdev)
1221{
1222 struct ata_host *host = macio_get_drvdata(mdev);
1223
1224 return pata_macio_do_resume(host->private_data);
1225}
1226
1227#endif /* CONFIG_PM */
1228
1229#ifdef CONFIG_PMAC_MEDIABAY
1230static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
1231{
1232 struct ata_host *host = macio_get_drvdata(mdev);
1233 struct ata_port *ap;
1234 struct ata_eh_info *ehi;
1235 struct ata_device *dev;
1236 unsigned long flags;
1237
1238 if (!host || !host->private_data)
1239 return;
1240 ap = host->ports[0];
1241 spin_lock_irqsave(ap->lock, flags);
1242 ehi = &ap->link.eh_info;
1243 if (mb_state == MB_CD) {
1244 ata_ehi_push_desc(ehi, "mediabay plug");
1245 ata_ehi_hotplugged(ehi);
1246 ata_port_freeze(ap);
1247 } else {
1248 ata_ehi_push_desc(ehi, "mediabay unplug");
1249 ata_for_each_dev(dev, &ap->link, ALL)
1250 dev->flags |= ATA_DFLAG_DETACH;
1251 ata_port_abort(ap);
1252 }
1253 spin_unlock_irqrestore(ap->lock, flags);
1254
1255}
1256#endif /* CONFIG_PMAC_MEDIABAY */
1257
1258
1259static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
1260 const struct pci_device_id *id)
1261{
1262 struct pata_macio_priv *priv;
1263 struct device_node *np;
1264 resource_size_t rbase;
1265
1266 /* We cannot use a MacIO controller without its OF device node */
1267 np = pci_device_to_OF_node(pdev);
1268 if (np == NULL) {
1269 dev_err(&pdev->dev,
1270 "Cannot find OF device node for controller\n");
1271 return -ENODEV;
1272 }
1273
1274 /* Check that it can be enabled */
1275 if (pcim_enable_device(pdev)) {
1276 dev_err(&pdev->dev,
1277 "Cannot enable controller PCI device\n");
1278 return -ENXIO;
1279 }
1280
1281 /* Allocate and init private data structure */
1282 priv = devm_kzalloc(&pdev->dev,
1283 sizeof(struct pata_macio_priv), GFP_KERNEL);
1284 if (priv == NULL) {
1285 dev_err(&pdev->dev,
1286 "Failed to allocate private memory\n");
1287 return -ENOMEM;
1288 }
1289 priv->node = of_node_get(np);
1290 priv->pdev = pdev;
1291 priv->dev = &pdev->dev;
1292
1293 /* Get MMIO regions */
1294 if (pci_request_regions(pdev, "pata-macio")) {
1295 dev_err(&pdev->dev,
1296 "Cannot obtain PCI resources\n");
1297 return -EBUSY;
1298 }
1299
1300 /* Get register addresses and call common initialization */
1301 rbase = pci_resource_start(pdev, 0);
1302 if (pata_macio_common_init(priv,
1303 rbase + 0x2000, /* Taskfile regs */
1304 rbase + 0x1000, /* DBDMA regs */
1305 rbase, /* Feature control */
1306 pdev->irq))
1307 return -ENXIO;
1308
1309 return 0;
1310}
1311
1312static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
1313{
1314 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1315
1316 ata_host_detach(host);
1317}
1318
1319#ifdef CONFIG_PM
1320
1321static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1322{
1323 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1324
1325 return pata_macio_do_suspend(host->private_data, mesg);
1326}
1327
1328static int pata_macio_pci_resume(struct pci_dev *pdev)
1329{
1330 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1331
1332 return pata_macio_do_resume(host->private_data);
1333}
1334
1335#endif /* CONFIG_PM */
1336
1337static struct of_device_id pata_macio_match[] =
1338{
1339 {
1340 .name = "IDE",
1341 },
1342 {
1343 .name = "ATA",
1344 },
1345 {
1346 .type = "ide",
1347 },
1348 {
1349 .type = "ata",
1350 },
1351 {},
1352};
1353
1354static struct macio_driver pata_macio_driver =
1355{
1356 .name = "pata-macio",
1357 .match_table = pata_macio_match,
1358 .probe = pata_macio_attach,
1359 .remove = pata_macio_detach,
1360#ifdef CONFIG_PM
1361 .suspend = pata_macio_suspend,
1362 .resume = pata_macio_resume,
1363#endif
1364#ifdef CONFIG_PMAC_MEDIABAY
1365 .mediabay_event = pata_macio_mb_event,
1366#endif
1367 .driver = {
1368 .owner = THIS_MODULE,
1369 },
1370};
1371
1372static const struct pci_device_id pata_macio_pci_match[] = {
1373 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
1374 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
1375 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
1376 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
1377 { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
1378 {},
1379};
1380
1381static struct pci_driver pata_macio_pci_driver = {
1382 .name = "pata-pci-macio",
1383 .id_table = pata_macio_pci_match,
1384 .probe = pata_macio_pci_attach,
1385 .remove = pata_macio_pci_detach,
1386#ifdef CONFIG_PM
1387 .suspend = pata_macio_pci_suspend,
1388 .resume = pata_macio_pci_resume,
1389#endif
1390 .driver = {
1391 .owner = THIS_MODULE,
1392 },
1393};
1394MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
1395
1396
1397static int __init pata_macio_init(void)
1398{
1399 int rc;
1400
1401 if (!machine_is(powermac))
1402 return -ENODEV;
1403
1404 rc = pci_register_driver(&pata_macio_pci_driver);
1405 if (rc)
1406 return rc;
1407 rc = macio_register_driver(&pata_macio_driver);
1408 if (rc) {
1409 pci_unregister_driver(&pata_macio_pci_driver);
1410 return rc;
1411 }
1412 return 0;
1413}
1414
1415static void __exit pata_macio_exit(void)
1416{
1417 macio_unregister_driver(&pata_macio_driver);
1418 pci_unregister_driver(&pata_macio_pci_driver);
1419}
1420
1421module_init(pata_macio_init);
1422module_exit(pata_macio_exit);
1423
1424MODULE_AUTHOR("Benjamin Herrenschmidt");
1425MODULE_DESCRIPTION("Apple MacIO PATA driver");
1426MODULE_LICENSE("GPL");
1427MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54d..27fd775375b0 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
35 struct cpu *cpu = container_of(dev, struct cpu, sysdev); 35 struct cpu *cpu = container_of(dev, struct cpu, sysdev);
36 ssize_t ret; 36 ssize_t ret;
37 37
38 cpu_hotplug_driver_lock();
38 switch (buf[0]) { 39 switch (buf[0]) {
39 case '0': 40 case '0':
40 ret = cpu_down(cpu->sysdev.id); 41 ret = cpu_down(cpu->sysdev.id);
@@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
49 default: 50 default:
50 ret = -EINVAL; 51 ret = -EINVAL;
51 } 52 }
53 cpu_hotplug_driver_unlock();
52 54
53 if (ret >= 0) 55 if (ret >= 0)
54 ret = count; 56 ret = count;
@@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu)
72 per_cpu(cpu_sys_devices, logical_cpu) = NULL; 74 per_cpu(cpu_sys_devices, logical_cpu) = NULL;
73 return; 75 return;
74} 76}
77
78#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
79static ssize_t cpu_probe_store(struct class *class, const char *buf,
80 size_t count)
81{
82 return arch_cpu_probe(buf, count);
83}
84
85static ssize_t cpu_release_store(struct class *class, const char *buf,
86 size_t count)
87{
88 return arch_cpu_release(buf, count);
89}
90
91static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
92static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
93
94int __init cpu_probe_release_init(void)
95{
96 int rc;
97
98 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
99 &class_attr_probe.attr);
100 if (!rc)
101 rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
102 &class_attr_release.attr);
103
104 return rc;
105}
106device_initcall(cpu_probe_release_init);
107#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
108
75#else /* ... !CONFIG_HOTPLUG_CPU */ 109#else /* ... !CONFIG_HOTPLUG_CPU */
76static inline void register_cpu_control(struct cpu *cpu) 110static inline void register_cpu_control(struct cpu *cpu)
77{ 111{
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 6380ad8d91bd..59ca2b77b574 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -200,7 +200,7 @@ struct floppy_state {
200 int ejected; 200 int ejected;
201 wait_queue_head_t wait; 201 wait_queue_head_t wait;
202 int wanted; 202 int wanted;
203 struct device_node* media_bay; /* NULL when not in bay */ 203 struct macio_dev *mdev;
204 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; 204 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
205}; 205};
206 206
@@ -303,14 +303,13 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
303static void do_fd_request(struct request_queue * q) 303static void do_fd_request(struct request_queue * q)
304{ 304{
305 int i; 305 int i;
306 for(i=0;i<floppy_count;i++) 306
307 { 307 for(i=0; i<floppy_count; i++) {
308#ifdef CONFIG_PMAC_MEDIABAY 308 struct floppy_state *fs = &floppy_states[i];
309 if (floppy_states[i].media_bay && 309 if (fs->mdev->media_bay &&
310 check_media_bay(floppy_states[i].media_bay, MB_FD)) 310 check_media_bay(fs->mdev->media_bay) != MB_FD)
311 continue; 311 continue;
312#endif /* CONFIG_PMAC_MEDIABAY */ 312 start_request(fs);
313 start_request(&floppy_states[i]);
314 } 313 }
315} 314}
316 315
@@ -849,10 +848,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
849 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) 848 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
850 return -EPERM; 849 return -EPERM;
851 850
852#ifdef CONFIG_PMAC_MEDIABAY 851 if (fs->mdev->media_bay &&
853 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) 852 check_media_bay(fs->mdev->media_bay) != MB_FD)
854 return -ENXIO; 853 return -ENXIO;
855#endif
856 854
857 switch (cmd) { 855 switch (cmd) {
858 case FDEJECT: 856 case FDEJECT:
@@ -876,10 +874,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
876 int n, err = 0; 874 int n, err = 0;
877 875
878 if (fs->ref_count == 0) { 876 if (fs->ref_count == 0) {
879#ifdef CONFIG_PMAC_MEDIABAY 877 if (fs->mdev->media_bay &&
880 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) 878 check_media_bay(fs->mdev->media_bay) != MB_FD)
881 return -ENXIO; 879 return -ENXIO;
882#endif
883 out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); 880 out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
884 out_8(&sw->control_bic, 0xff); 881 out_8(&sw->control_bic, 0xff);
885 out_8(&sw->mode, 0x95); 882 out_8(&sw->mode, 0x95);
@@ -963,10 +960,9 @@ static int floppy_revalidate(struct gendisk *disk)
963 struct swim3 __iomem *sw; 960 struct swim3 __iomem *sw;
964 int ret, n; 961 int ret, n;
965 962
966#ifdef CONFIG_PMAC_MEDIABAY 963 if (fs->mdev->media_bay &&
967 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) 964 check_media_bay(fs->mdev->media_bay) != MB_FD)
968 return -ENXIO; 965 return -ENXIO;
969#endif
970 966
971 sw = fs->swim3; 967 sw = fs->swim3;
972 grab_drive(fs, revalidating, 0); 968 grab_drive(fs, revalidating, 0);
@@ -1009,7 +1005,6 @@ static const struct block_device_operations floppy_fops = {
1009static int swim3_add_device(struct macio_dev *mdev, int index) 1005static int swim3_add_device(struct macio_dev *mdev, int index)
1010{ 1006{
1011 struct device_node *swim = mdev->ofdev.node; 1007 struct device_node *swim = mdev->ofdev.node;
1012 struct device_node *mediabay;
1013 struct floppy_state *fs = &floppy_states[index]; 1008 struct floppy_state *fs = &floppy_states[index];
1014 int rc = -EBUSY; 1009 int rc = -EBUSY;
1015 1010
@@ -1036,9 +1031,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1036 } 1031 }
1037 dev_set_drvdata(&mdev->ofdev.dev, fs); 1032 dev_set_drvdata(&mdev->ofdev.dev, fs);
1038 1033
1039 mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? 1034 if (mdev->media_bay == NULL)
1040 swim->parent : NULL;
1041 if (mediabay == NULL)
1042 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); 1035 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
1043 1036
1044 memset(fs, 0, sizeof(*fs)); 1037 memset(fs, 0, sizeof(*fs));
@@ -1068,7 +1061,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1068 fs->secpercyl = 36; 1061 fs->secpercyl = 36;
1069 fs->secpertrack = 18; 1062 fs->secpertrack = 18;
1070 fs->total_secs = 2880; 1063 fs->total_secs = 2880;
1071 fs->media_bay = mediabay; 1064 fs->mdev = mdev;
1072 init_waitqueue_head(&fs->wait); 1065 init_waitqueue_head(&fs->wait);
1073 1066
1074 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); 1067 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
@@ -1093,7 +1086,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
1093 init_timer(&fs->timeout); 1086 init_timer(&fs->timeout);
1094 1087
1095 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, 1088 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
1096 mediabay ? "in media bay" : ""); 1089 mdev->media_bay ? "in media bay" : "");
1097 1090
1098 return 0; 1091 return 0;
1099 1092
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 703959eba45a..d89da4ac061f 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -144,16 +144,13 @@ static int uninorth_configure(void)
144 return 0; 144 return 0;
145} 145}
146 146
147static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, 147static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
148 int type)
149{ 148{
150 int i, j, num_entries; 149 int i, num_entries;
151 void *temp; 150 void *temp;
151 u32 *gp;
152 int mask_type; 152 int mask_type;
153 153
154 temp = agp_bridge->current_size;
155 num_entries = A_SIZE_32(temp)->num_entries;
156
157 if (type != mem->type) 154 if (type != mem->type)
158 return -EINVAL; 155 return -EINVAL;
159 156
@@ -163,49 +160,12 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
163 return -EINVAL; 160 return -EINVAL;
164 } 161 }
165 162
166 if ((pg_start + mem->page_count) > num_entries) 163 if (mem->page_count == 0)
167 return -EINVAL; 164 return 0;
168
169 j = pg_start;
170
171 while (j < (pg_start + mem->page_count)) {
172 if (agp_bridge->gatt_table[j])
173 return -EBUSY;
174 j++;
175 }
176
177 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
178 agp_bridge->gatt_table[j] =
179 cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL);
180 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
181 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
182 }
183 (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
184 mb();
185
186 uninorth_tlbflush(mem);
187 return 0;
188}
189
190static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
191{
192 int i, num_entries;
193 void *temp;
194 u32 *gp;
195 int mask_type;
196 165
197 temp = agp_bridge->current_size; 166 temp = agp_bridge->current_size;
198 num_entries = A_SIZE_32(temp)->num_entries; 167 num_entries = A_SIZE_32(temp)->num_entries;
199 168
200 if (type != mem->type)
201 return -EINVAL;
202
203 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
204 if (mask_type != 0) {
205 /* We know nothing of memory types */
206 return -EINVAL;
207 }
208
209 if ((pg_start + mem->page_count) > num_entries) 169 if ((pg_start + mem->page_count) > num_entries)
210 return -EINVAL; 170 return -EINVAL;
211 171
@@ -213,14 +173,18 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
213 for (i = 0; i < mem->page_count; ++i) { 173 for (i = 0; i < mem->page_count; ++i) {
214 if (gp[i]) { 174 if (gp[i]) {
215 dev_info(&agp_bridge->dev->dev, 175 dev_info(&agp_bridge->dev->dev,
216 "u3_insert_memory: entry 0x%x occupied (%x)\n", 176 "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
217 i, gp[i]); 177 i, gp[i]);
218 return -EBUSY; 178 return -EBUSY;
219 } 179 }
220 } 180 }
221 181
222 for (i = 0; i < mem->page_count; i++) { 182 for (i = 0; i < mem->page_count; i++) {
223 gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL; 183 if (is_u3)
184 gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
185 else
186 gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
187 0x1UL);
224 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])), 188 flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
225 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000); 189 (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
226 } 190 }
@@ -230,14 +194,23 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
230 return 0; 194 return 0;
231} 195}
232 196
233int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type) 197int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
234{ 198{
235 size_t i; 199 size_t i;
236 u32 *gp; 200 u32 *gp;
201 int mask_type;
202
203 if (type != mem->type)
204 return -EINVAL;
237 205
238 if (type != 0 || mem->type != 0) 206 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
207 if (mask_type != 0) {
239 /* We know nothing of memory types */ 208 /* We know nothing of memory types */
240 return -EINVAL; 209 return -EINVAL;
210 }
211
212 if (mem->page_count == 0)
213 return 0;
241 214
242 gp = (u32 *) &agp_bridge->gatt_table[pg_start]; 215 gp = (u32 *) &agp_bridge->gatt_table[pg_start];
243 for (i = 0; i < mem->page_count; ++i) 216 for (i = 0; i < mem->page_count; ++i)
@@ -536,7 +509,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
536 .create_gatt_table = uninorth_create_gatt_table, 509 .create_gatt_table = uninorth_create_gatt_table,
537 .free_gatt_table = uninorth_free_gatt_table, 510 .free_gatt_table = uninorth_free_gatt_table,
538 .insert_memory = uninorth_insert_memory, 511 .insert_memory = uninorth_insert_memory,
539 .remove_memory = agp_generic_remove_memory, 512 .remove_memory = uninorth_remove_memory,
540 .alloc_by_type = agp_generic_alloc_by_type, 513 .alloc_by_type = agp_generic_alloc_by_type,
541 .free_by_type = agp_generic_free_by_type, 514 .free_by_type = agp_generic_free_by_type,
542 .agp_alloc_page = agp_generic_alloc_page, 515 .agp_alloc_page = agp_generic_alloc_page,
@@ -562,8 +535,8 @@ const struct agp_bridge_driver u3_agp_driver = {
562 .agp_enable = uninorth_agp_enable, 535 .agp_enable = uninorth_agp_enable,
563 .create_gatt_table = uninorth_create_gatt_table, 536 .create_gatt_table = uninorth_create_gatt_table,
564 .free_gatt_table = uninorth_free_gatt_table, 537 .free_gatt_table = uninorth_free_gatt_table,
565 .insert_memory = u3_insert_memory, 538 .insert_memory = uninorth_insert_memory,
566 .remove_memory = u3_remove_memory, 539 .remove_memory = uninorth_remove_memory,
567 .alloc_by_type = agp_generic_alloc_by_type, 540 .alloc_by_type = agp_generic_alloc_by_type,
568 .free_by_type = agp_generic_free_by_type, 541 .free_by_type = agp_generic_free_by_type,
569 .agp_alloc_page = agp_generic_alloc_page, 542 .agp_alloc_page = agp_generic_alloc_page,
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index a632f25f144a..416d3423150d 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -832,6 +832,7 @@ int hvc_remove(struct hvc_struct *hp)
832 tty_hangup(tty); 832 tty_hangup(tty);
833 return 0; 833 return 0;
834} 834}
835EXPORT_SYMBOL_GPL(hvc_remove);
835 836
836/* Driver initialization: called as soon as someone uses hvc_alloc(). */ 837/* Driver initialization: called as soon as someone uses hvc_alloc(). */
837static int hvc_init(void) 838static int hvc_init(void)
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 97642a7a79c4..7a4e788cab2f 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -43,10 +43,7 @@
43#include <asm/pmac_feature.h> 43#include <asm/pmac_feature.h>
44#include <asm/sections.h> 44#include <asm/sections.h>
45#include <asm/irq.h> 45#include <asm/irq.h>
46
47#ifndef CONFIG_PPC64
48#include <asm/mediabay.h> 46#include <asm/mediabay.h>
49#endif
50 47
51#define DRV_NAME "ide-pmac" 48#define DRV_NAME "ide-pmac"
52 49
@@ -59,13 +56,14 @@ typedef struct pmac_ide_hwif {
59 int irq; 56 int irq;
60 int kind; 57 int kind;
61 int aapl_bus_id; 58 int aapl_bus_id;
62 unsigned mediabay : 1;
63 unsigned broken_dma : 1; 59 unsigned broken_dma : 1;
64 unsigned broken_dma_warn : 1; 60 unsigned broken_dma_warn : 1;
65 struct device_node* node; 61 struct device_node* node;
66 struct macio_dev *mdev; 62 struct macio_dev *mdev;
67 u32 timings[4]; 63 u32 timings[4];
 68	volatile u32 __iomem *kauai_fcr; 64	volatile u32 __iomem *kauai_fcr;
65 ide_hwif_t *hwif;
66
69 /* Those fields are duplicating what is in hwif. We currently 67 /* Those fields are duplicating what is in hwif. We currently
70 * can't use the hwif ones because of some assumptions that are 68 * can't use the hwif ones because of some assumptions that are
 71	 * being done by the generic code about the kind of dma controller 69	 * being done by the generic code about the kind of dma controller
@@ -854,6 +852,11 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
854 pmif->timings[2] = pmif->timings[3] = value2; 852 pmif->timings[2] = pmif->timings[3] = value2;
855} 853}
856 854
855static int on_media_bay(pmac_ide_hwif_t *pmif)
856{
857 return pmif->mdev && pmif->mdev->media_bay != NULL;
858}
859
857/* Suspend call back, should be called after the child devices 860/* Suspend call back, should be called after the child devices
858 * have actually been suspended 861 * have actually been suspended
859 */ 862 */
@@ -866,7 +869,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
866 disable_irq(pmif->irq); 869 disable_irq(pmif->irq);
867 870
868 /* The media bay will handle itself just fine */ 871 /* The media bay will handle itself just fine */
869 if (pmif->mediabay) 872 if (on_media_bay(pmif))
870 return 0; 873 return 0;
871 874
872 /* Kauai has bus control FCRs directly here */ 875 /* Kauai has bus control FCRs directly here */
@@ -889,7 +892,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
889static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif) 892static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
890{ 893{
891 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ 894 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
892 if (!pmif->mediabay) { 895 if (!on_media_bay(pmif)) {
893 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); 896 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
894 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1); 897 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
895 msleep(10); 898 msleep(10);
@@ -950,13 +953,11 @@ static void pmac_ide_init_dev(ide_drive_t *drive)
950 pmac_ide_hwif_t *pmif = 953 pmac_ide_hwif_t *pmif =
951 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); 954 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
952 955
953 if (pmif->mediabay) { 956 if (on_media_bay(pmif)) {
954#ifdef CONFIG_PMAC_MEDIABAY 957 if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
955 if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
956 drive->dev_flags &= ~IDE_DFLAG_NOPROBE; 958 drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
957 return; 959 return;
958 } 960 }
959#endif
960 drive->dev_flags |= IDE_DFLAG_NOPROBE; 961 drive->dev_flags |= IDE_DFLAG_NOPROBE;
961 } 962 }
962} 963}
@@ -1072,26 +1073,23 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1072 writel(KAUAI_FCR_UATA_MAGIC | 1073 writel(KAUAI_FCR_UATA_MAGIC |
1073 KAUAI_FCR_UATA_RESET_N | 1074 KAUAI_FCR_UATA_RESET_N |
1074 KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr); 1075 KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
1075
1076 pmif->mediabay = 0;
1077 1076
1078 /* Make sure we have sane timings */ 1077 /* Make sure we have sane timings */
1079 sanitize_timings(pmif); 1078 sanitize_timings(pmif);
1080 1079
1080 /* If we are on a media bay, wait for it to settle and lock it */
1081 if (pmif->mdev)
1082 lock_media_bay(pmif->mdev->media_bay);
1083
1081 host = ide_host_alloc(&d, hws, 1); 1084 host = ide_host_alloc(&d, hws, 1);
1082 if (host == NULL) 1085 if (host == NULL) {
1083 return -ENOMEM; 1086 rc = -ENOMEM;
1084 hwif = host->ports[0]; 1087 goto bail;
1088 }
1089 hwif = pmif->hwif = host->ports[0];
1085 1090
1086#ifndef CONFIG_PPC64 1091 if (on_media_bay(pmif)) {
1087 /* XXX FIXME: Media bay stuff need re-organizing */ 1092 /* Fixup bus ID for media bay */
1088 if (np->parent && np->parent->name
1089 && strcasecmp(np->parent->name, "media-bay") == 0) {
1090#ifdef CONFIG_PMAC_MEDIABAY
1091 media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq,
1092 hwif);
1093#endif /* CONFIG_PMAC_MEDIABAY */
1094 pmif->mediabay = 1;
1095 if (!bidp) 1093 if (!bidp)
1096 pmif->aapl_bus_id = 1; 1094 pmif->aapl_bus_id = 1;
1097 } else if (pmif->kind == controller_ohare) { 1095 } else if (pmif->kind == controller_ohare) {
@@ -1100,9 +1098,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1100 * units, I keep the old way 1098 * units, I keep the old way
1101 */ 1099 */
1102 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1); 1100 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
1103 } else 1101 } else {
1104#endif
1105 {
1106 /* This is necessary to enable IDE when net-booting */ 1102 /* This is necessary to enable IDE when net-booting */
1107 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1); 1103 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1108 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1); 1104 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
@@ -1112,17 +1108,21 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1112 } 1108 }
1113 1109
1114 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), " 1110 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
1115 "bus ID %d%s, irq %d\n", model_name[pmif->kind], 1111 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
1116 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id, 1112 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1117 pmif->mediabay ? " (mediabay)" : "", hw->irq); 1113 on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
1118 1114
1119 rc = ide_host_register(host, &d, hws); 1115 rc = ide_host_register(host, &d, hws);
1120 if (rc) { 1116 if (rc)
1121 ide_host_free(host); 1117 pmif->hwif = NULL;
1122 return rc;
1123 }
1124 1118
1125 return 0; 1119 if (pmif->mdev)
1120 unlock_media_bay(pmif->mdev->media_bay);
1121
1122 bail:
1123 if (rc && host)
1124 ide_host_free(host);
1125 return rc;
1126} 1126}
1127 1127
1128static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) 1128static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
@@ -1362,6 +1362,25 @@ pmac_ide_pci_resume(struct pci_dev *pdev)
1362 return rc; 1362 return rc;
1363} 1363}
1364 1364
1365#ifdef CONFIG_PMAC_MEDIABAY
1366static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
1367{
1368 pmac_ide_hwif_t *pmif =
1369 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1370
1371 switch(mb_state) {
1372 case MB_CD:
1373 if (!pmif->hwif->present)
1374 ide_port_scan(pmif->hwif);
1375 break;
1376 default:
1377 if (pmif->hwif->present)
1378 ide_port_unregister_devices(pmif->hwif);
1379 }
1380}
1381#endif /* CONFIG_PMAC_MEDIABAY */
1382
1383
1365static struct of_device_id pmac_ide_macio_match[] = 1384static struct of_device_id pmac_ide_macio_match[] =
1366{ 1385{
1367 { 1386 {
@@ -1386,6 +1405,9 @@ static struct macio_driver pmac_ide_macio_driver =
1386 .probe = pmac_ide_macio_attach, 1405 .probe = pmac_ide_macio_attach,
1387 .suspend = pmac_ide_macio_suspend, 1406 .suspend = pmac_ide_macio_suspend,
1388 .resume = pmac_ide_macio_resume, 1407 .resume = pmac_ide_macio_resume,
1408#ifdef CONFIG_PMAC_MEDIABAY
1409 .mediabay_event = pmac_ide_macio_mb_event,
1410#endif
1389}; 1411};
1390 1412
1391static const struct pci_device_id pmac_ide_pci_match[] = { 1413static const struct pci_device_id pmac_ide_pci_match[] = {
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 588a5b0bc4b5..26a303a1d1ab 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -379,6 +379,11 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
379 dev->ofdev.dev.parent = parent; 379 dev->ofdev.dev.parent = parent;
380 dev->ofdev.dev.bus = &macio_bus_type; 380 dev->ofdev.dev.bus = &macio_bus_type;
381 dev->ofdev.dev.release = macio_release_dev; 381 dev->ofdev.dev.release = macio_release_dev;
382 dev->ofdev.dev.dma_parms = &dev->dma_parms;
383
 384	/* Standard DMA parameters */
385 dma_set_max_seg_size(&dev->ofdev.dev, 65536);
386 dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
382 387
383#ifdef CONFIG_PCI 388#ifdef CONFIG_PCI
384 /* Set the DMA ops to the ones from the PCI device, this could be 389 /* Set the DMA ops to the ones from the PCI device, this could be
@@ -538,6 +543,42 @@ void macio_unregister_driver(struct macio_driver *drv)
538 driver_unregister(&drv->driver); 543 driver_unregister(&drv->driver);
539} 544}
540 545
546/* Managed MacIO resources */
547struct macio_devres {
548 u32 res_mask;
549};
550
551static void maciom_release(struct device *gendev, void *res)
552{
553 struct macio_dev *dev = to_macio_device(gendev);
554 struct macio_devres *dr = res;
555 int i, max;
556
557 max = min(dev->n_resources, 32);
558 for (i = 0; i < max; i++) {
559 if (dr->res_mask & (1 << i))
560 macio_release_resource(dev, i);
561 }
562}
563
564int macio_enable_devres(struct macio_dev *dev)
565{
566 struct macio_devres *dr;
567
568 dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL);
569 if (!dr) {
570 dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL);
571 if (!dr)
572 return -ENOMEM;
573 }
574 return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL;
575}
576
577static struct macio_devres * find_macio_dr(struct macio_dev *dev)
578{
579 return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL);
580}
581
541/** 582/**
542 * macio_request_resource - Request an MMIO resource 583 * macio_request_resource - Request an MMIO resource
543 * @dev: pointer to the device holding the resource 584 * @dev: pointer to the device holding the resource
@@ -555,6 +596,8 @@ void macio_unregister_driver(struct macio_driver *drv)
555int macio_request_resource(struct macio_dev *dev, int resource_no, 596int macio_request_resource(struct macio_dev *dev, int resource_no,
556 const char *name) 597 const char *name)
557{ 598{
599 struct macio_devres *dr = find_macio_dr(dev);
600
558 if (macio_resource_len(dev, resource_no) == 0) 601 if (macio_resource_len(dev, resource_no) == 0)
559 return 0; 602 return 0;
560 603
@@ -562,6 +605,9 @@ int macio_request_resource(struct macio_dev *dev, int resource_no,
562 macio_resource_len(dev, resource_no), 605 macio_resource_len(dev, resource_no),
563 name)) 606 name))
564 goto err_out; 607 goto err_out;
608
609 if (dr && resource_no < 32)
610 dr->res_mask |= 1 << resource_no;
565 611
566 return 0; 612 return 0;
567 613
@@ -582,10 +628,14 @@ err_out:
582 */ 628 */
583void macio_release_resource(struct macio_dev *dev, int resource_no) 629void macio_release_resource(struct macio_dev *dev, int resource_no)
584{ 630{
631 struct macio_devres *dr = find_macio_dr(dev);
632
585 if (macio_resource_len(dev, resource_no) == 0) 633 if (macio_resource_len(dev, resource_no) == 0)
586 return; 634 return;
587 release_mem_region(macio_resource_start(dev, resource_no), 635 release_mem_region(macio_resource_start(dev, resource_no),
588 macio_resource_len(dev, resource_no)); 636 macio_resource_len(dev, resource_no));
637 if (dr && resource_no < 32)
638 dr->res_mask &= ~(1 << resource_no);
589} 639}
590 640
591/** 641/**
@@ -744,3 +794,5 @@ EXPORT_SYMBOL(macio_request_resource);
744EXPORT_SYMBOL(macio_release_resource); 794EXPORT_SYMBOL(macio_release_resource);
745EXPORT_SYMBOL(macio_request_resources); 795EXPORT_SYMBOL(macio_request_resources);
746EXPORT_SYMBOL(macio_release_resources); 796EXPORT_SYMBOL(macio_release_resources);
797EXPORT_SYMBOL(macio_enable_devres);
798
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 029ad8ce8a7e..08002b88f342 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -33,15 +33,6 @@
33#include <linux/adb.h> 33#include <linux/adb.h>
34#include <linux/pmu.h> 34#include <linux/pmu.h>
35 35
36
37#define MB_DEBUG
38
39#ifdef MB_DEBUG
40#define MBDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg)
41#else
42#define MBDBG(fmt, arg...) do { } while (0)
43#endif
44
45#define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2)) 36#define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2))
46#define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r)) 37#define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r))
47 38
@@ -76,28 +67,14 @@ struct media_bay_info {
76 int index; 67 int index;
77 int cached_gpio; 68 int cached_gpio;
78 int sleeping; 69 int sleeping;
70 int user_lock;
79 struct mutex lock; 71 struct mutex lock;
80#ifdef CONFIG_BLK_DEV_IDE_PMAC
81 ide_hwif_t *cd_port;
82 void __iomem *cd_base;
83 int cd_irq;
84 int cd_retry;
85#endif
86#if defined(CONFIG_BLK_DEV_IDE_PMAC)
87 int cd_index;
88#endif
89}; 72};
90 73
91#define MAX_BAYS 2 74#define MAX_BAYS 2
92 75
93static struct media_bay_info media_bays[MAX_BAYS]; 76static struct media_bay_info media_bays[MAX_BAYS];
94int media_bay_count = 0; 77static int media_bay_count = 0;
95
96#ifdef CONFIG_BLK_DEV_IDE_PMAC
97/* check the busy bit in the media-bay ide interface
98 (assumes the media-bay contains an ide device) */
99#define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0)
100#endif
101 78
102/* 79/*
103 * Wait that number of ms between each step in normal polling mode 80 * Wait that number of ms between each step in normal polling mode
@@ -130,21 +107,11 @@ int media_bay_count = 0;
130 107
131/* 108/*
132 * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted 109 * Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted
133 * (or until the device is ready) before waiting for busy bit to disappear 110 * (or until the device is ready) before calling into the driver
134 */ 111 */
135#define MB_IDE_WAIT 1000 112#define MB_IDE_WAIT 1000
136 113
137/* 114/*
138 * Timeout waiting for busy bit of an IDE device to go down
139 */
140#define MB_IDE_TIMEOUT 5000
141
142/*
143 * Max retries of the full power up/down sequence for an IDE device
144 */
145#define MAX_CD_RETRIES 3
146
147/*
148 * States of a media bay 115 * States of a media bay
149 */ 116 */
150enum { 117enum {
@@ -153,7 +120,6 @@ enum {
153 mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */ 120 mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */
154 mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */ 121 mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */
 155	mb_ide_resetting,	/* IDE reset bit unset, waiting MB_IDE_WAIT */ 122	mb_ide_resetting,	/* IDE reset bit unset, waiting MB_IDE_WAIT */
156 mb_ide_waiting, /* Waiting for BUSY bit to go away until MB_IDE_TIMEOUT */
157 mb_up, /* Media bay full */ 123 mb_up, /* Media bay full */
158 mb_powering_down /* Powering down (avoid too fast down/up) */ 124 mb_powering_down /* Powering down (avoid too fast down/up) */
159}; 125};
@@ -373,12 +339,12 @@ static inline void set_mb_power(struct media_bay_info* bay, int onoff)
373 if (onoff) { 339 if (onoff) {
374 bay->ops->power(bay, 1); 340 bay->ops->power(bay, 1);
375 bay->state = mb_powering_up; 341 bay->state = mb_powering_up;
376 MBDBG("mediabay%d: powering up\n", bay->index); 342 pr_debug("mediabay%d: powering up\n", bay->index);
377 } else { 343 } else {
378 /* Make sure everything is powered down & disabled */ 344 /* Make sure everything is powered down & disabled */
379 bay->ops->power(bay, 0); 345 bay->ops->power(bay, 0);
380 bay->state = mb_powering_down; 346 bay->state = mb_powering_down;
381 MBDBG("mediabay%d: powering down\n", bay->index); 347 pr_debug("mediabay%d: powering down\n", bay->index);
382 } 348 }
383 bay->timer = msecs_to_jiffies(MB_POWER_DELAY); 349 bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
384} 350}
@@ -387,107 +353,118 @@ static void poll_media_bay(struct media_bay_info* bay)
387{ 353{
388 int id = bay->ops->content(bay); 354 int id = bay->ops->content(bay);
389 355
390 if (id == bay->last_value) { 356 static char *mb_content_types[] = {
391 if (id != bay->content_id) { 357 "a floppy drive",
392 bay->value_count += msecs_to_jiffies(MB_POLL_DELAY); 358 "a floppy drive",
 393	 if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) { 359	 "an unsupported audio device",
394 /* If the device type changes without going thru 360 "an ATA device",
395 * "MB_NO", we force a pass by "MB_NO" to make sure 361 "an unsupported PCI device",
396 * things are properly reset 362 "an unknown device",
397 */ 363 };
398 if ((id != MB_NO) && (bay->content_id != MB_NO)) { 364
399 id = MB_NO; 365 if (id != bay->last_value) {
400 MBDBG("mediabay%d: forcing MB_NO\n", bay->index);
401 }
402 MBDBG("mediabay%d: switching to %d\n", bay->index, id);
403 set_mb_power(bay, id != MB_NO);
404 bay->content_id = id;
405 if (id == MB_NO) {
406#ifdef CONFIG_BLK_DEV_IDE_PMAC
407 bay->cd_retry = 0;
408#endif
409 printk(KERN_INFO "media bay %d is empty\n", bay->index);
410 }
411 }
412 }
413 } else {
414 bay->last_value = id; 366 bay->last_value = id;
415 bay->value_count = 0; 367 bay->value_count = 0;
368 return;
369 }
370 if (id == bay->content_id)
371 return;
372
373 bay->value_count += msecs_to_jiffies(MB_POLL_DELAY);
374 if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) {
375 /* If the device type changes without going thru
376 * "MB_NO", we force a pass by "MB_NO" to make sure
377 * things are properly reset
378 */
379 if ((id != MB_NO) && (bay->content_id != MB_NO)) {
380 id = MB_NO;
381 pr_debug("mediabay%d: forcing MB_NO\n", bay->index);
382 }
383 pr_debug("mediabay%d: switching to %d\n", bay->index, id);
384 set_mb_power(bay, id != MB_NO);
385 bay->content_id = id;
386 if (id >= MB_NO || id < 0)
387 printk(KERN_INFO "mediabay%d: Bay is now empty\n", bay->index);
388 else
389 printk(KERN_INFO "mediabay%d: Bay contains %s\n",
390 bay->index, mb_content_types[id]);
416 } 391 }
417} 392}
418 393
419#ifdef CONFIG_BLK_DEV_IDE_PMAC 394int check_media_bay(struct macio_dev *baydev)
420int check_media_bay(struct device_node *which_bay, int what)
421{ 395{
422 int i; 396 struct media_bay_info* bay;
397 int id;
423 398
424 for (i=0; i<media_bay_count; i++) 399 if (baydev == NULL)
425 if (media_bays[i].mdev && which_bay == media_bays[i].mdev->ofdev.node) { 400 return MB_NO;
426 if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up) 401
 427	 return 0; 402	/* This returns an instant snapshot, not locking, since
428 media_bays[i].cd_index = -1; 403 * we may be called with the bay lock held. The resulting
 429	 return -EINVAL; 404	 * fuzziness of the result if called at the wrong time is
430 } 405 * not actually a huge deal
431 return -ENODEV; 406 */
407 bay = macio_get_drvdata(baydev);
408 if (bay == NULL)
409 return MB_NO;
410 id = bay->content_id;
411 if (bay->state != mb_up)
412 return MB_NO;
413 if (id == MB_FD1)
414 return MB_FD;
415 return id;
432} 416}
433EXPORT_SYMBOL(check_media_bay); 417EXPORT_SYMBOL_GPL(check_media_bay);
434 418
435int check_media_bay_by_base(unsigned long base, int what) 419void lock_media_bay(struct macio_dev *baydev)
436{ 420{
437 int i; 421 struct media_bay_info* bay;
438
439 for (i=0; i<media_bay_count; i++)
440 if (media_bays[i].mdev && base == (unsigned long) media_bays[i].cd_base) {
441 if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up)
442 return 0;
443 media_bays[i].cd_index = -1;
444 return -EINVAL;
445 }
446 422
447 return -ENODEV; 423 if (baydev == NULL)
424 return;
425 bay = macio_get_drvdata(baydev);
426 if (bay == NULL)
427 return;
428 mutex_lock(&bay->lock);
429 bay->user_lock = 1;
448} 430}
449EXPORT_SYMBOL_GPL(check_media_bay_by_base); 431EXPORT_SYMBOL_GPL(lock_media_bay);
450 432
451int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base, 433void unlock_media_bay(struct macio_dev *baydev)
452 int irq, ide_hwif_t *hwif)
453{ 434{
454 int i; 435 struct media_bay_info* bay;
455 436
456 for (i=0; i<media_bay_count; i++) { 437 if (baydev == NULL)
457 struct media_bay_info* bay = &media_bays[i]; 438 return;
458 439 bay = macio_get_drvdata(baydev);
459 if (bay->mdev && which_bay == bay->mdev->ofdev.node) { 440 if (bay == NULL)
460 int timeout = 5000, index = hwif->index; 441 return;
461 442 if (bay->user_lock) {
462 mutex_lock(&bay->lock); 443 bay->user_lock = 0;
463 444 mutex_unlock(&bay->lock);
464 bay->cd_port = hwif;
465 bay->cd_base = (void __iomem *) base;
466 bay->cd_irq = irq;
467
468 if ((MB_CD != bay->content_id) || bay->state != mb_up) {
469 mutex_unlock(&bay->lock);
470 return 0;
471 }
472 printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i);
473 do {
474 if (MB_IDE_READY(i)) {
475 bay->cd_index = index;
476 mutex_unlock(&bay->lock);
477 return 0;
478 }
479 mdelay(1);
480 } while(--timeout);
481 printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i);
482 mutex_unlock(&bay->lock);
483 return -ENODEV;
484 }
485 } 445 }
446}
447EXPORT_SYMBOL_GPL(unlock_media_bay);
486 448
487 return -ENODEV; 449static int mb_broadcast_hotplug(struct device *dev, void *data)
450{
451 struct media_bay_info* bay = data;
452 struct macio_dev *mdev;
453 struct macio_driver *drv;
454 int state;
455
456 if (dev->bus != &macio_bus_type)
457 return 0;
458
459 state = bay->state == mb_up ? bay->content_id : MB_NO;
460 if (state == MB_FD1)
461 state = MB_FD;
462 mdev = to_macio_device(dev);
463 drv = to_macio_driver(dev->driver);
464 if (dev->driver && drv->mediabay_event)
465 drv->mediabay_event(mdev, state);
466 return 0;
488} 467}
489EXPORT_SYMBOL_GPL(media_bay_set_ide_infos);
490#endif /* CONFIG_BLK_DEV_IDE_PMAC */
491 468
492static void media_bay_step(int i) 469static void media_bay_step(int i)
493{ 470{
@@ -497,8 +474,8 @@ static void media_bay_step(int i)
497 if (bay->state != mb_powering_down) 474 if (bay->state != mb_powering_down)
498 poll_media_bay(bay); 475 poll_media_bay(bay);
499 476
500 /* If timer expired or polling IDE busy, run state machine */ 477 /* If timer expired run state machine */
501 if ((bay->state != mb_ide_waiting) && (bay->timer != 0)) { 478 if (bay->timer != 0) {
502 bay->timer -= msecs_to_jiffies(MB_POLL_DELAY); 479 bay->timer -= msecs_to_jiffies(MB_POLL_DELAY);
503 if (bay->timer > 0) 480 if (bay->timer > 0)
504 return; 481 return;
@@ -508,100 +485,50 @@ static void media_bay_step(int i)
508 switch(bay->state) { 485 switch(bay->state) {
509 case mb_powering_up: 486 case mb_powering_up:
510 if (bay->ops->setup_bus(bay, bay->last_value) < 0) { 487 if (bay->ops->setup_bus(bay, bay->last_value) < 0) {
511 MBDBG("mediabay%d: device not supported (kind:%d)\n", i, bay->content_id); 488 pr_debug("mediabay%d: device not supported (kind:%d)\n",
489 i, bay->content_id);
512 set_mb_power(bay, 0); 490 set_mb_power(bay, 0);
513 break; 491 break;
514 } 492 }
515 bay->timer = msecs_to_jiffies(MB_RESET_DELAY); 493 bay->timer = msecs_to_jiffies(MB_RESET_DELAY);
516 bay->state = mb_enabling_bay; 494 bay->state = mb_enabling_bay;
517 MBDBG("mediabay%d: enabling (kind:%d)\n", i, bay->content_id); 495 pr_debug("mediabay%d: enabling (kind:%d)\n", i, bay->content_id);
518 break; 496 break;
519 case mb_enabling_bay: 497 case mb_enabling_bay:
520 bay->ops->un_reset(bay); 498 bay->ops->un_reset(bay);
521 bay->timer = msecs_to_jiffies(MB_SETUP_DELAY); 499 bay->timer = msecs_to_jiffies(MB_SETUP_DELAY);
522 bay->state = mb_resetting; 500 bay->state = mb_resetting;
523 MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id); 501 pr_debug("mediabay%d: releasing bay reset (kind:%d)\n",
502 i, bay->content_id);
524 break; 503 break;
525 case mb_resetting: 504 case mb_resetting:
526 if (bay->content_id != MB_CD) { 505 if (bay->content_id != MB_CD) {
527 MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id); 506 pr_debug("mediabay%d: bay is up (kind:%d)\n", i,
507 bay->content_id);
528 bay->state = mb_up; 508 bay->state = mb_up;
509 device_for_each_child(&bay->mdev->ofdev.dev,
510 bay, mb_broadcast_hotplug);
529 break; 511 break;
530 } 512 }
531#ifdef CONFIG_BLK_DEV_IDE_PMAC 513 pr_debug("mediabay%d: releasing ATA reset (kind:%d)\n",
532 MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id); 514 i, bay->content_id);
533 bay->ops->un_reset_ide(bay); 515 bay->ops->un_reset_ide(bay);
534 bay->timer = msecs_to_jiffies(MB_IDE_WAIT); 516 bay->timer = msecs_to_jiffies(MB_IDE_WAIT);
535 bay->state = mb_ide_resetting; 517 bay->state = mb_ide_resetting;
536#else
537 printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i);
538 set_mb_power(bay, 0);
539#endif /* CONFIG_BLK_DEV_IDE_PMAC */
540 break; 518 break;
541#ifdef CONFIG_BLK_DEV_IDE_PMAC 519
542 case mb_ide_resetting: 520 case mb_ide_resetting:
543 bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT); 521 pr_debug("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id);
544 bay->state = mb_ide_waiting; 522 bay->state = mb_up;
545 MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id); 523 device_for_each_child(&bay->mdev->ofdev.dev,
524 bay, mb_broadcast_hotplug);
546 break; 525 break;
547 case mb_ide_waiting: 526
548 if (bay->cd_base == NULL) {
549 bay->timer = 0;
550 bay->state = mb_up;
551 MBDBG("mediabay%d: up before IDE init\n", i);
552 break;
553 } else if (MB_IDE_READY(i)) {
554 bay->timer = 0;
555 bay->state = mb_up;
556 if (bay->cd_index < 0) {
557 printk("mediabay %d, registering IDE...\n", i);
558 pmu_suspend();
559 ide_port_scan(bay->cd_port);
560 if (bay->cd_port->present)
561 bay->cd_index = bay->cd_port->index;
562 pmu_resume();
563 }
564 if (bay->cd_index == -1) {
565 /* We eventually do a retry */
566 bay->cd_retry++;
567 printk("IDE register error\n");
568 set_mb_power(bay, 0);
569 } else {
570 printk(KERN_DEBUG "media-bay %d is ide%d\n", i, bay->cd_index);
571 MBDBG("mediabay %d IDE ready\n", i);
572 }
573 break;
574 } else if (bay->timer > 0)
575 bay->timer -= msecs_to_jiffies(MB_POLL_DELAY);
576 if (bay->timer <= 0) {
577 printk("\nIDE Timeout in bay %d !, IDE state is: 0x%02x\n",
578 i, readb(bay->cd_base + 0x70));
579 MBDBG("mediabay%d: nIDE Timeout !\n", i);
580 set_mb_power(bay, 0);
581 bay->timer = 0;
582 }
583 break;
584#endif /* CONFIG_BLK_DEV_IDE_PMAC */
585 case mb_powering_down: 527 case mb_powering_down:
586 bay->state = mb_empty; 528 bay->state = mb_empty;
587#ifdef CONFIG_BLK_DEV_IDE_PMAC 529 device_for_each_child(&bay->mdev->ofdev.dev,
588 if (bay->cd_index >= 0) { 530 bay, mb_broadcast_hotplug);
589 printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i, 531 pr_debug("mediabay%d: end of power down\n", i);
590 bay->cd_index);
591 ide_port_unregister_devices(bay->cd_port);
592 bay->cd_index = -1;
593 }
594 if (bay->cd_retry) {
595 if (bay->cd_retry > MAX_CD_RETRIES) {
596 /* Should add an error sound (sort of beep in dmasound) */
597 printk("\nmedia-bay %d, IDE device badly inserted or unrecognised\n", i);
598 } else {
599 /* Force a new power down/up sequence */
600 bay->content_id = MB_NO;
601 }
602 }
603#endif /* CONFIG_BLK_DEV_IDE_PMAC */
604 MBDBG("mediabay%d: end of power down\n", i);
605 break; 532 break;
606 } 533 }
607} 534}
@@ -676,11 +603,6 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
676 bay->last_value = bay->ops->content(bay); 603 bay->last_value = bay->ops->content(bay);
677 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); 604 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
678 bay->state = mb_empty; 605 bay->state = mb_empty;
679 do {
680 msleep(MB_POLL_DELAY);
681 media_bay_step(i);
682 } while((bay->state != mb_empty) &&
683 (bay->state != mb_up));
684 606
685 /* Mark us ready by filling our mdev data */ 607 /* Mark us ready by filling our mdev data */
686 macio_set_drvdata(mdev, bay); 608 macio_set_drvdata(mdev, bay);
@@ -725,7 +647,7 @@ static int media_bay_resume(struct macio_dev *mdev)
725 set_mb_power(bay, 0); 647 set_mb_power(bay, 0);
726 msleep(MB_POWER_DELAY); 648 msleep(MB_POWER_DELAY);
727 if (bay->ops->content(bay) != bay->content_id) { 649 if (bay->ops->content(bay) != bay->content_id) {
728 printk("mediabay%d: content changed during sleep...\n", bay->index); 650 printk("mediabay%d: Content changed during sleep...\n", bay->index);
729 mutex_unlock(&bay->lock); 651 mutex_unlock(&bay->lock);
730 return 0; 652 return 0;
731 } 653 }
@@ -733,9 +655,6 @@ static int media_bay_resume(struct macio_dev *mdev)
733 bay->last_value = bay->content_id; 655 bay->last_value = bay->content_id;
734 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY); 656 bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
735 bay->timer = msecs_to_jiffies(MB_POWER_DELAY); 657 bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
736#ifdef CONFIG_BLK_DEV_IDE_PMAC
737 bay->cd_retry = 0;
738#endif
739 do { 658 do {
740 msleep(MB_POLL_DELAY); 659 msleep(MB_POLL_DELAY);
741 media_bay_step(bay->index); 660 media_bay_step(bay->index);
@@ -823,9 +742,6 @@ static int __init media_bay_init(void)
823 for (i=0; i<MAX_BAYS; i++) { 742 for (i=0; i<MAX_BAYS; i++) {
824 memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info)); 743 memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info));
825 media_bays[i].content_id = -1; 744 media_bays[i].content_id = -1;
826#ifdef CONFIG_BLK_DEV_IDE_PMAC
827 media_bays[i].cd_index = -1;
828#endif
829 } 745 }
830 if (!machine_is(powermac)) 746 if (!machine_is(powermac))
831 return 0; 747 return 0;
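The IDE-specific bay handling removed above is replaced by a broadcast to the bay's child devices: on mb_up and at the end of a power down, the state machine now calls device_for_each_child() with mb_broadcast_hotplug(). That callback is not visible in this excerpt; the sketch below shows one plausible shape for it, and the mediabay_event hook on struct macio_driver is an assumption, not something taken from these hunks.

/* Sketch only: plausible shape of the device_for_each_child() callback
 * used above.  The mediabay_event hook is an assumed extension of
 * struct macio_driver, not code shown in this diff. */
static int mb_broadcast_hotplug(struct device *dev, void *data)
{
	struct media_bay_info *bay = data;
	struct macio_dev *mdev = to_macio_device(dev);
	struct macio_driver *drv = to_macio_driver(dev->driver);
	int state = (bay->state == mb_up) ? bay->content_id : MB_NO;

	if (dev->driver && drv->mediabay_event)
		drv->mediabay_event(mdev, state);
	return 0;
}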
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index b195d753d2ed..c876349c32de 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -13,7 +13,6 @@
13#include <linux/fcntl.h> 13#include <linux/fcntl.h>
14#include <linux/nvram.h> 14#include <linux/nvram.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/smp_lock.h>
17#include <asm/uaccess.h> 16#include <asm/uaccess.h>
18#include <asm/nvram.h> 17#include <asm/nvram.h>
19 18
@@ -21,7 +20,6 @@
21 20
22static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) 21static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
23{ 22{
24 lock_kernel();
25 switch (origin) { 23 switch (origin) {
26 case 1: 24 case 1:
27 offset += file->f_pos; 25 offset += file->f_pos;
@@ -30,12 +28,10 @@ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
30 offset += NVRAM_SIZE; 28 offset += NVRAM_SIZE;
31 break; 29 break;
32 } 30 }
33 if (offset < 0) { 31 if (offset < 0)
34 unlock_kernel();
35 return -EINVAL; 32 return -EINVAL;
36 } 33
37 file->f_pos = offset; 34 file->f_pos = offset;
38 unlock_kernel();
39 return file->f_pos; 35 return file->f_pos;
40} 36}
41 37
@@ -76,8 +72,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
76 return p - buf; 72 return p - buf;
77} 73}
78 74
79static int nvram_ioctl(struct inode *inode, struct file *file, 75static long nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
80 unsigned int cmd, unsigned long arg)
81{ 76{
82 switch(cmd) { 77 switch(cmd) {
83 case PMAC_NVRAM_GET_OFFSET: 78 case PMAC_NVRAM_GET_OFFSET:
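With lock_kernel()/unlock_kernel() gone, nvram_llseek() no longer needs the early-return unlock, and nvram_ioctl() now has the unlocked_ioctl prototype (struct file *, cmd, arg, returning long). Such a handler is normally exposed through the .unlocked_ioctl field of file_operations; the wiring below is only a sketch with assumed member names, since the driver's actual file_operations update is not part of the hunks shown here.

/* Sketch only: how an unlocked_ioctl-style handler is typically hooked
 * up.  Member names other than .unlocked_ioctl are assumptions. */
static const struct file_operations nvram_fops = {
	.owner		= THIS_MODULE,
	.llseek		= nvram_llseek,
	.read		= read_nvram,
	.write		= write_nvram,
	.unlocked_ioctl	= nvram_ioctl,
};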
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 556f0feaa4df..5ff47ba7f2d0 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -79,6 +79,7 @@ struct thermostat {
79 u8 limits[3]; 79 u8 limits[3];
80 int last_speed[2]; 80 int last_speed[2];
81 int last_var[2]; 81 int last_var[2];
82 int pwm_inv[2];
82}; 83};
83 84
84static enum {ADT7460, ADT7467} therm_type; 85static enum {ADT7460, ADT7467} therm_type;
@@ -229,19 +230,23 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
229 230
230 if (speed >= 0) { 231 if (speed >= 0) {
231 manual = read_reg(th, MANUAL_MODE[fan]); 232 manual = read_reg(th, MANUAL_MODE[fan]);
233 manual &= ~INVERT_MASK;
232 write_reg(th, MANUAL_MODE[fan], 234 write_reg(th, MANUAL_MODE[fan],
233 (manual|MANUAL_MASK) & (~INVERT_MASK)); 235 manual | MANUAL_MASK | th->pwm_inv[fan]);
234 write_reg(th, FAN_SPD_SET[fan], speed); 236 write_reg(th, FAN_SPD_SET[fan], speed);
235 } else { 237 } else {
236 /* back to automatic */ 238 /* back to automatic */
237 if(therm_type == ADT7460) { 239 if(therm_type == ADT7460) {
238 manual = read_reg(th, 240 manual = read_reg(th,
239 MANUAL_MODE[fan]) & (~MANUAL_MASK); 241 MANUAL_MODE[fan]) & (~MANUAL_MASK);
240 242 manual &= ~INVERT_MASK;
243 manual |= th->pwm_inv[fan];
241 write_reg(th, 244 write_reg(th,
242 MANUAL_MODE[fan], manual|REM_CONTROL[fan]); 245 MANUAL_MODE[fan], manual|REM_CONTROL[fan]);
243 } else { 246 } else {
244 manual = read_reg(th, MANUAL_MODE[fan]); 247 manual = read_reg(th, MANUAL_MODE[fan]);
248 manual &= ~INVERT_MASK;
249 manual |= th->pwm_inv[fan];
245 write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); 250 write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK));
246 } 251 }
247 } 252 }
@@ -387,7 +392,7 @@ static int probe_thermostat(struct i2c_client *client,
387 i2c_set_clientdata(client, th); 392 i2c_set_clientdata(client, th);
388 th->clt = client; 393 th->clt = client;
389 394
390 rc = read_reg(th, 0); 395 rc = read_reg(th, CONFIG_REG);
391 if (rc < 0) { 396 if (rc < 0) {
392 dev_err(&client->dev, "Thermostat failed to read config!\n"); 397 dev_err(&client->dev, "Thermostat failed to read config!\n");
393 kfree(th); 398 kfree(th);
@@ -418,6 +423,10 @@ static int probe_thermostat(struct i2c_client *client,
418 423
419 thermostat = th; 424 thermostat = th;
420 425
426 /* record invert bit status because fw can corrupt it after suspend */
427 th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
428 th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
429
421 /* be sure to really write fan speed the first time */ 430 /* be sure to really write fan speed the first time */
422 th->last_speed[0] = -2; 431 th->last_speed[0] = -2;
423 th->last_speed[1] = -2; 432 th->last_speed[1] = -2;
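The new pwm_inv[] fields cache the INVERT bit of each fan's MANUAL_MODE register at probe time; every later write masks out whatever the firmware left in that bit and ORs the cached value back in, so a bit corrupted across suspend cannot flip the PWM polarity. The three open-coded sites above all follow the same shape; the helper below only restates that pattern and does not exist in the driver.

/* Illustrative restatement of the read-modify-write pattern used in
 * write_fan_speed(): always preserve the invert bit cached at probe. */
static void write_manual_mode(struct thermostat *th, int fan, u8 set, u8 clear)
{
	u8 manual = read_reg(th, MANUAL_MODE[fan]);

	manual &= ~INVERT_MASK;		/* drop the (possibly corrupted) bit */
	manual |= th->pwm_inv[fan];	/* restore the value seen at probe */
	write_reg(th, MANUAL_MODE[fan], (manual | set) & ~clear);
}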
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 6f308a4757ee..db379c381432 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -36,6 +36,7 @@
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/pm.h> 37#include <linux/pm.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/seq_file.h>
39#include <linux/init.h> 40#include <linux/init.h>
40#include <linux/interrupt.h> 41#include <linux/interrupt.h>
41#include <linux/device.h> 42#include <linux/device.h>
@@ -186,17 +187,11 @@ static int init_pmu(void);
186static void pmu_start(void); 187static void pmu_start(void);
187static irqreturn_t via_pmu_interrupt(int irq, void *arg); 188static irqreturn_t via_pmu_interrupt(int irq, void *arg);
188static irqreturn_t gpio1_interrupt(int irq, void *arg); 189static irqreturn_t gpio1_interrupt(int irq, void *arg);
189static int proc_get_info(char *page, char **start, off_t off, 190static const struct file_operations pmu_info_proc_fops;
190 int count, int *eof, void *data); 191static const struct file_operations pmu_irqstats_proc_fops;
191static int proc_get_irqstats(char *page, char **start, off_t off,
192 int count, int *eof, void *data);
193static void pmu_pass_intr(unsigned char *data, int len); 192static void pmu_pass_intr(unsigned char *data, int len);
194static int proc_get_batt(char *page, char **start, off_t off, 193static const struct file_operations pmu_battery_proc_fops;
195 int count, int *eof, void *data); 194static const struct file_operations pmu_options_proc_fops;
196static int proc_read_options(char *page, char **start, off_t off,
197 int count, int *eof, void *data);
198static int proc_write_options(struct file *file, const char __user *buffer,
199 unsigned long count, void *data);
200 195
201#ifdef CONFIG_ADB 196#ifdef CONFIG_ADB
202struct adb_driver via_pmu_driver = { 197struct adb_driver via_pmu_driver = {
@@ -507,19 +502,15 @@ static int __init via_pmu_dev_init(void)
507 for (i=0; i<pmu_battery_count; i++) { 502 for (i=0; i<pmu_battery_count; i++) {
508 char title[16]; 503 char title[16];
509 sprintf(title, "battery_%ld", i); 504 sprintf(title, "battery_%ld", i);
510 proc_pmu_batt[i] = create_proc_read_entry(title, 0, proc_pmu_root, 505 proc_pmu_batt[i] = proc_create_data(title, 0, proc_pmu_root,
511 proc_get_batt, (void *)i); 506 &pmu_battery_proc_fops, (void *)i);
512 } 507 }
513 508
514 proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root, 509 proc_pmu_info = proc_create("info", 0, proc_pmu_root, &pmu_info_proc_fops);
515 proc_get_info, NULL); 510 proc_pmu_irqstats = proc_create("interrupts", 0, proc_pmu_root,
516 proc_pmu_irqstats = create_proc_read_entry("interrupts", 0, proc_pmu_root, 511 &pmu_irqstats_proc_fops);
517 proc_get_irqstats, NULL); 512 proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
518 proc_pmu_options = create_proc_entry("options", 0600, proc_pmu_root); 513 &pmu_options_proc_fops);
519 if (proc_pmu_options) {
520 proc_pmu_options->read_proc = proc_read_options;
521 proc_pmu_options->write_proc = proc_write_options;
522 }
523 } 514 }
524 return 0; 515 return 0;
525} 516}
@@ -799,27 +790,33 @@ query_battery_state(void)
799 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 790 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
800} 791}
801 792
802static int 793static int pmu_info_proc_show(struct seq_file *m, void *v)
803proc_get_info(char *page, char **start, off_t off,
804 int count, int *eof, void *data)
805{ 794{
806 char* p = page; 795 seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION);
807 796 seq_printf(m, "PMU firmware version : %02x\n", pmu_version);
808 p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION); 797 seq_printf(m, "AC Power : %d\n",
809 p += sprintf(p, "PMU firmware version : %02x\n", pmu_version);
810 p += sprintf(p, "AC Power : %d\n",
811 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); 798 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0);
812 p += sprintf(p, "Battery count : %d\n", pmu_battery_count); 799 seq_printf(m, "Battery count : %d\n", pmu_battery_count);
800
801 return 0;
802}
813 803
814 return p - page; 804static int pmu_info_proc_open(struct inode *inode, struct file *file)
805{
806 return single_open(file, pmu_info_proc_show, NULL);
815} 807}
816 808
817static int 809static const struct file_operations pmu_info_proc_fops = {
818proc_get_irqstats(char *page, char **start, off_t off, 810 .owner = THIS_MODULE,
819 int count, int *eof, void *data) 811 .open = pmu_info_proc_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
815};
816
817static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
820{ 818{
821 int i; 819 int i;
822 char* p = page;
823 static const char *irq_names[] = { 820 static const char *irq_names[] = {
824 "Total CB1 triggered events", 821 "Total CB1 triggered events",
825 "Total GPIO1 triggered events", 822 "Total GPIO1 triggered events",
@@ -835,60 +832,76 @@ proc_get_irqstats(char *page, char **start, off_t off,
835 }; 832 };
836 833
837 for (i=0; i<11; i++) { 834 for (i=0; i<11; i++) {
838 p += sprintf(p, " %2u: %10u (%s)\n", 835 seq_printf(m, " %2u: %10u (%s)\n",
839 i, pmu_irq_stats[i], irq_names[i]); 836 i, pmu_irq_stats[i], irq_names[i]);
840 } 837 }
841 return p - page; 838 return 0;
842} 839}
843 840
844static int 841static int pmu_irqstats_proc_open(struct inode *inode, struct file *file)
845proc_get_batt(char *page, char **start, off_t off,
846 int count, int *eof, void *data)
847{ 842{
848 long batnum = (long)data; 843 return single_open(file, pmu_irqstats_proc_show, NULL);
849 char *p = page; 844}
845
846static const struct file_operations pmu_irqstats_proc_fops = {
847 .owner = THIS_MODULE,
848 .open = pmu_irqstats_proc_open,
849 .read = seq_read,
850 .llseek = seq_lseek,
851 .release = single_release,
852};
853
854static int pmu_battery_proc_show(struct seq_file *m, void *v)
855{
856 long batnum = (long)m->private;
850 857
851 p += sprintf(p, "\n"); 858 seq_putc(m, '\n');
852 p += sprintf(p, "flags : %08x\n", 859 seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags);
853 pmu_batteries[batnum].flags); 860 seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge);
854 p += sprintf(p, "charge : %d\n", 861 seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge);
855 pmu_batteries[batnum].charge); 862 seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage);
856 p += sprintf(p, "max_charge : %d\n", 863 seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage);
857 pmu_batteries[batnum].max_charge); 864 seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining);
858 p += sprintf(p, "current : %d\n", 865 return 0;
859 pmu_batteries[batnum].amperage);
860 p += sprintf(p, "voltage : %d\n",
861 pmu_batteries[batnum].voltage);
862 p += sprintf(p, "time rem. : %d\n",
863 pmu_batteries[batnum].time_remaining);
864
865 return p - page;
866} 866}
867 867
868static int 868static int pmu_battery_proc_open(struct inode *inode, struct file *file)
869proc_read_options(char *page, char **start, off_t off,
870 int count, int *eof, void *data)
871{ 869{
872 char *p = page; 870 return single_open(file, pmu_battery_proc_show, PDE(inode)->data);
871}
873 872
873static const struct file_operations pmu_battery_proc_fops = {
874 .owner = THIS_MODULE,
875 .open = pmu_battery_proc_open,
876 .read = seq_read,
877 .llseek = seq_lseek,
878 .release = single_release,
879};
880
881static int pmu_options_proc_show(struct seq_file *m, void *v)
882{
874#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 883#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
875 if (pmu_kind == PMU_KEYLARGO_BASED && 884 if (pmu_kind == PMU_KEYLARGO_BASED &&
876 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 885 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
877 p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup); 886 seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup);
878#endif 887#endif
879 if (pmu_kind == PMU_KEYLARGO_BASED) 888 if (pmu_kind == PMU_KEYLARGO_BASED)
880 p += sprintf(p, "server_mode=%d\n", option_server_mode); 889 seq_printf(m, "server_mode=%d\n", option_server_mode);
881 890
882 return p - page; 891 return 0;
883} 892}
884 893
885static int 894static int pmu_options_proc_open(struct inode *inode, struct file *file)
886proc_write_options(struct file *file, const char __user *buffer, 895{
887 unsigned long count, void *data) 896 return single_open(file, pmu_options_proc_show, NULL);
897}
898
899static ssize_t pmu_options_proc_write(struct file *file,
900 const char __user *buffer, size_t count, loff_t *pos)
888{ 901{
889 char tmp[33]; 902 char tmp[33];
890 char *label, *val; 903 char *label, *val;
891 unsigned long fcount = count; 904 size_t fcount = count;
892 905
893 if (!count) 906 if (!count)
894 return -EINVAL; 907 return -EINVAL;
@@ -927,6 +940,15 @@ proc_write_options(struct file *file, const char __user *buffer,
927 return fcount; 940 return fcount;
928} 941}
929 942
943static const struct file_operations pmu_options_proc_fops = {
944 .owner = THIS_MODULE,
945 .open = pmu_options_proc_open,
946 .read = seq_read,
947 .llseek = seq_lseek,
948 .release = single_release,
949 .write = pmu_options_proc_write,
950};
951
930#ifdef CONFIG_ADB 952#ifdef CONFIG_ADB
931/* Send an ADB command */ 953/* Send an ADB command */
932static int pmu_send_request(struct adb_request *req, int sync) 954static int pmu_send_request(struct adb_request *req, int sync)
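The via-pmu proc files move from the old read_proc/write_proc callbacks to seq_file-backed file_operations, and the per-battery index now travels through proc_create_data() into PDE(inode)->data and from there into m->private via single_open(). Stripped of driver specifics (the foo_* names below are placeholders, not driver code), the recurring pattern is:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_proc_show(struct seq_file *m, void *v)
{
	long index = (long)m->private;	/* handed over by single_open() */

	seq_printf(m, "index : %ld\n", index);
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, PDE(inode)->data);
}

static const struct file_operations foo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registered with, e.g.:
 *   proc_create_data("foo", 0, parent_dir, &foo_proc_fops, (void *)index);
 */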
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 961fa0e7c2cf..6c68b9e5f5c4 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -202,6 +202,8 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
202 fct->ctrl.name = "cpu-front-fan-1"; 202 fct->ctrl.name = "cpu-front-fan-1";
203 else if (!strcmp(l, "CPU A PUMP")) 203 else if (!strcmp(l, "CPU A PUMP"))
204 fct->ctrl.name = "cpu-pump-0"; 204 fct->ctrl.name = "cpu-pump-0";
205 else if (!strcmp(l, "CPU B PUMP"))
206 fct->ctrl.name = "cpu-pump-1";
205 else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || 207 else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") ||
206 !strcmp(l, "EXPANSION SLOTS INTAKE")) 208 !strcmp(l, "EXPANSION SLOTS INTAKE"))
207 fct->ctrl.name = "slots-fan"; 209 fct->ctrl.name = "slots-fan";
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 0c44d560bf1a..0c7a63c1f12f 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -22,6 +22,8 @@
22#include <linux/mmc/core.h> 22#include <linux/mmc/core.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24 24
25MODULE_LICENSE("GPL");
26
25enum { 27enum {
26 CD_GPIO = 0, 28 CD_GPIO = 0,
27 WP_GPIO, 29 WP_GPIO,
diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h
deleted file mode 100644
index 8e7d1c3edc60..000000000000
--- a/drivers/net/ehea/ehea_hcall.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * linux/drivers/net/ehea/ehea_hcall.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_HCALL_H__
30#define __EHEA_HCALL_H__
31
32/**
33 * This file contains HCALL defines that are to be included in the appropriate
34 * kernel files later
35 */
36
37#define H_ALLOC_HEA_RESOURCE 0x278
38#define H_MODIFY_HEA_QP 0x250
39#define H_QUERY_HEA_QP 0x254
40#define H_QUERY_HEA 0x258
41#define H_QUERY_HEA_PORT 0x25C
42#define H_MODIFY_HEA_PORT 0x260
43#define H_REG_BCMC 0x264
44#define H_DEREG_BCMC 0x268
45#define H_REGISTER_HEA_RPAGES 0x26C
46#define H_DISABLE_AND_GET_HEA 0x270
47#define H_GET_HEA_INFO 0x274
48#define H_ADD_CONN 0x284
49#define H_DEL_CONN 0x288
50
51#endif /* __EHEA_HCALL_H__ */
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index f3628c803567..2f8174c248bc 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -33,7 +33,6 @@
33#include <asm/hvcall.h> 33#include <asm/hvcall.h>
34#include "ehea.h" 34#include "ehea.h"
35#include "ehea_hw.h" 35#include "ehea_hw.h"
36#include "ehea_hcall.h"
37 36
38/* Some abbreviations used here: 37/* Some abbreviations used here:
39 * 38 *
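Dropping ehea_hcall.h removes the driver-local copies of the H_* HEA hypervisor-call numbers; ehea_phyp.h keeps including <asm/hvcall.h>, which presumably provides them now. For orientation only, such pseries hcalls go through the generic plpar_hcall helpers; the fragment below is a hedged illustration, not eHEA code, and its argument layout is a placeholder.

#include <asm/hvcall.h>

/* Illustration only: issue an HEA hcall through the generic pseries
 * interface.  Assumes H_QUERY_HEA is now provided by <asm/hvcall.h>. */
static long query_hea(u64 adapter_handle)
{
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	return plpar_hcall9(H_QUERY_HEA, retbuf, adapter_handle,
			    0, 0, 0, 0, 0, 0, 0, 0);
}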
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 298de0f95d70..d58ade170c4b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -65,47 +65,322 @@ static int of_platform_device_remove(struct device *dev)
65 return 0; 65 return 0;
66} 66}
67 67
68static int of_platform_device_suspend(struct device *dev, pm_message_t state) 68static void of_platform_device_shutdown(struct device *dev)
69{ 69{
70 struct of_device *of_dev = to_of_device(dev); 70 struct of_device *of_dev = to_of_device(dev);
71 struct of_platform_driver *drv = to_of_platform_driver(dev->driver); 71 struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
72 int error = 0;
73 72
74 if (dev->driver && drv->suspend) 73 if (dev->driver && drv->shutdown)
75 error = drv->suspend(of_dev, state); 74 drv->shutdown(of_dev);
76 return error;
77} 75}
78 76
79static int of_platform_device_resume(struct device * dev) 77#ifdef CONFIG_PM_SLEEP
78
79static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg)
80{ 80{
81 struct of_device *of_dev = to_of_device(dev); 81 struct of_device *of_dev = to_of_device(dev);
82 struct of_platform_driver *drv = to_of_platform_driver(dev->driver); 82 struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
83 int error = 0; 83 int ret = 0;
84 84
85 if (dev->driver && drv->resume) 85 if (dev->driver && drv->suspend)
86 error = drv->resume(of_dev); 86 ret = drv->suspend(of_dev, mesg);
87 return error; 87 return ret;
88} 88}
89 89
90static void of_platform_device_shutdown(struct device *dev) 90static int of_platform_legacy_resume(struct device *dev)
91{ 91{
92 struct of_device *of_dev = to_of_device(dev); 92 struct of_device *of_dev = to_of_device(dev);
93 struct of_platform_driver *drv = to_of_platform_driver(dev->driver); 93 struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
94 int ret = 0;
94 95
95 if (dev->driver && drv->shutdown) 96 if (dev->driver && drv->resume)
96 drv->shutdown(of_dev); 97 ret = drv->resume(of_dev);
98 return ret;
99}
100
101static int of_platform_pm_prepare(struct device *dev)
102{
103 struct device_driver *drv = dev->driver;
104 int ret = 0;
105
106 if (drv && drv->pm && drv->pm->prepare)
107 ret = drv->pm->prepare(dev);
108
109 return ret;
110}
111
112static void of_platform_pm_complete(struct device *dev)
113{
114 struct device_driver *drv = dev->driver;
115
116 if (drv && drv->pm && drv->pm->complete)
117 drv->pm->complete(dev);
118}
119
120#ifdef CONFIG_SUSPEND
121
122static int of_platform_pm_suspend(struct device *dev)
123{
124 struct device_driver *drv = dev->driver;
125 int ret = 0;
126
127 if (!drv)
128 return 0;
129
130 if (drv->pm) {
131 if (drv->pm->suspend)
132 ret = drv->pm->suspend(dev);
133 } else {
134 ret = of_platform_legacy_suspend(dev, PMSG_SUSPEND);
135 }
136
137 return ret;
97} 138}
98 139
140static int of_platform_pm_suspend_noirq(struct device *dev)
141{
142 struct device_driver *drv = dev->driver;
143 int ret = 0;
144
145 if (!drv)
146 return 0;
147
148 if (drv->pm) {
149 if (drv->pm->suspend_noirq)
150 ret = drv->pm->suspend_noirq(dev);
151 }
152
153 return ret;
154}
155
156static int of_platform_pm_resume(struct device *dev)
157{
158 struct device_driver *drv = dev->driver;
159 int ret = 0;
160
161 if (!drv)
162 return 0;
163
164 if (drv->pm) {
165 if (drv->pm->resume)
166 ret = drv->pm->resume(dev);
167 } else {
168 ret = of_platform_legacy_resume(dev);
169 }
170
171 return ret;
172}
173
174static int of_platform_pm_resume_noirq(struct device *dev)
175{
176 struct device_driver *drv = dev->driver;
177 int ret = 0;
178
179 if (!drv)
180 return 0;
181
182 if (drv->pm) {
183 if (drv->pm->resume_noirq)
184 ret = drv->pm->resume_noirq(dev);
185 }
186
187 return ret;
188}
189
190#else /* !CONFIG_SUSPEND */
191
192#define of_platform_pm_suspend NULL
193#define of_platform_pm_resume NULL
194#define of_platform_pm_suspend_noirq NULL
195#define of_platform_pm_resume_noirq NULL
196
197#endif /* !CONFIG_SUSPEND */
198
199#ifdef CONFIG_HIBERNATION
200
201static int of_platform_pm_freeze(struct device *dev)
202{
203 struct device_driver *drv = dev->driver;
204 int ret = 0;
205
206 if (!drv)
207 return 0;
208
209 if (drv->pm) {
210 if (drv->pm->freeze)
211 ret = drv->pm->freeze(dev);
212 } else {
213 ret = of_platform_legacy_suspend(dev, PMSG_FREEZE);
214 }
215
216 return ret;
217}
218
219static int of_platform_pm_freeze_noirq(struct device *dev)
220{
221 struct device_driver *drv = dev->driver;
222 int ret = 0;
223
224 if (!drv)
225 return 0;
226
227 if (drv->pm) {
228 if (drv->pm->freeze_noirq)
229 ret = drv->pm->freeze_noirq(dev);
230 }
231
232 return ret;
233}
234
235static int of_platform_pm_thaw(struct device *dev)
236{
237 struct device_driver *drv = dev->driver;
238 int ret = 0;
239
240 if (!drv)
241 return 0;
242
243 if (drv->pm) {
244 if (drv->pm->thaw)
245 ret = drv->pm->thaw(dev);
246 } else {
247 ret = of_platform_legacy_resume(dev);
248 }
249
250 return ret;
251}
252
253static int of_platform_pm_thaw_noirq(struct device *dev)
254{
255 struct device_driver *drv = dev->driver;
256 int ret = 0;
257
258 if (!drv)
259 return 0;
260
261 if (drv->pm) {
262 if (drv->pm->thaw_noirq)
263 ret = drv->pm->thaw_noirq(dev);
264 }
265
266 return ret;
267}
268
269static int of_platform_pm_poweroff(struct device *dev)
270{
271 struct device_driver *drv = dev->driver;
272 int ret = 0;
273
274 if (!drv)
275 return 0;
276
277 if (drv->pm) {
278 if (drv->pm->poweroff)
279 ret = drv->pm->poweroff(dev);
280 } else {
281 ret = of_platform_legacy_suspend(dev, PMSG_HIBERNATE);
282 }
283
284 return ret;
285}
286
287static int of_platform_pm_poweroff_noirq(struct device *dev)
288{
289 struct device_driver *drv = dev->driver;
290 int ret = 0;
291
292 if (!drv)
293 return 0;
294
295 if (drv->pm) {
296 if (drv->pm->poweroff_noirq)
297 ret = drv->pm->poweroff_noirq(dev);
298 }
299
300 return ret;
301}
302
303static int of_platform_pm_restore(struct device *dev)
304{
305 struct device_driver *drv = dev->driver;
306 int ret = 0;
307
308 if (!drv)
309 return 0;
310
311 if (drv->pm) {
312 if (drv->pm->restore)
313 ret = drv->pm->restore(dev);
314 } else {
315 ret = of_platform_legacy_resume(dev);
316 }
317
318 return ret;
319}
320
321static int of_platform_pm_restore_noirq(struct device *dev)
322{
323 struct device_driver *drv = dev->driver;
324 int ret = 0;
325
326 if (!drv)
327 return 0;
328
329 if (drv->pm) {
330 if (drv->pm->restore_noirq)
331 ret = drv->pm->restore_noirq(dev);
332 }
333
334 return ret;
335}
336
337#else /* !CONFIG_HIBERNATION */
338
339#define of_platform_pm_freeze NULL
340#define of_platform_pm_thaw NULL
341#define of_platform_pm_poweroff NULL
342#define of_platform_pm_restore NULL
343#define of_platform_pm_freeze_noirq NULL
344#define of_platform_pm_thaw_noirq NULL
345#define of_platform_pm_poweroff_noirq NULL
346#define of_platform_pm_restore_noirq NULL
347
348#endif /* !CONFIG_HIBERNATION */
349
350static struct dev_pm_ops of_platform_dev_pm_ops = {
351 .prepare = of_platform_pm_prepare,
352 .complete = of_platform_pm_complete,
353 .suspend = of_platform_pm_suspend,
354 .resume = of_platform_pm_resume,
355 .freeze = of_platform_pm_freeze,
356 .thaw = of_platform_pm_thaw,
357 .poweroff = of_platform_pm_poweroff,
358 .restore = of_platform_pm_restore,
359 .suspend_noirq = of_platform_pm_suspend_noirq,
360 .resume_noirq = of_platform_pm_resume_noirq,
361 .freeze_noirq = of_platform_pm_freeze_noirq,
362 .thaw_noirq = of_platform_pm_thaw_noirq,
363 .poweroff_noirq = of_platform_pm_poweroff_noirq,
364 .restore_noirq = of_platform_pm_restore_noirq,
365};
366
367#define OF_PLATFORM_PM_OPS_PTR (&of_platform_dev_pm_ops)
368
369#else /* !CONFIG_PM_SLEEP */
370
371#define OF_PLATFORM_PM_OPS_PTR NULL
372
373#endif /* !CONFIG_PM_SLEEP */
374
99int of_bus_type_init(struct bus_type *bus, const char *name) 375int of_bus_type_init(struct bus_type *bus, const char *name)
100{ 376{
101 bus->name = name; 377 bus->name = name;
102 bus->match = of_platform_bus_match; 378 bus->match = of_platform_bus_match;
103 bus->probe = of_platform_device_probe; 379 bus->probe = of_platform_device_probe;
104 bus->remove = of_platform_device_remove; 380 bus->remove = of_platform_device_remove;
105 bus->suspend = of_platform_device_suspend;
106 bus->resume = of_platform_device_resume;
107 bus->shutdown = of_platform_device_shutdown; 381 bus->shutdown = of_platform_device_shutdown;
108 bus->dev_attrs = of_platform_device_attrs; 382 bus->dev_attrs = of_platform_device_attrs;
383 bus->pm = OF_PLATFORM_PM_OPS_PTR;
109 return bus_register(bus); 384 return bus_register(bus);
110} 385}
111 386
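The OF platform bus now publishes a full dev_pm_ops table and only falls back to the legacy suspend/resume members of of_platform_driver when a driver supplies no drv->pm. One way a driver can opt in, assuming it registers through of_register_platform_driver() so the embedded device_driver (and its .pm pointer) is what the bus sees, is sketched below with placeholder foo_* names.

#include <linux/of_platform.h>
#include <linux/pm.h>

static int foo_probe(struct of_device *op, const struct of_device_id *match)
{
	return 0;	/* map registers, request IRQs, ... */
}

static int foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware */
}

static int foo_resume(struct device *dev)
{
	return 0;	/* reprogram the hardware */
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo", },
	{}
};

static struct of_platform_driver foo_driver = {
	.name		= "foo",
	.match_table	= foo_match,
	.probe		= foo_probe,
	.driver		= {
		.pm	= &foo_pm_ops,
	},
};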
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4b6f7cba3b3d..28fce65b8594 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -133,6 +133,14 @@ config SPI_LM70_LLP
133 which interfaces to an LM70 temperature sensor using 133 which interfaces to an LM70 temperature sensor using
134 a parallel port. 134 a parallel port.
135 135
136config SPI_MPC52xx
137 tristate "Freescale MPC52xx SPI (non-PSC) controller support"
138 depends on PPC_MPC52xx && SPI
139 select SPI_MASTER_OF
140 help
141 This driver supports the MPC52xx SPI controller in master SPI
142 mode.
143
136config SPI_MPC52xx_PSC 144config SPI_MPC52xx_PSC
137 tristate "Freescale MPC52xx PSC SPI controller" 145 tristate "Freescale MPC52xx PSC SPI controller"
138 depends on PPC_MPC52xx && EXPERIMENTAL 146 depends on PPC_MPC52xx && EXPERIMENTAL
@@ -147,9 +155,6 @@ config SPI_MPC8xxx
147 This enables using the Freescale MPC8xxx SPI controllers in master 155 This enables using the Freescale MPC8xxx SPI controllers in master
148 mode. 156 mode.
149 157
150 This driver uses a simple set of shift registers for data (opposed
151 to the CPM based descriptor model).
152
153config SPI_OMAP_UWIRE 158config SPI_OMAP_UWIRE
154 tristate "OMAP1 MicroWire" 159 tristate "OMAP1 MicroWire"
155 depends on ARCH_OMAP1 160 depends on ARCH_OMAP1
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 21a118269cac..e3f092a9afa5 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
25obj-$(CONFIG_SPI_ORION) += orion_spi.o 25obj-$(CONFIG_SPI_ORION) += orion_spi.o
26obj-$(CONFIG_SPI_PL022) += amba-pl022.o 26obj-$(CONFIG_SPI_PL022) += amba-pl022.o
27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
28obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
28obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o 29obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
29obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o 30obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
30obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 31obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 1b74d5ca03f3..f50c81df336a 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/of_spi.h>
20#include <linux/workqueue.h> 21#include <linux/workqueue.h>
21#include <linux/completion.h> 22#include <linux/completion.h>
22#include <linux/io.h> 23#include <linux/io.h>
@@ -313,11 +314,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
313 struct mpc52xx_psc __iomem *psc = mps->psc; 314 struct mpc52xx_psc __iomem *psc = mps->psc;
314 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; 315 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
315 u32 mclken_div; 316 u32 mclken_div;
316 int ret = 0; 317 int ret;
317 318
318 /* default sysclk is 512MHz */ 319 /* default sysclk is 512MHz */
319 mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; 320 mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
320 mpc52xx_set_psc_clkdiv(psc_id, mclken_div); 321 ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
322 if (ret)
323 return ret;
321 324
322 /* Reset the PSC into a known state */ 325 /* Reset the PSC into a known state */
323 out_8(&psc->command, MPC52xx_PSC_RST_RX); 326 out_8(&psc->command, MPC52xx_PSC_RST_RX);
@@ -341,7 +344,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
341 344
342 mps->bits_per_word = 8; 345 mps->bits_per_word = 8;
343 346
344 return ret; 347 return 0;
345} 348}
346 349
347static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) 350static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
@@ -410,8 +413,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
410 goto free_master; 413 goto free_master;
411 414
412 ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); 415 ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
413 if (ret < 0) 416 if (ret < 0) {
417 dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
414 goto free_irq; 418 goto free_irq;
419 }
415 420
416 spin_lock_init(&mps->lock); 421 spin_lock_init(&mps->lock);
417 init_completion(&mps->done); 422 init_completion(&mps->done);
@@ -464,10 +469,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
464 const u32 *regaddr_p; 469 const u32 *regaddr_p;
465 u64 regaddr64, size64; 470 u64 regaddr64, size64;
466 s16 id = -1; 471 s16 id = -1;
472 int rc;
467 473
468 regaddr_p = of_get_address(op->node, 0, &size64, NULL); 474 regaddr_p = of_get_address(op->node, 0, &size64, NULL);
469 if (!regaddr_p) { 475 if (!regaddr_p) {
470 printk(KERN_ERR "Invalid PSC address\n"); 476 dev_err(&op->dev, "Invalid PSC address\n");
471 return -EINVAL; 477 return -EINVAL;
472 } 478 }
473 regaddr64 = of_translate_address(op->node, regaddr_p); 479 regaddr64 = of_translate_address(op->node, regaddr_p);
@@ -478,15 +484,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
478 484
479 psc_nump = of_get_property(op->node, "cell-index", NULL); 485 psc_nump = of_get_property(op->node, "cell-index", NULL);
480 if (!psc_nump || *psc_nump > 5) { 486 if (!psc_nump || *psc_nump > 5) {
481 printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid " 487 dev_err(&op->dev, "Invalid cell-index property\n");
482 "cell-index property\n", op->node->full_name);
483 return -EINVAL; 488 return -EINVAL;
484 } 489 }
485 id = *psc_nump + 1; 490 id = *psc_nump + 1;
486 } 491 }
487 492
488 return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, 493 rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
489 irq_of_parse_and_map(op->node, 0), id); 494 irq_of_parse_and_map(op->node, 0), id);
495 if (rc == 0)
496 of_register_spi_devices(dev_get_drvdata(&op->dev), op->node);
497
498 return rc;
490} 499}
491 500
492static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) 501static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
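Besides propagating mpc52xx_set_psc_clkdiv() failures, the probe path now registers any SPI slaves described under the controller's device-tree node once the master is up, using of_register_spi_devices() from <linux/of_spi.h>; the new mpc52xx_spi driver below does the same. Reduced to its essentials (and only as a sketch), the shared registration tail is:

/* Sketch of the registration tail: create spi_device children from the
 * device tree once the master has registered successfully. */
static int register_master_with_dt_children(struct spi_master *master,
					    struct device_node *np)
{
	int rc = spi_register_master(master);

	if (rc == 0)
		of_register_spi_devices(master, np);
	return rc;
}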
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
new file mode 100644
index 000000000000..ef8379b2c172
--- /dev/null
+++ b/drivers/spi/mpc52xx_spi.c
@@ -0,0 +1,520 @@
1/*
2 * MPC52xx SPI bus driver.
3 *
4 * Copyright (C) 2008 Secret Lab Technologies Ltd.
5 *
6 * This file is released under the GPLv2
7 *
8 * This is the driver for the MPC5200's dedicated SPI controller.
9 *
10 * Note: this driver does not support the MPC5200 PSC in SPI mode. For
11 * that driver see drivers/spi/mpc52xx_psc_spi.c
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/of_platform.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/spi/spi.h>
21#include <linux/spi/mpc52xx_spi.h>
22#include <linux/of_spi.h>
23#include <linux/io.h>
24#include <asm/time.h>
25#include <asm/mpc52xx.h>
26
27MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
28MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
29MODULE_LICENSE("GPL");
30
31/* Register offsets */
32#define SPI_CTRL1 0x00
33#define SPI_CTRL1_SPIE (1 << 7)
34#define SPI_CTRL1_SPE (1 << 6)
35#define SPI_CTRL1_MSTR (1 << 4)
36#define SPI_CTRL1_CPOL (1 << 3)
37#define SPI_CTRL1_CPHA (1 << 2)
38#define SPI_CTRL1_SSOE (1 << 1)
39#define SPI_CTRL1_LSBFE (1 << 0)
40
41#define SPI_CTRL2 0x01
42#define SPI_BRR 0x04
43
44#define SPI_STATUS 0x05
45#define SPI_STATUS_SPIF (1 << 7)
46#define SPI_STATUS_WCOL (1 << 6)
47#define SPI_STATUS_MODF (1 << 4)
48
49#define SPI_DATA 0x09
50#define SPI_PORTDATA 0x0d
51#define SPI_DATADIR 0x10
52
53/* FSM state return values */
54#define FSM_STOP 0 /* Nothing more for the state machine to */
55 /* do. If something interesting happens */
56 /* then an IRQ will be received */
57#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
58 /* not expected */
59#define FSM_CONTINUE 2 /* Keep iterating the state machine */
60
61/* Driver internal data */
62struct mpc52xx_spi {
63 struct spi_master *master;
64 u32 sysclk;
65 void __iomem *regs;
66 int irq0; /* MODF irq */
67 int irq1; /* SPIF irq */
68 int ipb_freq;
69
70 /* Statistics */
71 int msg_count;
72 int wcol_count;
73 int wcol_ticks;
74 u32 wcol_tx_timestamp;
75 int modf_count;
76 int byte_count;
77
78 struct list_head queue; /* queue of pending messages */
79 spinlock_t lock;
80 struct work_struct work;
81
82
83 /* Details of current transfer (length, and buffer pointers) */
84 struct spi_message *message; /* current message */
85 struct spi_transfer *transfer; /* current transfer */
86 int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
87 int len;
88 int timestamp;
89 u8 *rx_buf;
90 const u8 *tx_buf;
91 int cs_change;
92};
93
94/*
95 * CS control function
96 */
97static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
98{
99 out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
100}
101
102/*
103 * Start a new transfer. This is called both by the idle state
104 * for the first transfer in a message, and by the wait state when the
105 * previous transfer in a message is complete.
106 */
107static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
108{
109 ms->rx_buf = ms->transfer->rx_buf;
110 ms->tx_buf = ms->transfer->tx_buf;
111 ms->len = ms->transfer->len;
112
113 /* Activate the chip select */
114 if (ms->cs_change)
115 mpc52xx_spi_chipsel(ms, 1);
116 ms->cs_change = ms->transfer->cs_change;
117
118 /* Write out the first byte */
119 ms->wcol_tx_timestamp = get_tbl();
120 if (ms->tx_buf)
121 out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
122 else
123 out_8(ms->regs + SPI_DATA, 0);
124}
125
126/* Forward declaration of state handlers */
127static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
128 u8 status, u8 data);
129static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
130 u8 status, u8 data);
131
132/*
133 * IDLE state
134 *
135 * No transfers are in progress; if another transfer is pending then retrieve
136 * it and kick it off. Otherwise, stop processing the state machine
137 */
138static int
139mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
140{
141 struct spi_device *spi;
142 int spr, sppr;
143 u8 ctrl1;
144
145 if (status && (irq != NO_IRQ))
146 dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
147 status);
148
149 /* Check if there is another transfer waiting. */
150 if (list_empty(&ms->queue))
151 return FSM_STOP;
152
153 /* get the head of the queue */
154 ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
155 list_del_init(&ms->message->queue);
156
157 /* Setup the controller parameters */
158 ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
159 spi = ms->message->spi;
160 if (spi->mode & SPI_CPHA)
161 ctrl1 |= SPI_CTRL1_CPHA;
162 if (spi->mode & SPI_CPOL)
163 ctrl1 |= SPI_CTRL1_CPOL;
164 if (spi->mode & SPI_LSB_FIRST)
165 ctrl1 |= SPI_CTRL1_LSBFE;
166 out_8(ms->regs + SPI_CTRL1, ctrl1);
167
168 /* Setup the controller speed */
169 /* minimum divider is '2'. Also, add '1' to force rounding the
170 * divider up. */
171 sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
172 spr = 0;
173 if (sppr < 1)
174 sppr = 1;
175 while (((sppr - 1) & ~0x7) != 0) {
176 sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
177 spr++;
178 }
179 sppr--; /* sppr quantity in register is offset by 1 */
180 if (spr > 7) {
181 /* Don't overrun limits of SPI baudrate register */
182 spr = 7;
183 sppr = 7;
184 }
185 out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
186
187 ms->cs_change = 1;
188 ms->transfer = container_of(ms->message->transfers.next,
189 struct spi_transfer, transfer_list);
190
191 mpc52xx_spi_start_transfer(ms);
192 ms->state = mpc52xx_spi_fsmstate_transfer;
193
194 return FSM_CONTINUE;
195}
196
197/*
198 * TRANSFER state
199 *
200 * In the middle of a transfer. If the SPI core has completed processing
201 * a byte, then read out the received data and write out the next byte
202 * (unless this transfer is finished; in which case go on to the wait
203 * state)
204 */
205static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
206 u8 status, u8 data)
207{
208 if (!status)
209 return ms->irq0 ? FSM_STOP : FSM_POLL;
210
211 if (status & SPI_STATUS_WCOL) {
212 /* The SPI controller is stoopid. At slower speeds, it may
213 * raise the SPIF flag before the state machine is actually
214 * finished, which causes a collision (internal to the state
215 * machine only). The manual recommends inserting a delay
216 * between receiving the interrupt and sending the next byte,
217 * but it can also be worked around simply by retrying the
218 * transfer which is what we do here. */
219 ms->wcol_count++;
220 ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
221 ms->wcol_tx_timestamp = get_tbl();
222 data = 0;
223 if (ms->tx_buf)
224 data = *(ms->tx_buf-1);
225 out_8(ms->regs + SPI_DATA, data); /* try again */
226 return FSM_CONTINUE;
227 } else if (status & SPI_STATUS_MODF) {
228 ms->modf_count++;
229 dev_err(&ms->master->dev, "mode fault\n");
230 mpc52xx_spi_chipsel(ms, 0);
231 ms->message->status = -EIO;
232 ms->message->complete(ms->message->context);
233 ms->state = mpc52xx_spi_fsmstate_idle;
234 return FSM_CONTINUE;
235 }
236
237 /* Read data out of the spi device */
238 ms->byte_count++;
239 if (ms->rx_buf)
240 *ms->rx_buf++ = data;
241
242 /* Is the transfer complete? */
243 ms->len--;
244 if (ms->len == 0) {
245 ms->timestamp = get_tbl();
246 ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
247 ms->state = mpc52xx_spi_fsmstate_wait;
248 return FSM_CONTINUE;
249 }
250
251 /* Write out the next byte */
252 ms->wcol_tx_timestamp = get_tbl();
253 if (ms->tx_buf)
254 out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
255 else
256 out_8(ms->regs + SPI_DATA, 0);
257
258 return FSM_CONTINUE;
259}
260
261/*
262 * WAIT state
263 *
264 * A transfer has completed; need to wait for the delay period to complete
265 * before starting the next transfer
266 */
267static int
268mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
269{
270 if (status && irq)
271 dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
272 status);
273
274 if (((int)get_tbl()) - ms->timestamp < 0)
275 return FSM_POLL;
276
277 ms->message->actual_length += ms->transfer->len;
278
279 /* Check if there is another transfer in this message. If there
280 * aren't then deactivate CS, notify sender, and drop back to idle
281 * to start the next message. */
282 if (ms->transfer->transfer_list.next == &ms->message->transfers) {
283 ms->msg_count++;
284 mpc52xx_spi_chipsel(ms, 0);
285 ms->message->status = 0;
286 ms->message->complete(ms->message->context);
287 ms->state = mpc52xx_spi_fsmstate_idle;
288 return FSM_CONTINUE;
289 }
290
291 /* There is another transfer; kick it off */
292
293 if (ms->cs_change)
294 mpc52xx_spi_chipsel(ms, 0);
295
296 ms->transfer = container_of(ms->transfer->transfer_list.next,
297 struct spi_transfer, transfer_list);
298 mpc52xx_spi_start_transfer(ms);
299 ms->state = mpc52xx_spi_fsmstate_transfer;
300 return FSM_CONTINUE;
301}
302
303/**
304 * mpc52xx_spi_fsm_process - Finite State Machine iteration function
305 * @irq: irq number that triggered the FSM or 0 for polling
306 * @ms: pointer to mpc52xx_spi driver data
307 */
308static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
309{
310 int rc = FSM_CONTINUE;
311 u8 status, data;
312
313 while (rc == FSM_CONTINUE) {
314 /* Interrupt cleared by read of STATUS followed by
315 * read of DATA registers */
316 status = in_8(ms->regs + SPI_STATUS);
317 data = in_8(ms->regs + SPI_DATA);
318 rc = ms->state(irq, ms, status, data);
319 }
320
321 if (rc == FSM_POLL)
322 schedule_work(&ms->work);
323}
324
325/**
326 * mpc52xx_spi_irq - IRQ handler
327 */
328static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
329{
330 struct mpc52xx_spi *ms = _ms;
331 spin_lock(&ms->lock);
332 mpc52xx_spi_fsm_process(irq, ms);
333 spin_unlock(&ms->lock);
334 return IRQ_HANDLED;
335}
336
337/**
338 * mpc52xx_spi_wq - Workqueue function for polling the state machine
339 */
340static void mpc52xx_spi_wq(struct work_struct *work)
341{
342 struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
343 unsigned long flags;
344
345 spin_lock_irqsave(&ms->lock, flags);
346 mpc52xx_spi_fsm_process(0, ms);
347 spin_unlock_irqrestore(&ms->lock, flags);
348}
349
350/*
351 * spi_master ops
352 */
353
354static int mpc52xx_spi_setup(struct spi_device *spi)
355{
356 if (spi->bits_per_word % 8)
357 return -EINVAL;
358
359 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
360 return -EINVAL;
361
362 if (spi->chip_select >= spi->master->num_chipselect)
363 return -EINVAL;
364
365 return 0;
366}
367
368static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
369{
370 struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
371 unsigned long flags;
372
373 m->actual_length = 0;
374 m->status = -EINPROGRESS;
375
376 spin_lock_irqsave(&ms->lock, flags);
377 list_add_tail(&m->queue, &ms->queue);
378 spin_unlock_irqrestore(&ms->lock, flags);
379 schedule_work(&ms->work);
380
381 return 0;
382}
383
384/*
385 * OF Platform Bus Binding
386 */
387static int __devinit mpc52xx_spi_probe(struct of_device *op,
388 const struct of_device_id *match)
389{
390 struct spi_master *master;
391 struct mpc52xx_spi *ms;
392 void __iomem *regs;
393 int rc;
394
395 /* MMIO registers */
396 dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
397 regs = of_iomap(op->node, 0);
398 if (!regs)
399 return -ENODEV;
400
401 /* initialize the device */
402 out_8(regs+SPI_CTRL1, SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR);
403 out_8(regs + SPI_CTRL2, 0x0);
404 out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
405 out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
406
407 /* Clear the status register and re-read it to check for a MODF
408 * failure. This driver cannot currently handle multiple masters
409 * on the SPI bus. This fault will also occur if the SPI signals
410 * are not connected to any pins (port_config setting) */
411 in_8(regs + SPI_STATUS);
412 in_8(regs + SPI_DATA);
413 if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
414 dev_err(&op->dev, "mode fault; is port_config correct?\n");
415 rc = -EIO;
416 goto err_init;
417 }
418
419 dev_dbg(&op->dev, "allocating spi_master struct\n");
420 master = spi_alloc_master(&op->dev, sizeof *ms);
421 if (!master) {
422 rc = -ENOMEM;
423 goto err_alloc;
424 }
425 master->bus_num = -1;
426 master->num_chipselect = 1;
427 master->setup = mpc52xx_spi_setup;
428 master->transfer = mpc52xx_spi_transfer;
429 dev_set_drvdata(&op->dev, master);
430
431 ms = spi_master_get_devdata(master);
432 ms->master = master;
433 ms->regs = regs;
434 ms->irq0 = irq_of_parse_and_map(op->node, 0);
435 ms->irq1 = irq_of_parse_and_map(op->node, 1);
436 ms->state = mpc52xx_spi_fsmstate_idle;
437 ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
438 spin_lock_init(&ms->lock);
439 INIT_LIST_HEAD(&ms->queue);
440 INIT_WORK(&ms->work, mpc52xx_spi_wq);
441
442 /* Decide if interrupts can be used */
443 if (ms->irq0 && ms->irq1) {
444 rc = request_irq(ms->irq0, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
445 "mpc5200-spi-modf", ms);
446 rc |= request_irq(ms->irq1, mpc52xx_spi_irq, IRQF_SAMPLE_RANDOM,
447 "mpc5200-spi-spiF", ms);
448 if (rc) {
449 free_irq(ms->irq0, ms);
450 free_irq(ms->irq1, ms);
451 ms->irq0 = ms->irq1 = 0;
452 }
453 } else {
454 /* operate in polled mode */
455 ms->irq0 = ms->irq1 = 0;
456 }
457
458 if (!ms->irq0)
459 dev_info(&op->dev, "using polled mode\n");
460
461 dev_dbg(&op->dev, "registering spi_master struct\n");
462 rc = spi_register_master(master);
463 if (rc)
464 goto err_register;
465
466 of_register_spi_devices(master, op->node);
467 dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
468
469 return rc;
470
471 err_register:
472 dev_err(&ms->master->dev, "initialization failed\n");
473 spi_master_put(master);
474 err_alloc:
475 err_init:
476 iounmap(regs);
477 return rc;
478}
479
480static int __devexit mpc52xx_spi_remove(struct of_device *op)
481{
482 struct spi_master *master = dev_get_drvdata(&op->dev);
483 struct mpc52xx_spi *ms = spi_master_get_devdata(master);
484
485 free_irq(ms->irq0, ms);
486 free_irq(ms->irq1, ms);
487
488 spi_unregister_master(master);
489 spi_master_put(master);
490 iounmap(ms->regs);
491
492 return 0;
493}
494
495static struct of_device_id mpc52xx_spi_match[] __devinitdata = {
496 { .compatible = "fsl,mpc5200-spi", },
497 {}
498};
499MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
500
501static struct of_platform_driver mpc52xx_spi_of_driver = {
502 .owner = THIS_MODULE,
503 .name = "mpc52xx-spi",
504 .match_table = mpc52xx_spi_match,
505 .probe = mpc52xx_spi_probe,
506 .remove = __exit_p(mpc52xx_spi_remove),
507};
508
509static int __init mpc52xx_spi_init(void)
510{
511 return of_register_platform_driver(&mpc52xx_spi_of_driver);
512}
513module_init(mpc52xx_spi_init);
514
515static void __exit mpc52xx_spi_exit(void)
516{
517 of_unregister_platform_driver(&mpc52xx_spi_of_driver);
518}
519module_exit(mpc52xx_spi_exit);
520
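The SPI_BRR value written in mpc52xx_spi_fsmstate_idle() packs two fields, SPPR in bits 6:4 and SPR in bits 2:0. The helper below restates that computation and adds one worked example; the divisor interpretation in the final comment assumes the usual MC9S12-style SPI baud register, (SPPR + 1) * 2^(SPR + 1), which the patch itself does not state.

/* Standalone restatement of the divider math above, for illustration. */
static u8 mpc52xx_spi_calc_brr(unsigned int ipb_freq, unsigned int speed_hz)
{
	int sppr, spr = 0;

	/* minimum divider is 2; the +1 forces rounding up */
	sppr = ((ipb_freq / speed_hz) + 1) >> 1;
	if (sppr < 1)
		sppr = 1;
	while (((sppr - 1) & ~0x7) != 0) {
		sppr = (sppr + 1) >> 1;
		spr++;
	}
	sppr--;			/* register field is offset by one */
	if (spr > 7) {
		spr = 7;	/* clamp to the register limits */
		sppr = 7;
	}

	/*
	 * Example: ipb_freq = 132 MHz, speed_hz = 1 MHz
	 *   sppr: 66 -> 33 -> 17 -> 9 -> 5 (spr ends at 4), then sppr-- = 4
	 *   BRR = 0x44; assuming a divisor of (4 + 1) * 2^(4 + 1) = 160,
	 *   SCK comes out near 825 kHz, just under the requested 1 MHz.
	 */
	return (sppr << 4) | spr;
}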
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 0fd0ec4d3a7d..930135dc73ba 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -5,6 +5,10 @@
5 * 5 *
6 * Copyright (C) 2006 Polycom, Inc. 6 * Copyright (C) 2006 Polycom, Inc.
7 * 7 *
8 * CPM SPI and QE buffer descriptors mode support:
9 * Copyright (c) 2009 MontaVista Software, Inc.
10 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -27,6 +31,9 @@
27#include <linux/spi/spi_bitbang.h> 31#include <linux/spi/spi_bitbang.h>
28#include <linux/platform_device.h> 32#include <linux/platform_device.h>
29#include <linux/fsl_devices.h> 33#include <linux/fsl_devices.h>
34#include <linux/dma-mapping.h>
35#include <linux/mm.h>
36#include <linux/mutex.h>
30#include <linux/of.h> 37#include <linux/of.h>
31#include <linux/of_platform.h> 38#include <linux/of_platform.h>
32#include <linux/gpio.h> 39#include <linux/gpio.h>
@@ -34,8 +41,19 @@
34#include <linux/of_spi.h> 41#include <linux/of_spi.h>
35 42
36#include <sysdev/fsl_soc.h> 43#include <sysdev/fsl_soc.h>
44#include <asm/cpm.h>
45#include <asm/qe.h>
37#include <asm/irq.h> 46#include <asm/irq.h>
38 47
48/* CPM1 and CPM2 are mutually exclusive. */
49#ifdef CONFIG_CPM1
50#include <asm/cpm1.h>
51#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
52#else
53#include <asm/cpm2.h>
54#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
55#endif
56
39/* SPI Controller registers */ 57/* SPI Controller registers */
40struct mpc8xxx_spi_reg { 58struct mpc8xxx_spi_reg {
41 u8 res1[0x20]; 59 u8 res1[0x20];
@@ -47,6 +65,28 @@ struct mpc8xxx_spi_reg {
47 __be32 receive; 65 __be32 receive;
48}; 66};
49 67
68/* SPI Parameter RAM */
69struct spi_pram {
70 __be16 rbase; /* Rx Buffer descriptor base address */
71 __be16 tbase; /* Tx Buffer descriptor base address */
72 u8 rfcr; /* Rx function code */
73 u8 tfcr; /* Tx function code */
74 __be16 mrblr; /* Max receive buffer length */
75 __be32 rstate; /* Internal */
76 __be32 rdp; /* Internal */
77 __be16 rbptr; /* Internal */
78 __be16 rbc; /* Internal */
79 __be32 rxtmp; /* Internal */
80 __be32 tstate; /* Internal */
81 __be32 tdp; /* Internal */
82 __be16 tbptr; /* Internal */
83 __be16 tbc; /* Internal */
84 __be32 txtmp; /* Internal */
85 __be32 res; /* Tx temp. */
86 __be16 rpbase; /* Relocation pointer (CPM1 only) */
87 __be16 res1; /* Reserved */
88};
89
50/* SPI Controller mode register definitions */ 90/* SPI Controller mode register definitions */
51#define SPMODE_LOOP (1 << 30) 91#define SPMODE_LOOP (1 << 30)
52#define SPMODE_CI_INACTIVEHIGH (1 << 29) 92#define SPMODE_CI_INACTIVEHIGH (1 << 29)
@@ -75,14 +115,40 @@ struct mpc8xxx_spi_reg {
75#define SPIM_NE 0x00000200 /* Not empty */ 115#define SPIM_NE 0x00000200 /* Not empty */
76#define SPIM_NF 0x00000100 /* Not full */ 116#define SPIM_NF 0x00000100 /* Not full */
77 117
118#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
119#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
120
121/* SPCOM register values */
122#define SPCOM_STR (1 << 23) /* Start transmit */
123
124#define SPI_PRAM_SIZE 0x100
125#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
126
78/* SPI Controller driver's private data. */ 127/* SPI Controller driver's private data. */
79struct mpc8xxx_spi { 128struct mpc8xxx_spi {
129 struct device *dev;
80 struct mpc8xxx_spi_reg __iomem *base; 130 struct mpc8xxx_spi_reg __iomem *base;
81 131
82 /* rx & tx bufs from the spi_transfer */ 132 /* rx & tx bufs from the spi_transfer */
83 const void *tx; 133 const void *tx;
84 void *rx; 134 void *rx;
85 135
136 int subblock;
137 struct spi_pram __iomem *pram;
138 struct cpm_buf_desc __iomem *tx_bd;
139 struct cpm_buf_desc __iomem *rx_bd;
140
141 struct spi_transfer *xfer_in_progress;
142
143 /* dma addresses for CPM transfers */
144 dma_addr_t tx_dma;
145 dma_addr_t rx_dma;
146 bool map_tx_dma;
147 bool map_rx_dma;
148
149 dma_addr_t dma_dummy_tx;
150 dma_addr_t dma_dummy_rx;
151
86 /* functions to deal with different sized buffers */ 152 /* functions to deal with different sized buffers */
87 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 153 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
88 u32(*get_tx) (struct mpc8xxx_spi *); 154 u32(*get_tx) (struct mpc8xxx_spi *);
@@ -96,7 +162,7 @@ struct mpc8xxx_spi {
96 u32 rx_shift; /* RX data reg shift when in qe mode */ 162 u32 rx_shift; /* RX data reg shift when in qe mode */
97 u32 tx_shift; /* TX data reg shift when in qe mode */ 163 u32 tx_shift; /* TX data reg shift when in qe mode */
98 164
99 bool qe_mode; 165 unsigned int flags;
100 166
101 struct workqueue_struct *workqueue; 167 struct workqueue_struct *workqueue;
102 struct work_struct work; 168 struct work_struct work;
@@ -107,6 +173,10 @@ struct mpc8xxx_spi {
107 struct completion done; 173 struct completion done;
108}; 174};
109 175
176static void *mpc8xxx_dummy_rx;
177static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
178static int mpc8xxx_dummy_rx_refcnt;
179
110struct spi_mpc8xxx_cs { 180struct spi_mpc8xxx_cs {
111 /* functions to deal with different sized buffers */ 181 /* functions to deal with different sized buffers */
112 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 182 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
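The new mpc8xxx_dummy_rx buffer gives CPM transfers with no rx_buf somewhere to land received bytes; it is shared by all controllers and kept alive by a mutex-protected reference count. The helpers that manage it fall outside the hunks shown here, so the pair below is only a sketch with assumed names and shape.

/* Sketch only: plausible get/put helpers for the shared dummy RX
 * buffer declared above.  Names and exact behaviour are assumptions. */
static void *mpc8xxx_dummy_rx_get(void)
{
	mutex_lock(&mpc8xxx_dummy_rx_lock);
	if (!mpc8xxx_dummy_rx)
		mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
	if (mpc8xxx_dummy_rx)
		mpc8xxx_dummy_rx_refcnt++;
	mutex_unlock(&mpc8xxx_dummy_rx_lock);

	return mpc8xxx_dummy_rx;
}

static void mpc8xxx_dummy_rx_put(void)
{
	mutex_lock(&mpc8xxx_dummy_rx_lock);
	if (!--mpc8xxx_dummy_rx_refcnt) {
		kfree(mpc8xxx_dummy_rx);
		mpc8xxx_dummy_rx = NULL;
	}
	mutex_unlock(&mpc8xxx_dummy_rx_lock);
}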
@@ -155,6 +225,42 @@ MPC83XX_SPI_TX_BUF(u8)
155MPC83XX_SPI_TX_BUF(u16) 225MPC83XX_SPI_TX_BUF(u16)
156MPC83XX_SPI_TX_BUF(u32) 226MPC83XX_SPI_TX_BUF(u32)
157 227
228static void mpc8xxx_spi_change_mode(struct spi_device *spi)
229{
230 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
231 struct spi_mpc8xxx_cs *cs = spi->controller_state;
232 __be32 __iomem *mode = &mspi->base->mode;
233 unsigned long flags;
234
235 if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
236 return;
237
238 /* Turn off IRQs locally to minimize time that SPI is disabled. */
239 local_irq_save(flags);
240
241 /* Turn off SPI unit prior changing mode */
242 mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
243 mpc8xxx_spi_write_reg(mode, cs->hw_mode);
244
245 /* When in CPM mode, we need to reinit tx and rx. */
246 if (mspi->flags & SPI_CPM_MODE) {
247 if (mspi->flags & SPI_QE) {
248 qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
249 QE_CR_PROTOCOL_UNSPECIFIED, 0);
250 } else {
251 cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
252 if (mspi->flags & SPI_CPM1) {
253 out_be16(&mspi->pram->rbptr,
254 in_be16(&mspi->pram->rbase));
255 out_be16(&mspi->pram->tbptr,
256 in_be16(&mspi->pram->tbase));
257 }
258 }
259 }
260
261 local_irq_restore(flags);
262}
263
158static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) 264static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
159{ 265{
160 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 266 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -168,27 +274,13 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
168 } 274 }
169 275
170 if (value == BITBANG_CS_ACTIVE) { 276 if (value == BITBANG_CS_ACTIVE) {
171 u32 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
172
173 mpc8xxx_spi->rx_shift = cs->rx_shift; 277 mpc8xxx_spi->rx_shift = cs->rx_shift;
174 mpc8xxx_spi->tx_shift = cs->tx_shift; 278 mpc8xxx_spi->tx_shift = cs->tx_shift;
175 mpc8xxx_spi->get_rx = cs->get_rx; 279 mpc8xxx_spi->get_rx = cs->get_rx;
176 mpc8xxx_spi->get_tx = cs->get_tx; 280 mpc8xxx_spi->get_tx = cs->get_tx;
177 281
178 if (cs->hw_mode != regval) { 282 mpc8xxx_spi_change_mode(spi);
179 unsigned long flags; 283
180 __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
181
182 regval = cs->hw_mode;
183 /* Turn off IRQs locally to minimize time that
184 * SPI is disabled
185 */
186 local_irq_save(flags);
187 /* Turn off SPI unit prior changing mode */
188 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
189 mpc8xxx_spi_write_reg(mode, regval);
190 local_irq_restore(flags);
191 }
192 if (pdata->cs_control) 284 if (pdata->cs_control)
193 pdata->cs_control(spi, pol); 285 pdata->cs_control(spi, pol);
194 } 286 }
@@ -198,7 +290,6 @@ static
198int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 290int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
199{ 291{
200 struct mpc8xxx_spi *mpc8xxx_spi; 292 struct mpc8xxx_spi *mpc8xxx_spi;
201 u32 regval;
202 u8 bits_per_word, pm; 293 u8 bits_per_word, pm;
203 u32 hz; 294 u32 hz;
204 struct spi_mpc8xxx_cs *cs = spi->controller_state; 295 struct spi_mpc8xxx_cs *cs = spi->controller_state;
@@ -230,14 +321,14 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
230 if (bits_per_word <= 8) { 321 if (bits_per_word <= 8) {
231 cs->get_rx = mpc8xxx_spi_rx_buf_u8; 322 cs->get_rx = mpc8xxx_spi_rx_buf_u8;
232 cs->get_tx = mpc8xxx_spi_tx_buf_u8; 323 cs->get_tx = mpc8xxx_spi_tx_buf_u8;
233 if (mpc8xxx_spi->qe_mode) { 324 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
234 cs->rx_shift = 16; 325 cs->rx_shift = 16;
235 cs->tx_shift = 24; 326 cs->tx_shift = 24;
236 } 327 }
237 } else if (bits_per_word <= 16) { 328 } else if (bits_per_word <= 16) {
238 cs->get_rx = mpc8xxx_spi_rx_buf_u16; 329 cs->get_rx = mpc8xxx_spi_rx_buf_u16;
239 cs->get_tx = mpc8xxx_spi_tx_buf_u16; 330 cs->get_tx = mpc8xxx_spi_tx_buf_u16;
240 if (mpc8xxx_spi->qe_mode) { 331 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
241 cs->rx_shift = 16; 332 cs->rx_shift = 16;
242 cs->tx_shift = 16; 333 cs->tx_shift = 16;
243 } 334 }
@@ -247,7 +338,8 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
247 } else 338 } else
248 return -EINVAL; 339 return -EINVAL;
249 340
250 if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) { 341 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
342 spi->mode & SPI_LSB_FIRST) {
251 cs->tx_shift = 0; 343 cs->tx_shift = 0;
252 if (bits_per_word <= 8) 344 if (bits_per_word <= 8)
253 cs->rx_shift = 8; 345 cs->rx_shift = 8;
@@ -286,37 +378,138 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
286 pm--; 378 pm--;
287 379
288 cs->hw_mode |= SPMODE_PM(pm); 380 cs->hw_mode |= SPMODE_PM(pm);
289 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); 381
290 if (cs->hw_mode != regval) { 382 mpc8xxx_spi_change_mode(spi);
291 unsigned long flags; 383 return 0;
292 __be32 __iomem *mode = &mpc8xxx_spi->base->mode; 384}
293 385
294 regval = cs->hw_mode; 386static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
295 /* Turn off IRQs locally to minimize time 387{
296 * that SPI is disabled 388 struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
297 */ 389 struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
298 local_irq_save(flags); 390 unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
299 /* Turn off SPI unit prior changing mode */ 391 unsigned int xfer_ofs;
300 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); 392
301 mpc8xxx_spi_write_reg(mode, regval); 393 xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
302 local_irq_restore(flags); 394
395 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
396 out_be16(&rx_bd->cbd_datlen, 0);
397 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
398
399 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
400 out_be16(&tx_bd->cbd_datlen, xfer_len);
401 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
402 BD_SC_LAST);
403
404 /* start transfer */
405 mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
406}
407
408static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
409 struct spi_transfer *t, bool is_dma_mapped)
410{
411 struct device *dev = mspi->dev;
412
413 if (is_dma_mapped) {
414 mspi->map_tx_dma = 0;
415 mspi->map_rx_dma = 0;
416 } else {
417 mspi->map_tx_dma = 1;
418 mspi->map_rx_dma = 1;
419 }
420
421 if (!t->tx_buf) {
422 mspi->tx_dma = mspi->dma_dummy_tx;
423 mspi->map_tx_dma = 0;
424 }
425
426 if (!t->rx_buf) {
427 mspi->rx_dma = mspi->dma_dummy_rx;
428 mspi->map_rx_dma = 0;
303 } 429 }
430
431 if (mspi->map_tx_dma) {
432 void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
433
434 mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
435 DMA_TO_DEVICE);
436 if (dma_mapping_error(dev, mspi->tx_dma)) {
437 dev_err(dev, "unable to map tx dma\n");
438 return -ENOMEM;
439 }
440 } else {
441 mspi->tx_dma = t->tx_dma;
442 }
443
444 if (mspi->map_rx_dma) {
445 mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
446 DMA_FROM_DEVICE);
447 if (dma_mapping_error(dev, mspi->rx_dma)) {
448 dev_err(dev, "unable to map rx dma\n");
449 goto err_rx_dma;
450 }
451 } else {
452 mspi->rx_dma = t->rx_dma;
453 }
454
455 /* enable rx ints */
456 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);
457
458 mspi->xfer_in_progress = t;
459 mspi->count = t->len;
460
461 /* start CPM transfers */
462 mpc8xxx_spi_cpm_bufs_start(mspi);
463
304 return 0; 464 return 0;
465
466err_rx_dma:
467 if (mspi->map_tx_dma)
468 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
469 return -ENOMEM;
305} 470}
306 471
307static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 472static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
308{ 473{
309 struct mpc8xxx_spi *mpc8xxx_spi; 474 struct device *dev = mspi->dev;
310 u32 word, len, bits_per_word; 475 struct spi_transfer *t = mspi->xfer_in_progress;
476
477 if (mspi->map_tx_dma)
478 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
 479	if (mspi->map_rx_dma)
480 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
481 mspi->xfer_in_progress = NULL;
482}
311 483
312 mpc8xxx_spi = spi_master_get_devdata(spi->master); 484static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
485 struct spi_transfer *t, unsigned int len)
486{
487 u32 word;
488
489 mspi->count = len;
490
491 /* enable rx ints */
492 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);
493
494 /* transmit word */
495 word = mspi->get_tx(mspi);
496 mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
497
498 return 0;
499}
500
501static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
502 bool is_dma_mapped)
503{
504 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
505 unsigned int len = t->len;
506 u8 bits_per_word;
507 int ret;
313 508
314 mpc8xxx_spi->tx = t->tx_buf;
315 mpc8xxx_spi->rx = t->rx_buf;
316 bits_per_word = spi->bits_per_word; 509 bits_per_word = spi->bits_per_word;
317 if (t->bits_per_word) 510 if (t->bits_per_word)
318 bits_per_word = t->bits_per_word; 511 bits_per_word = t->bits_per_word;
319 len = t->len; 512
320 if (bits_per_word > 8) { 513 if (bits_per_word > 8) {
321 /* invalid length? */ 514 /* invalid length? */
322 if (len & 1) 515 if (len & 1)
@@ -329,22 +522,27 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
329 return -EINVAL; 522 return -EINVAL;
330 len /= 2; 523 len /= 2;
331 } 524 }
332 mpc8xxx_spi->count = len;
333 525
334 INIT_COMPLETION(mpc8xxx_spi->done); 526 mpc8xxx_spi->tx = t->tx_buf;
527 mpc8xxx_spi->rx = t->rx_buf;
335 528
336 /* enable rx ints */ 529 INIT_COMPLETION(mpc8xxx_spi->done);
337 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, SPIM_NE);
338 530
339 /* transmit word */ 531 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
340 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); 532 ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
341 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); 533 else
534 ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
535 if (ret)
536 return ret;
342 537
343 wait_for_completion(&mpc8xxx_spi->done); 538 wait_for_completion(&mpc8xxx_spi->done);
344 539
345 /* disable rx ints */ 540 /* disable rx ints */
346 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 541 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
347 542
543 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
544 mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);
545
348 return mpc8xxx_spi->count; 546 return mpc8xxx_spi->count;
349} 547}
350 548
@@ -375,7 +573,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
375 } 573 }
376 cs_change = t->cs_change; 574 cs_change = t->cs_change;
377 if (t->len) 575 if (t->len)
378 status = mpc8xxx_spi_bufs(spi, t); 576 status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
379 if (status) { 577 if (status) {
380 status = -EMSGSIZE; 578 status = -EMSGSIZE;
381 break; 579 break;
@@ -464,45 +662,80 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
464 return 0; 662 return 0;
465} 663}
466 664
467static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) 665static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
468{ 666{
469 struct mpc8xxx_spi *mpc8xxx_spi = context_data; 667 u16 len;
470 u32 event;
471 irqreturn_t ret = IRQ_NONE;
472 668
473 /* Get interrupt events(tx/rx) */ 669 dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
474 event = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event); 670 in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
475 671
476 /* We need handle RX first */ 672 len = in_be16(&mspi->rx_bd->cbd_datlen);
477 if (event & SPIE_NE) { 673 if (len > mspi->count) {
478 u32 rx_data = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->receive); 674 WARN_ON(1);
675 len = mspi->count;
676 }
479 677
480 if (mpc8xxx_spi->rx) 678 /* Clear the events */
481 mpc8xxx_spi->get_rx(rx_data, mpc8xxx_spi); 679 mpc8xxx_spi_write_reg(&mspi->base->event, events);
482 680
483 ret = IRQ_HANDLED; 681 mspi->count -= len;
682 if (mspi->count)
683 mpc8xxx_spi_cpm_bufs_start(mspi);
684 else
685 complete(&mspi->done);
686}
687
688static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
689{
 690	/* We need to handle RX first */
691 if (events & SPIE_NE) {
692 u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);
693
694 if (mspi->rx)
695 mspi->get_rx(rx_data, mspi);
484 } 696 }
485 697
486 if ((event & SPIE_NF) == 0) 698 if ((events & SPIE_NF) == 0)
487 /* spin until TX is done */ 699 /* spin until TX is done */
488 while (((event = 700 while (((events =
489 mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event)) & 701 mpc8xxx_spi_read_reg(&mspi->base->event)) &
490 SPIE_NF) == 0) 702 SPIE_NF) == 0)
491 cpu_relax(); 703 cpu_relax();
492 704
493 mpc8xxx_spi->count -= 1; 705 /* Clear the events */
494 if (mpc8xxx_spi->count) { 706 mpc8xxx_spi_write_reg(&mspi->base->event, events);
495 u32 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); 707
496 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); 708 mspi->count -= 1;
709 if (mspi->count) {
710 u32 word = mspi->get_tx(mspi);
711
712 mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
497 } else { 713 } else {
498 complete(&mpc8xxx_spi->done); 714 complete(&mspi->done);
499 } 715 }
716}
500 717
501 /* Clear the events */ 718static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
502 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, event); 719{
720 struct mpc8xxx_spi *mspi = context_data;
721 irqreturn_t ret = IRQ_NONE;
722 u32 events;
723
 724	/* Get interrupt events (tx/rx) */
725 events = mpc8xxx_spi_read_reg(&mspi->base->event);
726 if (events)
727 ret = IRQ_HANDLED;
728
729 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
730
731 if (mspi->flags & SPI_CPM_MODE)
732 mpc8xxx_spi_cpm_irq(mspi, events);
733 else
734 mpc8xxx_spi_cpu_irq(mspi, events);
503 735
504 return ret; 736 return ret;
505} 737}
738
506static int mpc8xxx_spi_transfer(struct spi_device *spi, 739static int mpc8xxx_spi_transfer(struct spi_device *spi,
507 struct spi_message *m) 740 struct spi_message *m)
508{ 741{
@@ -526,6 +759,215 @@ static void mpc8xxx_spi_cleanup(struct spi_device *spi)
526 kfree(spi->controller_state); 759 kfree(spi->controller_state);
527} 760}
528 761
762static void *mpc8xxx_spi_alloc_dummy_rx(void)
763{
764 mutex_lock(&mpc8xxx_dummy_rx_lock);
765
766 if (!mpc8xxx_dummy_rx)
767 mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
768 if (mpc8xxx_dummy_rx)
769 mpc8xxx_dummy_rx_refcnt++;
770
771 mutex_unlock(&mpc8xxx_dummy_rx_lock);
772
773 return mpc8xxx_dummy_rx;
774}
775
776static void mpc8xxx_spi_free_dummy_rx(void)
777{
778 mutex_lock(&mpc8xxx_dummy_rx_lock);
779
780 switch (mpc8xxx_dummy_rx_refcnt) {
781 case 0:
782 WARN_ON(1);
783 break;
784 case 1:
785 kfree(mpc8xxx_dummy_rx);
786 mpc8xxx_dummy_rx = NULL;
787 /* fall through */
788 default:
789 mpc8xxx_dummy_rx_refcnt--;
790 break;
791 }
792
793 mutex_unlock(&mpc8xxx_dummy_rx_lock);
794}
795
796static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
797{
798 struct device *dev = mspi->dev;
799 struct device_node *np = dev_archdata_get_node(&dev->archdata);
800 const u32 *iprop;
801 int size;
802 unsigned long spi_base_ofs;
803 unsigned long pram_ofs = -ENOMEM;
804
805 /* Can't use of_address_to_resource(), QE muram isn't at 0. */
806 iprop = of_get_property(np, "reg", &size);
807
808 /* QE with a fixed pram location? */
809 if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
810 return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
811
812 /* QE but with a dynamic pram location? */
813 if (mspi->flags & SPI_QE) {
814 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
815 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
816 QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
817 return pram_ofs;
818 }
819
820 /* CPM1 and CPM2 pram must be at a fixed addr. */
821 if (!iprop || size != sizeof(*iprop) * 4)
822 return -ENOMEM;
823
824 spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
825 if (IS_ERR_VALUE(spi_base_ofs))
826 return -ENOMEM;
827
828 if (mspi->flags & SPI_CPM2) {
829 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
830 if (!IS_ERR_VALUE(pram_ofs)) {
831 u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
832
833 out_be16(spi_base, pram_ofs);
834 }
835 } else {
836 struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
837 u16 rpbase = in_be16(&pram->rpbase);
838
839 /* Microcode relocation patch applied? */
840 if (rpbase)
841 pram_ofs = rpbase;
842 else
843 return spi_base_ofs;
844 }
845
846 cpm_muram_free(spi_base_ofs);
847 return pram_ofs;
848}
849
850static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
851{
852 struct device *dev = mspi->dev;
853 struct device_node *np = dev_archdata_get_node(&dev->archdata);
854 const u32 *iprop;
855 int size;
856 unsigned long pram_ofs;
857 unsigned long bds_ofs;
858
859 if (!(mspi->flags & SPI_CPM_MODE))
860 return 0;
861
862 if (!mpc8xxx_spi_alloc_dummy_rx())
863 return -ENOMEM;
864
865 if (mspi->flags & SPI_QE) {
866 iprop = of_get_property(np, "cell-index", &size);
867 if (iprop && size == sizeof(*iprop))
868 mspi->subblock = *iprop;
869
870 switch (mspi->subblock) {
871 default:
 872			dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
873 /* fall through */
874 case 0:
875 mspi->subblock = QE_CR_SUBBLOCK_SPI1;
876 break;
877 case 1:
878 mspi->subblock = QE_CR_SUBBLOCK_SPI2;
879 break;
880 }
881 }
882
883 pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
884 if (IS_ERR_VALUE(pram_ofs)) {
885 dev_err(dev, "can't allocate spi parameter ram\n");
886 goto err_pram;
887 }
888
889 bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
890 sizeof(*mspi->rx_bd), 8);
891 if (IS_ERR_VALUE(bds_ofs)) {
892 dev_err(dev, "can't allocate bds\n");
893 goto err_bds;
894 }
895
896 mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
897 DMA_TO_DEVICE);
898 if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
899 dev_err(dev, "unable to map dummy tx buffer\n");
900 goto err_dummy_tx;
901 }
902
903 mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
904 DMA_FROM_DEVICE);
905 if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
906 dev_err(dev, "unable to map dummy rx buffer\n");
907 goto err_dummy_rx;
908 }
909
910 mspi->pram = cpm_muram_addr(pram_ofs);
911
912 mspi->tx_bd = cpm_muram_addr(bds_ofs);
913 mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
914
915 /* Initialize parameter ram. */
916 out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
917 out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
918 out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
919 out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
920 out_be16(&mspi->pram->mrblr, SPI_MRBLR);
921 out_be32(&mspi->pram->rstate, 0);
922 out_be32(&mspi->pram->rdp, 0);
923 out_be16(&mspi->pram->rbptr, 0);
924 out_be16(&mspi->pram->rbc, 0);
925 out_be32(&mspi->pram->rxtmp, 0);
926 out_be32(&mspi->pram->tstate, 0);
927 out_be32(&mspi->pram->tdp, 0);
928 out_be16(&mspi->pram->tbptr, 0);
929 out_be16(&mspi->pram->tbc, 0);
930 out_be32(&mspi->pram->txtmp, 0);
931
932 return 0;
933
934err_dummy_rx:
935 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
936err_dummy_tx:
937 cpm_muram_free(bds_ofs);
938err_bds:
939 cpm_muram_free(pram_ofs);
940err_pram:
941 mpc8xxx_spi_free_dummy_rx();
942 return -ENOMEM;
943}
944
945static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
946{
947 struct device *dev = mspi->dev;
948
949 dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
950 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
951 cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
952 cpm_muram_free(cpm_muram_offset(mspi->pram));
953 mpc8xxx_spi_free_dummy_rx();
954}
955
956static const char *mpc8xxx_spi_strmode(unsigned int flags)
957{
958 if (flags & SPI_QE_CPU_MODE) {
959 return "QE CPU";
960 } else if (flags & SPI_CPM_MODE) {
961 if (flags & SPI_QE)
962 return "QE";
963 else if (flags & SPI_CPM2)
964 return "CPM2";
965 else
966 return "CPM1";
967 }
968 return "CPU";
969}
970
529static struct spi_master * __devinit 971static struct spi_master * __devinit
530mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) 972mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
531{ 973{
@@ -552,14 +994,19 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
552 master->cleanup = mpc8xxx_spi_cleanup; 994 master->cleanup = mpc8xxx_spi_cleanup;
553 995
554 mpc8xxx_spi = spi_master_get_devdata(master); 996 mpc8xxx_spi = spi_master_get_devdata(master);
555 mpc8xxx_spi->qe_mode = pdata->qe_mode; 997 mpc8xxx_spi->dev = dev;
556 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; 998 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
557 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; 999 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
1000 mpc8xxx_spi->flags = pdata->flags;
558 mpc8xxx_spi->spibrg = pdata->sysclk; 1001 mpc8xxx_spi->spibrg = pdata->sysclk;
559 1002
1003 ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
1004 if (ret)
1005 goto err_cpm_init;
1006
560 mpc8xxx_spi->rx_shift = 0; 1007 mpc8xxx_spi->rx_shift = 0;
561 mpc8xxx_spi->tx_shift = 0; 1008 mpc8xxx_spi->tx_shift = 0;
562 if (mpc8xxx_spi->qe_mode) { 1009 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
563 mpc8xxx_spi->rx_shift = 16; 1010 mpc8xxx_spi->rx_shift = 16;
564 mpc8xxx_spi->tx_shift = 24; 1011 mpc8xxx_spi->tx_shift = 24;
565 } 1012 }
@@ -569,7 +1016,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
569 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); 1016 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1);
570 if (mpc8xxx_spi->base == NULL) { 1017 if (mpc8xxx_spi->base == NULL) {
571 ret = -ENOMEM; 1018 ret = -ENOMEM;
572 goto put_master; 1019 goto err_ioremap;
573 } 1020 }
574 1021
575 mpc8xxx_spi->irq = irq; 1022 mpc8xxx_spi->irq = irq;
@@ -592,7 +1039,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
592 1039
593 /* Enable SPI interface */ 1040 /* Enable SPI interface */
594 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 1041 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
595 if (pdata->qe_mode) 1042 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
596 regval |= SPMODE_OP; 1043 regval |= SPMODE_OP;
597 1044
598 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); 1045 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
@@ -612,9 +1059,8 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
612 if (ret < 0) 1059 if (ret < 0)
613 goto unreg_master; 1060 goto unreg_master;
614 1061
615 printk(KERN_INFO 1062 dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
616 "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n", 1063 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
617 dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq);
618 1064
619 return master; 1065 return master;
620 1066
@@ -624,7 +1070,9 @@ free_irq:
624 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 1070 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
625unmap_io: 1071unmap_io:
626 iounmap(mpc8xxx_spi->base); 1072 iounmap(mpc8xxx_spi->base);
627put_master: 1073err_ioremap:
1074 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
1075err_cpm_init:
628 spi_master_put(master); 1076 spi_master_put(master);
629err: 1077err:
630 return ERR_PTR(ret); 1078 return ERR_PTR(ret);
@@ -644,6 +1092,7 @@ static int __devexit mpc8xxx_spi_remove(struct device *dev)
644 1092
645 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 1093 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
646 iounmap(mpc8xxx_spi->base); 1094 iounmap(mpc8xxx_spi->base);
1095 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
647 1096
648 return 0; 1097 return 0;
649} 1098}
@@ -709,6 +1158,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
709 gpio = of_get_gpio_flags(np, i, &flags); 1158 gpio = of_get_gpio_flags(np, i, &flags);
710 if (!gpio_is_valid(gpio)) { 1159 if (!gpio_is_valid(gpio)) {
711 dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); 1160 dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
1161 ret = gpio;
712 goto err_loop; 1162 goto err_loop;
713 } 1163 }
714 1164
@@ -804,7 +1254,13 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
804 1254
805 prop = of_get_property(np, "mode", NULL); 1255 prop = of_get_property(np, "mode", NULL);
806 if (prop && !strcmp(prop, "cpu-qe")) 1256 if (prop && !strcmp(prop, "cpu-qe"))
807 pdata->qe_mode = 1; 1257 pdata->flags = SPI_QE_CPU_MODE;
1258 else if (prop && !strcmp(prop, "qe"))
1259 pdata->flags = SPI_CPM_MODE | SPI_QE;
1260 else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
1261 pdata->flags = SPI_CPM_MODE | SPI_CPM2;
1262 else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
1263 pdata->flags = SPI_CPM_MODE | SPI_CPM1;
808 1264
809 ret = of_mpc8xxx_spi_get_chipselects(dev); 1265 ret = of_mpc8xxx_spi_get_chipselects(dev);
810 if (ret) 1266 if (ret)
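
For reference, a minimal client-side sketch of a transfer that takes the new is_dma_mapped path in mpc8xxx_spi_bufs(); this is not part of the patch, the helper name and buffers are invented, and it assumes the caller already mapped its buffers for DMA.

#include <linux/spi/spi.h>

/* Hypothetical helper: submit one pre-mapped transfer so the driver
 * skips its own dma_map_single() calls in mpc8xxx_spi_cpm_bufs(). */
static int example_dma_mapped_xfer(struct spi_device *spi,
                                   void *tx, dma_addr_t tx_dma,
                                   void *rx, dma_addr_t rx_dma,
                                   size_t len)
{
        struct spi_transfer t = {
                .tx_buf = tx,
                .rx_buf = rx,
                .tx_dma = tx_dma,
                .rx_dma = rx_dma,
                .len    = len,
        };
        struct spi_message m;

        spi_message_init(&m);
        m.is_dma_mapped = 1;            /* buffers were mapped by the caller */
        spi_message_add_tail(&t, &m);

        return spi_sync(spi, &m);
}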
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 46b8c5c2f45e..5a143b9f6361 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -148,7 +148,8 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
148{ 148{
149 u8 bits_per_word; 149 u8 bits_per_word;
150 150
151 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 151 bits_per_word = (t && t->bits_per_word)
152 ? t->bits_per_word : spi->bits_per_word;
152 if (bits_per_word != 8) { 153 if (bits_per_word != 8) {
153 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", 154 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
154 __func__, bits_per_word); 155 __func__, bits_per_word);
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 31b2710882e4..bea5b827bebe 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -419,19 +419,4 @@ struct qe_udc {
419#define CPM_USB_RESTART_TX_OPCODE 0x0b 419#define CPM_USB_RESTART_TX_OPCODE 0x0b
420#define CPM_USB_EP_SHIFT 5 420#define CPM_USB_EP_SHIFT 5
421 421
422#ifndef CONFIG_CPM
423inline int cpm_command(u32 command, u8 opcode)
424{
425 return -EOPNOTSUPP;
426}
427#endif
428
429#ifndef CONFIG_QUICC_ENGINE
430inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
431 u32 cmd_input)
432{
433 return -EOPNOTSUPP;
434}
435#endif
436
437#endif /* __FSL_QE_UDC_H */ 422#endif /* __FSL_QE_UDC_H */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 4d8c54c23dd7..b043ac83c412 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -282,8 +282,17 @@ static int offb_set_par(struct fb_info *info)
282 return 0; 282 return 0;
283} 283}
284 284
285static void offb_destroy(struct fb_info *info)
286{
287 if (info->screen_base)
288 iounmap(info->screen_base);
289 release_mem_region(info->aperture_base, info->aperture_size);
290 framebuffer_release(info);
291}
292
285static struct fb_ops offb_ops = { 293static struct fb_ops offb_ops = {
286 .owner = THIS_MODULE, 294 .owner = THIS_MODULE,
295 .fb_destroy = offb_destroy,
287 .fb_setcolreg = offb_setcolreg, 296 .fb_setcolreg = offb_setcolreg,
288 .fb_set_par = offb_set_par, 297 .fb_set_par = offb_set_par,
289 .fb_blank = offb_blank, 298 .fb_blank = offb_blank,
@@ -482,10 +491,14 @@ static void __init offb_init_fb(const char *name, const char *full_name,
482 var->sync = 0; 491 var->sync = 0;
483 var->vmode = FB_VMODE_NONINTERLACED; 492 var->vmode = FB_VMODE_NONINTERLACED;
484 493
494 /* set offb aperture size for generic probing */
495 info->aperture_base = address;
496 info->aperture_size = fix->smem_len;
497
485 info->fbops = &offb_ops; 498 info->fbops = &offb_ops;
486 info->screen_base = ioremap(address, fix->smem_len); 499 info->screen_base = ioremap(address, fix->smem_len);
487 info->pseudo_palette = (void *) (info + 1); 500 info->pseudo_palette = (void *) (info + 1);
488 info->flags = FBINFO_DEFAULT | foreign_endian; 501 info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian;
489 502
490 fb_alloc_cmap(&info->cmap, 256, 0); 503 fb_alloc_cmap(&info->cmap, 256, 0);
491 504
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3711b888d482..d958b76430a2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -861,8 +861,10 @@ config GEF_WDT
861 Watchdog timer found in a number of GE Fanuc single board computers. 861 Watchdog timer found in a number of GE Fanuc single board computers.
862 862
863config MPC5200_WDT 863config MPC5200_WDT
864 tristate "MPC5200 Watchdog Timer" 864 bool "MPC52xx Watchdog Timer"
865 depends on PPC_MPC52xx 865 depends on PPC_MPC52xx
866 help
 867	  Use General Purpose Timer (GPT) 0 on the MPC5200 as a watchdog.
866 868
867config 8xxx_WDT 869config 8xxx_WDT
868 tristate "MPC8xxx Platform Watchdog Timer" 870 tristate "MPC8xxx Platform Watchdog Timer"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 699199b1baa6..89c045dc468e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -118,7 +118,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
118 118
119# POWERPC Architecture 119# POWERPC Architecture
120obj-$(CONFIG_GEF_WDT) += gef_wdt.o 120obj-$(CONFIG_GEF_WDT) += gef_wdt.o
121obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
122obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o 121obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
123obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o 122obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
124obj-$(CONFIG_PIKA_WDT) += pika_wdt.o 123obj-$(CONFIG_PIKA_WDT) += pika_wdt.o
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c
deleted file mode 100644
index fa9c47ce0ae7..000000000000
--- a/drivers/watchdog/mpc5200_wdt.c
+++ /dev/null
@@ -1,293 +0,0 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/miscdevice.h>
4#include <linux/watchdog.h>
5#include <linux/io.h>
6#include <linux/spinlock.h>
7#include <linux/of_platform.h>
8#include <linux/uaccess.h>
9#include <asm/mpc52xx.h>
10
11
12#define GPT_MODE_WDT (1 << 15)
13#define GPT_MODE_CE (1 << 12)
14#define GPT_MODE_MS_TIMER (0x4)
15
16
17struct mpc5200_wdt {
18 unsigned count; /* timer ticks before watchdog kicks in */
19 long ipb_freq;
20 struct miscdevice miscdev;
21 struct resource mem;
22 struct mpc52xx_gpt __iomem *regs;
23 spinlock_t io_lock;
24};
25
26/* is_active stores wether or not the /dev/watchdog device is opened */
27static unsigned long is_active;
28
29/* misc devices don't provide a way, to get back to 'dev' or 'miscdev' from
30 * file operations, which sucks. But there can be max 1 watchdog anyway, so...
31 */
32static struct mpc5200_wdt *wdt_global;
33
34
35/* helper to calculate timeout in timer counts */
36static void mpc5200_wdt_set_timeout(struct mpc5200_wdt *wdt, int timeout)
37{
38 /* use biggest prescaler of 64k */
39 wdt->count = (wdt->ipb_freq + 0xffff) / 0x10000 * timeout;
40
41 if (wdt->count > 0xffff)
42 wdt->count = 0xffff;
43}
44/* return timeout in seconds (calculated from timer count) */
45static int mpc5200_wdt_get_timeout(struct mpc5200_wdt *wdt)
46{
47 return wdt->count * 0x10000 / wdt->ipb_freq;
48}
49
50
51/* watchdog operations */
52static int mpc5200_wdt_start(struct mpc5200_wdt *wdt)
53{
54 spin_lock(&wdt->io_lock);
55 /* disable */
56 out_be32(&wdt->regs->mode, 0);
57 /* set timeout, with maximum prescaler */
58 out_be32(&wdt->regs->count, 0x0 | wdt->count);
59 /* enable watchdog */
60 out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT |
61 GPT_MODE_MS_TIMER);
62 spin_unlock(&wdt->io_lock);
63
64 return 0;
65}
66static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt)
67{
68 spin_lock(&wdt->io_lock);
69 /* writing A5 to OCPW resets the watchdog */
70 out_be32(&wdt->regs->mode, 0xA5000000 |
71 (0xffffff & in_be32(&wdt->regs->mode)));
72 spin_unlock(&wdt->io_lock);
73 return 0;
74}
75static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt)
76{
77 spin_lock(&wdt->io_lock);
78 /* disable */
79 out_be32(&wdt->regs->mode, 0);
80 spin_unlock(&wdt->io_lock);
81 return 0;
82}
83
84
85/* file operations */
86static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data,
87 size_t len, loff_t *ppos)
88{
89 struct mpc5200_wdt *wdt = file->private_data;
90 mpc5200_wdt_ping(wdt);
91 return 0;
92}
93static struct watchdog_info mpc5200_wdt_info = {
94 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
95 .identity = "mpc5200 watchdog on GPT0",
96};
97static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd,
98 unsigned long arg)
99{
100 struct mpc5200_wdt *wdt = file->private_data;
101 int __user *data = (int __user *)arg;
102 int timeout;
103 int ret = 0;
104
105 switch (cmd) {
106 case WDIOC_GETSUPPORT:
107 ret = copy_to_user(data, &mpc5200_wdt_info,
108 sizeof(mpc5200_wdt_info));
109 if (ret)
110 ret = -EFAULT;
111 break;
112
113 case WDIOC_GETSTATUS:
114 case WDIOC_GETBOOTSTATUS:
115 ret = put_user(0, data);
116 break;
117
118 case WDIOC_KEEPALIVE:
119 mpc5200_wdt_ping(wdt);
120 break;
121
122 case WDIOC_SETTIMEOUT:
123 ret = get_user(timeout, data);
124 if (ret)
125 break;
126 mpc5200_wdt_set_timeout(wdt, timeout);
127 mpc5200_wdt_start(wdt);
128 /* fall through and return the timeout */
129
130 case WDIOC_GETTIMEOUT:
131 timeout = mpc5200_wdt_get_timeout(wdt);
132 ret = put_user(timeout, data);
133 break;
134
135 default:
136 ret = -ENOTTY;
137 }
138 return ret;
139}
140
141static int mpc5200_wdt_open(struct inode *inode, struct file *file)
142{
143 /* /dev/watchdog can only be opened once */
144 if (test_and_set_bit(0, &is_active))
145 return -EBUSY;
146
147 /* Set and activate the watchdog */
148 mpc5200_wdt_set_timeout(wdt_global, 30);
149 mpc5200_wdt_start(wdt_global);
150 file->private_data = wdt_global;
151 return nonseekable_open(inode, file);
152}
153static int mpc5200_wdt_release(struct inode *inode, struct file *file)
154{
155#if WATCHDOG_NOWAYOUT == 0
156 struct mpc5200_wdt *wdt = file->private_data;
157 mpc5200_wdt_stop(wdt);
158 wdt->count = 0; /* == disabled */
159#endif
160 clear_bit(0, &is_active);
161 return 0;
162}
163
164static const struct file_operations mpc5200_wdt_fops = {
165 .owner = THIS_MODULE,
166 .write = mpc5200_wdt_write,
167 .unlocked_ioctl = mpc5200_wdt_ioctl,
168 .open = mpc5200_wdt_open,
169 .release = mpc5200_wdt_release,
170};
171
172/* module operations */
173static int mpc5200_wdt_probe(struct of_device *op,
174 const struct of_device_id *match)
175{
176 struct mpc5200_wdt *wdt;
177 int err;
178 const void *has_wdt;
179 int size;
180
181 has_wdt = of_get_property(op->node, "has-wdt", NULL);
182 if (!has_wdt)
183 has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL);
184 if (!has_wdt)
185 return -ENODEV;
186
187 wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
188 if (!wdt)
189 return -ENOMEM;
190
191 wdt->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
192
193 err = of_address_to_resource(op->node, 0, &wdt->mem);
194 if (err)
195 goto out_free;
196 size = wdt->mem.end - wdt->mem.start + 1;
197 if (!request_mem_region(wdt->mem.start, size, "mpc5200_wdt")) {
198 err = -ENODEV;
199 goto out_free;
200 }
201 wdt->regs = ioremap(wdt->mem.start, size);
202 if (!wdt->regs) {
203 err = -ENODEV;
204 goto out_release;
205 }
206
207 dev_set_drvdata(&op->dev, wdt);
208 spin_lock_init(&wdt->io_lock);
209
210 wdt->miscdev = (struct miscdevice) {
211 .minor = WATCHDOG_MINOR,
212 .name = "watchdog",
213 .fops = &mpc5200_wdt_fops,
214 .parent = &op->dev,
215 };
216 wdt_global = wdt;
217 err = misc_register(&wdt->miscdev);
218 if (!err)
219 return 0;
220
221 iounmap(wdt->regs);
222out_release:
223 release_mem_region(wdt->mem.start, size);
224out_free:
225 kfree(wdt);
226 return err;
227}
228
229static int mpc5200_wdt_remove(struct of_device *op)
230{
231 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
232
233 mpc5200_wdt_stop(wdt);
234 misc_deregister(&wdt->miscdev);
235 iounmap(wdt->regs);
236 release_mem_region(wdt->mem.start, wdt->mem.end - wdt->mem.start + 1);
237 kfree(wdt);
238
239 return 0;
240}
241static int mpc5200_wdt_suspend(struct of_device *op, pm_message_t state)
242{
243 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
244 mpc5200_wdt_stop(wdt);
245 return 0;
246}
247static int mpc5200_wdt_resume(struct of_device *op)
248{
249 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
250 if (wdt->count)
251 mpc5200_wdt_start(wdt);
252 return 0;
253}
254static int mpc5200_wdt_shutdown(struct of_device *op)
255{
256 struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
257 mpc5200_wdt_stop(wdt);
258 return 0;
259}
260
261static struct of_device_id mpc5200_wdt_match[] = {
262 { .compatible = "mpc5200-gpt", },
263 { .compatible = "fsl,mpc5200-gpt", },
264 {},
265};
266static struct of_platform_driver mpc5200_wdt_driver = {
267 .owner = THIS_MODULE,
268 .name = "mpc5200-gpt-wdt",
269 .match_table = mpc5200_wdt_match,
270 .probe = mpc5200_wdt_probe,
271 .remove = mpc5200_wdt_remove,
272 .suspend = mpc5200_wdt_suspend,
273 .resume = mpc5200_wdt_resume,
274 .shutdown = mpc5200_wdt_shutdown,
275};
276
277
278static int __init mpc5200_wdt_init(void)
279{
280 return of_register_platform_driver(&mpc5200_wdt_driver);
281}
282
283static void __exit mpc5200_wdt_exit(void)
284{
285 of_unregister_platform_driver(&mpc5200_wdt_driver);
286}
287
288module_init(mpc5200_wdt_init);
289module_exit(mpc5200_wdt_exit);
290
291MODULE_AUTHOR("Domen Puncer <domen.puncer@telargo.com>");
292MODULE_LICENSE("Dual BSD/GPL");
293MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 7ba79a54948c..123257bb356b 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -7,6 +7,7 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/time.h> 8#include <linux/time.h>
9#include <linux/proc_fs.h> 9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
10#include <linux/stat.h> 11#include <linux/stat.h>
11#include <linux/string.h> 12#include <linux/string.h>
12#include <asm/prom.h> 13#include <asm/prom.h>
@@ -25,26 +26,27 @@ static struct proc_dir_entry *proc_device_tree;
25/* 26/*
26 * Supply data on a read from /proc/device-tree/node/property. 27 * Supply data on a read from /proc/device-tree/node/property.
27 */ 28 */
28static int property_read_proc(char *page, char **start, off_t off, 29static int property_proc_show(struct seq_file *m, void *v)
29 int count, int *eof, void *data)
30{ 30{
31 struct property *pp = data; 31 struct property *pp = m->private;
32 int n;
33 32
34 if (off >= pp->length) { 33 seq_write(m, pp->value, pp->length);
35 *eof = 1; 34 return 0;
36 return 0; 35}
37 } 36
38 n = pp->length - off; 37static int property_proc_open(struct inode *inode, struct file *file)
39 if (n > count) 38{
40 n = count; 39 return single_open(file, property_proc_show, PDE(inode)->data);
41 else
42 *eof = 1;
43 memcpy(page, (char *)pp->value + off, n);
44 *start = page;
45 return n;
46} 40}
47 41
42static const struct file_operations property_proc_fops = {
43 .owner = THIS_MODULE,
44 .open = property_proc_open,
45 .read = seq_read,
46 .llseek = seq_lseek,
47 .release = single_release,
48};
49
48/* 50/*
49 * For a node with a name like "gc@10", we make symlinks called "gc" 51 * For a node with a name like "gc@10", we make symlinks called "gc"
50 * and "@10" to it. 52 * and "@10" to it.
@@ -63,10 +65,9 @@ __proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp,
63 * Unfortunately proc_register puts each new entry 65 * Unfortunately proc_register puts each new entry
64 * at the beginning of the list. So we rearrange them. 66 * at the beginning of the list. So we rearrange them.
65 */ 67 */
66 ent = create_proc_read_entry(name, 68 ent = proc_create_data(name,
67 strncmp(name, "security-", 9) 69 strncmp(name, "security-", 9) ? S_IRUGO : S_IRUSR,
68 ? S_IRUGO : S_IRUSR, de, 70 de, &property_proc_fops, pp);
69 property_read_proc, pp);
70 if (ent == NULL) 71 if (ent == NULL)
71 return NULL; 72 return NULL;
72 73
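
The conversion above is an instance of the generic single_open() pattern for read-only proc entries; a standalone sketch, with illustrative names not taken from the patch, looks like this:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical read-only proc entry backed by seq_file. */
static int example_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", (char *)m->private);
        return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = example_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/* Registration would then be:
 * proc_create_data("example", S_IRUGO, parent, &example_proc_fops, data); */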
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 47536197ffdd..e287863ac053 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
43 43
44#ifdef CONFIG_HOTPLUG_CPU 44#ifdef CONFIG_HOTPLUG_CPU
45extern void unregister_cpu(struct cpu *cpu); 45extern void unregister_cpu(struct cpu *cpu);
46extern ssize_t arch_cpu_probe(const char *, size_t);
47extern ssize_t arch_cpu_release(const char *, size_t);
46#endif 48#endif
47struct notifier_block; 49struct notifier_block;
48 50
@@ -115,6 +117,19 @@ extern void put_online_cpus(void);
115#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 117#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
116int cpu_down(unsigned int cpu); 118int cpu_down(unsigned int cpu);
117 119
120#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
121extern void cpu_hotplug_driver_lock(void);
122extern void cpu_hotplug_driver_unlock(void);
123#else
124static inline void cpu_hotplug_driver_lock(void)
125{
126}
127
128static inline void cpu_hotplug_driver_unlock(void)
129{
130}
131#endif
132
118#else /* CONFIG_HOTPLUG_CPU */ 133#else /* CONFIG_HOTPLUG_CPU */
119 134
120#define get_online_cpus() do { } while (0) 135#define get_online_cpus() do { } while (0)
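
A hedged sketch of how architecture hotplug code might pair the new cpu_hotplug_driver_lock()/unlock() helpers with a CPU online request; the function name here is invented and error handling is simplified.

#include <linux/cpu.h>

/* Hypothetical probe path: hold the hotplug driver lock while the
 * newly added CPU is brought online. */
static int example_probe_and_online(unsigned int cpu)
{
        int rc;

        cpu_hotplug_driver_lock();
        rc = cpu_up(cpu);
        cpu_hotplug_driver_unlock();

        return rc;
}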
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 43fc95d822d5..28e33fea5107 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -74,7 +74,12 @@ struct spi_device;
74struct fsl_spi_platform_data { 74struct fsl_spi_platform_data {
75 u32 initial_spmode; /* initial SPMODE value */ 75 u32 initial_spmode; /* initial SPMODE value */
76 s16 bus_num; 76 s16 bus_num;
77 bool qe_mode; 77 unsigned int flags;
78#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */
79#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */
80#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */
81#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */
82#define SPI_QE (1 << 4) /* SPI unit is in QE block */
78 /* board specific information */ 83 /* board specific information */
79 u16 max_chipselect; 84 u16 max_chipselect;
80 void (*cs_control)(struct spi_device *spi, bool on); 85 void (*cs_control)(struct spi_device *spi, bool on);
@@ -90,6 +95,10 @@ struct mpc8xx_pcmcia_ops {
90 * lead to a deep sleep (i.e. power removed from the core, 95 * lead to a deep sleep (i.e. power removed from the core,
91 * instead of just the clock). 96 * instead of just the clock).
92 */ 97 */
98#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND)
93int fsl_deep_sleep(void); 99int fsl_deep_sleep(void);
100#else
101static inline int fsl_deep_sleep(void) { return 0; }
102#endif
94 103
95#endif /* _FSL_DEVICE_H_ */ 104#endif /* _FSL_DEVICE_H_ */
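
For illustration, a board-code fragment showing how the new flags field might be filled in where the removed qe_mode boolean used to be set; the structure instance and its values are invented for the example.

#include <linux/fsl_devices.h>

/* Hypothetical platform data: SPI unit in the QE block, driven in
 * CPM/QE "DMA" mode rather than QE CPU ("PIO") mode. */
static struct fsl_spi_platform_data example_spi_pdata = {
        .initial_spmode = 0,
        .bus_num        = 0,
        .flags          = SPI_CPM_MODE | SPI_QE,
        .max_chipselect = 1,
};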
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2d241da07236..a24de0b1858e 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -496,6 +496,7 @@ struct kvm_ioeventfd {
496#define KVM_CAP_VCPU_EVENTS 41 496#define KVM_CAP_VCPU_EVENTS 41
497#endif 497#endif
498#define KVM_CAP_S390_PSW 42 498#define KVM_CAP_S390_PSW 42
499#define KVM_CAP_PPC_SEGSTATE 43
499 500
500#ifdef KVM_CAP_IRQ_ROUTING 501#ifdef KVM_CAP_IRQ_ROUTING
501 502
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eae1f864c934..cca8a044e2b6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2295,6 +2295,20 @@
2295#define PCI_DEVICE_ID_MPC8536 0x0051 2295#define PCI_DEVICE_ID_MPC8536 0x0051
2296#define PCI_DEVICE_ID_P2020E 0x0070 2296#define PCI_DEVICE_ID_P2020E 0x0070
2297#define PCI_DEVICE_ID_P2020 0x0071 2297#define PCI_DEVICE_ID_P2020 0x0071
2298#define PCI_DEVICE_ID_P2010E 0x0078
2299#define PCI_DEVICE_ID_P2010 0x0079
2300#define PCI_DEVICE_ID_P1020E 0x0100
2301#define PCI_DEVICE_ID_P1020 0x0101
2302#define PCI_DEVICE_ID_P1011E 0x0108
2303#define PCI_DEVICE_ID_P1011 0x0109
2304#define PCI_DEVICE_ID_P1022E 0x0110
2305#define PCI_DEVICE_ID_P1022 0x0111
2306#define PCI_DEVICE_ID_P1013E 0x0118
2307#define PCI_DEVICE_ID_P1013 0x0119
2308#define PCI_DEVICE_ID_P4080E 0x0400
2309#define PCI_DEVICE_ID_P4080 0x0401
2310#define PCI_DEVICE_ID_P4040E 0x0408
2311#define PCI_DEVICE_ID_P4040 0x0409
2298#define PCI_DEVICE_ID_MPC8641 0x7010 2312#define PCI_DEVICE_ID_MPC8641 0x7010
2299#define PCI_DEVICE_ID_MPC8641D 0x7011 2313#define PCI_DEVICE_ID_MPC8641D 0x7011
2300#define PCI_DEVICE_ID_MPC8610 0x7018 2314#define PCI_DEVICE_ID_MPC8610 0x7018
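
The IDs added above would typically be consumed from a PCI match table; a hedged example follows, with an invented table name.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

/* Hypothetical match table using two of the new Freescale IDs. */
static const struct pci_device_id example_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_P4080) },
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_P1020) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);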
diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h
new file mode 100644
index 000000000000..d1004cf09241
--- /dev/null
+++ b/include/linux/spi/mpc52xx_spi.h
@@ -0,0 +1,10 @@
1
2#ifndef INCLUDE_MPC5200_SPI_H
3#define INCLUDE_MPC5200_SPI_H
4
5extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master,
6 void (*hook)(struct spi_message *m,
7 void *context),
8 void *hook_context);
9
10#endif
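
A sketch of how platform code could install the hook declared in this new header; the hook body and function names are placeholders, and only mpc52xx_spi_set_premessage_hook() comes from the header itself.

#include <linux/spi/spi.h>
#include <linux/spi/mpc52xx_spi.h>

/* Hypothetical pre-message hook, called before each SPI message is
 * processed by the mpc52xx SPI master. */
static void example_premessage_hook(struct spi_message *m, void *context)
{
        /* e.g. switch a board-level mux for the addressed device */
}

static void example_install_hook(struct spi_master *master)
{
        mpc52xx_spi_set_premessage_hook(master, example_premessage_hook, NULL);
}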
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f92ba138007a..e1f2bf8d7b1e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
49#include <asm/io.h> 49#include <asm/io.h>
50#include <asm/uaccess.h> 50#include <asm/uaccess.h>
51#include <asm/pgtable.h> 51#include <asm/pgtable.h>
52#include <asm-generic/bitops/le.h>
52 53
53#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 54#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
54#include "coalesced_mmio.h" 55#include "coalesced_mmio.h"
@@ -1071,8 +1072,8 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1071 unsigned long rel_gfn = gfn - memslot->base_gfn; 1072 unsigned long rel_gfn = gfn - memslot->base_gfn;
1072 1073
1073 /* avoid RMW */ 1074 /* avoid RMW */
1074 if (!test_bit(rel_gfn, memslot->dirty_bitmap)) 1075 if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
1075 set_bit(rel_gfn, memslot->dirty_bitmap); 1076 generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
1076 } 1077 }
1077} 1078}
1078 1079