Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/linkage.h4
-rw-r--r--arch/alpha/include/asm/thread_info.h2
-rw-r--r--arch/alpha/include/asm/unistd.h12
-rw-r--r--arch/alpha/kernel/process.c20
-rw-r--r--arch/alpha/kernel/smp.c3
-rw-r--r--arch/alpha/kernel/sys_nautilus.c5
-rw-r--r--arch/alpha/kernel/traps.c7
-rw-r--r--arch/alpha/mm/init.c24
-rw-r--r--arch/alpha/mm/numa.c3
-rw-r--r--arch/arc/kernel/disasm.c2
-rw-r--r--arch/arc/kernel/process.c27
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arc/kernel/stacktrace.c7
-rw-r--r--arch/arc/kernel/troubleshoot.c3
-rw-r--r--arch/arc/mm/init.c23
-rw-r--r--arch/arc/plat-arcfpga/Kconfig2
-rw-r--r--arch/arm/Kconfig28
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi10
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi10
-rw-r--r--arch/arm/boot/dts/msm8660-surf.dts6
-rw-r--r--arch/arm/boot/dts/msm8960-cdp.dts6
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi4
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi4
-rw-r--r--arch/arm/boot/dts/spear310.dtsi4
-rw-r--r--arch/arm/boot/dts/spear320.dtsi4
-rw-r--r--arch/arm/boot/dts/vt8500-bv07.dts34
-rw-r--r--arch/arm/boot/dts/vt8500.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8505-ref.dts34
-rw-r--r--arch/arm/boot/dts/wm8505.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8650-mid.dts36
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8850-w70v2.dts40
-rw-r--r--arch/arm/boot/dts/wm8850.dtsi4
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/kirkwood_defconfig1
-rw-r--r--arch/arm/configs/lpc32xx_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/include/asm/pgtable.h9
-rw-r--r--arch/arm/include/asm/system_misc.h3
-rw-r--r--arch/arm/include/asm/unistd.h8
-rw-r--r--arch/arm/kernel/early_printk.c17
-rw-r--r--arch/arm/kernel/etm.c2
-rw-r--r--arch/arm/kernel/process.c108
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kernel/traps.c7
-rw-r--r--arch/arm/kvm/arm.c2
-rw-r--r--arch/arm/mach-at91/at91sam9260.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c2
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c2
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c2
-rw-r--r--arch/arm/mach-at91/cpuidle.c18
-rw-r--r--arch/arm/mach-bcm/Kconfig1
-rw-r--r--arch/arm/mach-bcm/board_bcm.c7
-rw-r--r--arch/arm/mach-davinci/Makefile1
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c71
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c166
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c8
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-davinci/cpufreq.c248
-rw-r--r--arch/arm/mach-davinci/cpuidle.c29
-rw-r--r--arch/arm/mach-davinci/davinci.h11
-rw-r--r--arch/arm/mach-davinci/dm355.c174
-rw-r--r--arch/arm/mach-davinci/dm365.c195
-rw-r--r--arch/arm/mach-davinci/dm644x.c11
-rw-r--r--arch/arm/mach-davinci/pm_domain.c2
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-exynos/cpuidle.c1
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c27
-rw-r--r--arch/arm/mach-exynos/setup-usb-phy.c8
-rw-r--r--arch/arm/mach-gemini/idle.c4
-rw-r--r--arch/arm/mach-gemini/irq.c4
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/clk-busy.c2
-rw-r--r--arch/arm/mach-imx/cpufreq.c8
-rw-r--r--arch/arm/mach-imx/cpuidle-imx5.c37
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c26
-rw-r--r--arch/arm/mach-imx/cpuidle.c80
-rw-r--r--arch/arm/mach-imx/cpuidle.h10
-rw-r--r--arch/arm/mach-imx/pm-imx5.c30
-rw-r--r--arch/arm/mach-integrator/Makefile1
-rw-r--r--arch/arm/mach-integrator/cpu.c224
-rw-r--r--arch/arm/mach-ixp4xx/common.c3
-rw-r--r--arch/arm/mach-mmp/aspenite.c6
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c6
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c2
-rw-r--r--arch/arm/mach-omap1/pm.c6
-rw-r--r--arch/arm/mach-omap2/common.h5
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c52
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c84
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c7
-rw-r--r--arch/arm/mach-omap2/pm.c14
-rw-r--r--arch/arm/mach-omap2/pm44xx.c4
-rw-r--r--arch/arm/mach-orion5x/board-dt.c3
-rw-r--r--arch/arm/mach-orion5x/common.c2
-rw-r--r--arch/arm/mach-pxa/Makefile6
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa2xx.c494
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa3xx.c258
-rw-r--r--arch/arm/mach-pxa/include/mach/generic.h1
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c5
-rw-r--r--arch/arm/mach-s3c24xx/common.c5
-rw-r--r--arch/arm/mach-s3c24xx/cpufreq.c8
-rw-r--r--arch/arm/mach-s3c64xx/cpuidle.c15
-rw-r--r--arch/arm/mach-s3c64xx/setup-usb-phy.c4
-rw-r--r--arch/arm/mach-s5pv210/setup-usb-phy.c4
-rw-r--r--arch/arm/mach-sa1100/Kconfig26
-rw-r--r--arch/arm/mach-sa1100/Makefile3
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1100.c249
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1110.c408
-rw-r--r--arch/arm/mach-sa1100/include/mach/generic.h1
-rw-r--r--arch/arm/mach-shark/core.c3
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c8
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c8
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c12
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c23
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h3
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c6
-rw-r--r--arch/arm/mach-shmobile/suspend.c6
-rw-r--r--arch/arm/mach-tegra/Kconfig8
-rw-r--r--arch/arm/mach-tegra/Makefile1
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c293
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra114.c28
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c72
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra30.c29
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c774
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h7
-rw-r--r--arch/arm/mach-ux500/board-mop500.c5
-rw-r--r--arch/arm/mach-ux500/cpuidle.c58
-rw-r--r--arch/arm/mach-vexpress/Kconfig3
-rw-r--r--arch/arm/mach-vexpress/Makefile2
-rw-r--r--arch/arm/mach-vexpress/reset.c141
-rw-r--r--arch/arm/mach-vexpress/v2m.c13
-rw-r--r--arch/arm/mach-w90x900/dev.c3
-rw-r--r--arch/arm/mm/init.c50
-rw-r--r--arch/arm/plat-samsung/devs.c10
-rw-r--r--arch/arm/plat-samsung/include/plat/fb.h50
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h282
-rw-r--r--arch/arm/plat-samsung/include/plat/usb-phy.h5
-rw-r--r--arch/arm64/Kconfig11
-rw-r--r--arch/arm64/boot/dts/Makefile2
-rw-r--r--arch/arm64/boot/dts/foundation-v8.dts230
-rw-r--r--arch/arm64/boot/dts/rtsm_ve-aemv8a.dts159
-rw-r--r--arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi234
-rw-r--r--arch/arm64/boot/dts/skeleton.dtsi13
-rw-r--r--arch/arm64/configs/defconfig4
-rw-r--r--arch/arm64/include/asm/Kbuild2
-rw-r--r--arch/arm64/include/asm/bitops.h18
-rw-r--r--arch/arm64/include/asm/cmpxchg.h3
-rw-r--r--arch/arm64/include/asm/compat.h22
-rw-r--r--arch/arm64/include/asm/cputype.h30
-rw-r--r--arch/arm64/include/asm/esr.h55
-rw-r--r--arch/arm64/include/asm/exception.h1
-rw-r--r--arch/arm64/include/asm/hardirq.h5
-rw-r--r--arch/arm64/include/asm/io.h4
-rw-r--r--arch/arm64/include/asm/irq.h1
-rw-r--r--arch/arm64/include/asm/smp_plat.h (renamed from arch/arm64/lib/bitops.c)25
-rw-r--r--arch/arm64/include/asm/string.h37
-rw-r--r--arch/arm64/kernel/arm64ksyms.c21
-rw-r--r--arch/arm64/kernel/early_printk.c35
-rw-r--r--arch/arm64/kernel/entry.S53
-rw-r--r--arch/arm64/kernel/head.S4
-rw-r--r--arch/arm64/kernel/irq.c19
-rw-r--r--arch/arm64/kernel/process.c58
-rw-r--r--arch/arm64/kernel/setup.c12
-rw-r--r--arch/arm64/kernel/smp.c115
-rw-r--r--arch/arm64/kernel/smp_psci.c5
-rw-r--r--arch/arm64/kernel/sys32.S7
-rw-r--r--arch/arm64/kernel/traps.c7
-rw-r--r--arch/arm64/lib/Makefile4
-rw-r--r--arch/arm64/lib/bitops.S68
-rw-r--r--arch/arm64/lib/memchr.S44
-rw-r--r--arch/arm64/lib/memcpy.S53
-rw-r--r--arch/arm64/lib/memmove.S57
-rw-r--r--arch/arm64/lib/memset.S53
-rw-r--r--arch/arm64/lib/strchr.S42
-rw-r--r--arch/arm64/lib/strrchr.S43
-rw-r--r--arch/arm64/mm/fault.c6
-rw-r--r--arch/arm64/mm/init.c26
-rw-r--r--arch/arm64/mm/mmu.c13
-rw-r--r--arch/avr32/Kconfig13
-rw-r--r--arch/avr32/configs/atngw100_defconfig2
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig2
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig2
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig2
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig2
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig2
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig2
-rw-r--r--arch/avr32/configs/atstk1002_defconfig2
-rw-r--r--arch/avr32/configs/atstk1003_defconfig2
-rw-r--r--arch/avr32/configs/atstk1004_defconfig2
-rw-r--r--arch/avr32/configs/atstk1006_defconfig2
-rw-r--r--arch/avr32/configs/favr-32_defconfig2
-rw-r--r--arch/avr32/configs/hammerhead_defconfig2
-rw-r--r--arch/avr32/configs/mimc200_defconfig2
-rw-r--r--arch/avr32/include/asm/unistd.h8
-rw-r--r--arch/avr32/kernel/process.c26
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/avr32/mach-at32ap/Makefile1
-rw-r--r--arch/avr32/mach-at32ap/cpufreq.c124
-rw-r--r--arch/avr32/mach-at32ap/include/mach/pm.h24
-rw-r--r--arch/avr32/mach-at32ap/pm-at32ap700x.S7
-rw-r--r--arch/avr32/mm/init.c24
-rw-r--r--arch/blackfin/include/asm/bfin_sport3.h2
-rw-r--r--arch/blackfin/include/asm/unistd.h8
-rw-r--r--arch/blackfin/kernel/dumpstack.c1
-rw-r--r--arch/blackfin/kernel/early_printk.c2
-rw-r--r--arch/blackfin/kernel/process.c32
-rw-r--r--arch/blackfin/kernel/trace.c2
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c8
-rw-r--r--arch/blackfin/mach-common/Makefile1
-rw-r--r--arch/blackfin/mach-common/cpufreq.c258
-rw-r--r--arch/blackfin/mach-common/smp.c2
-rw-r--r--arch/blackfin/mm/init.c22
-rw-r--r--arch/c6x/kernel/process.c28
-rw-r--r--arch/c6x/kernel/traps.c10
-rw-r--r--arch/c6x/mm/init.c30
-rw-r--r--arch/cris/arch-v10/kernel/process.c6
-rw-r--r--arch/cris/arch-v32/kernel/process.c15
-rw-r--r--arch/cris/arch-v32/kernel/smp.c4
-rw-r--r--arch/cris/arch-v32/mach-a3/Makefile1
-rw-r--r--arch/cris/arch-v32/mach-a3/cpufreq.c152
-rw-r--r--arch/cris/arch-v32/mach-fs/Makefile1
-rw-r--r--arch/cris/arch-v32/mach-fs/cpufreq.c145
-rw-r--r--arch/cris/include/asm/processor.h7
-rw-r--r--arch/cris/include/asm/unistd.h8
-rw-r--r--arch/cris/kernel/process.c49
-rw-r--r--arch/cris/kernel/traps.c7
-rw-r--r--arch/cris/mm/init.c16
-rw-r--r--arch/frv/include/asm/unistd.h10
-rw-r--r--arch/frv/kernel/process.c27
-rw-r--r--arch/frv/kernel/traps.c14
-rw-r--r--arch/frv/mm/init.c38
-rw-r--r--arch/h8300/include/asm/linkage.h2
-rw-r--r--arch/h8300/include/asm/unistd.h7
-rw-r--r--arch/h8300/kernel/process.c37
-rw-r--r--arch/h8300/kernel/traps.c7
-rw-r--r--arch/h8300/mm/init.c30
-rw-r--r--arch/hexagon/kernel/process.c23
-rw-r--r--arch/hexagon/kernel/smp.c2
-rw-r--r--arch/hexagon/kernel/traps.c8
-rw-r--r--arch/hexagon/kernel/vm_events.c2
-rw-r--r--arch/ia64/Kconfig8
-rw-r--r--arch/ia64/hp/sim/simserial.c16
-rw-r--r--arch/ia64/include/asm/futex.h5
-rw-r--r--arch/ia64/include/asm/hugetlb.h1
-rw-r--r--arch/ia64/include/asm/irqflags.h1
-rw-r--r--arch/ia64/include/asm/linkage.h4
-rw-r--r--arch/ia64/include/asm/mca.h1
-rw-r--r--arch/ia64/include/asm/numa.h5
-rw-r--r--arch/ia64/include/asm/thread_info.h2
-rw-r--r--arch/ia64/include/asm/unistd.h10
-rw-r--r--arch/ia64/kernel/Makefile1
-rw-r--r--arch/ia64/kernel/cpufreq/Kconfig29
-rw-r--r--arch/ia64/kernel/cpufreq/Makefile2
-rw-r--r--arch/ia64/kernel/cpufreq/acpi-cpufreq.c437
-rw-r--r--arch/ia64/kernel/fsys.S49
-rw-r--r--arch/ia64/kernel/iosapic.c34
-rw-r--r--arch/ia64/kernel/irq.c8
-rw-r--r--arch/ia64/kernel/mca.c37
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/perfmon.c14
-rw-r--r--arch/ia64/kernel/process.c90
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kvm/vtlb.c2
-rw-r--r--arch/ia64/mm/contig.c2
-rw-r--r--arch/ia64/mm/discontig.c9
-rw-r--r--arch/ia64/mm/init.c23
-rw-r--r--arch/ia64/mm/ioremap.c14
-rw-r--r--arch/ia64/mm/numa.c20
-rw-r--r--arch/ia64/pci/pci.c11
-rw-r--r--arch/ia64/sn/kernel/tiocx.c5
-rw-r--r--arch/m32r/include/asm/unistd.h10
-rw-r--r--arch/m32r/kernel/process.c20
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/m32r/kernel/traps.c15
-rw-r--r--arch/m32r/mm/init.c26
-rw-r--r--arch/m68k/Kconfig.bus10
-rw-r--r--arch/m68k/Kconfig.devices24
-rw-r--r--arch/m68k/atari/ataints.c152
-rw-r--r--arch/m68k/atari/config.c239
-rw-r--r--arch/m68k/include/asm/atarihw.h6
-rw-r--r--arch/m68k/include/asm/atariints.h11
-rw-r--r--arch/m68k/include/asm/cmpxchg.h3
-rw-r--r--arch/m68k/include/asm/delay.h23
-rw-r--r--arch/m68k/include/asm/io_mm.h136
-rw-r--r--arch/m68k/include/asm/irq.h6
-rw-r--r--arch/m68k/include/asm/raw_io.h109
-rw-r--r--arch/m68k/include/asm/string.h14
-rw-r--r--arch/m68k/include/asm/unistd.h8
-rw-r--r--arch/m68k/kernel/process.c32
-rw-r--r--arch/m68k/kernel/setup_mm.c6
-rw-r--r--arch/m68k/kernel/traps.c12
-rw-r--r--arch/m68k/lib/string.c2
-rw-r--r--arch/m68k/mm/init.c24
-rw-r--r--arch/metag/Kconfig4
-rw-r--r--arch/metag/Makefile2
-rw-r--r--arch/metag/boot/dts/Makefile10
-rw-r--r--arch/metag/configs/meta1_defconfig1
-rw-r--r--arch/metag/configs/meta2_defconfig1
-rw-r--r--arch/metag/configs/meta2_smp_defconfig1
-rw-r--r--arch/metag/include/asm/metag_mem.h3
-rw-r--r--arch/metag/include/asm/thread_info.h2
-rw-r--r--arch/metag/include/uapi/asm/Kbuild1
-rw-r--r--arch/metag/include/uapi/asm/ech.h15
-rw-r--r--arch/metag/kernel/cachepart.c16
-rw-r--r--arch/metag/kernel/da.c2
-rw-r--r--arch/metag/kernel/head.S8
-rw-r--r--arch/metag/kernel/perf/perf_event.c74
-rw-r--r--arch/metag/kernel/process.c37
-rw-r--r--arch/metag/kernel/ptrace.c34
-rw-r--r--arch/metag/kernel/setup.c1
-rw-r--r--arch/metag/kernel/smp.c117
-rw-r--r--arch/metag/kernel/traps.c6
-rw-r--r--arch/metag/mm/Kconfig3
-rw-r--r--arch/metag/mm/init.c31
-rw-r--r--arch/metag/oprofile/Makefile17
-rw-r--r--arch/metag/oprofile/backtrace.c63
-rw-r--r--arch/metag/oprofile/backtrace.h6
-rw-r--r--arch/metag/oprofile/common.c66
-rw-r--r--arch/microblaze/Kconfig4
-rw-r--r--arch/microblaze/include/asm/processor.h5
-rw-r--r--arch/microblaze/include/asm/setup.h1
-rw-r--r--arch/microblaze/include/asm/thread_info.h1
-rw-r--r--arch/microblaze/include/asm/unistd.h8
-rw-r--r--arch/microblaze/kernel/early_printk.c26
-rw-r--r--arch/microblaze/kernel/process.c67
-rw-r--r--arch/microblaze/kernel/traps.c6
-rw-r--r--arch/microblaze/mm/init.c34
-rw-r--r--arch/mips/Kconfig13
-rw-r--r--arch/mips/bcm63xx/dev-spi.c11
-rw-r--r--arch/mips/include/asm/hugetlb.h1
-rw-r--r--arch/mips/include/asm/linkage.h3
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h1
-rw-r--r--arch/mips/include/asm/unistd.h8
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/cpufreq/Kconfig41
-rw-r--r--arch/mips/kernel/cpufreq/Makefile5
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c255
-rw-r--r--arch/mips/kernel/early_printk.c12
-rw-r--r--arch/mips/kernel/linux32.c119
-rw-r--r--arch/mips/kernel/process.c48
-rw-r--r--arch/mips/kernel/scall64-n32.S8
-rw-r--r--arch/mips/kernel/scall64-o32.S6
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c15
-rw-r--r--arch/mips/mm/init.c37
-rw-r--r--arch/mips/pci/pci.c8
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c4
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/include/asm/unistd.h10
-rw-r--r--arch/mn10300/kernel/process.c71
-rw-r--r--arch/mn10300/kernel/smp.c7
-rw-r--r--arch/mn10300/kernel/traps.c11
-rw-r--r--arch/mn10300/mm/init.c23
-rw-r--r--arch/openrisc/include/asm/thread_info.h2
-rw-r--r--arch/openrisc/kernel/Makefile2
-rw-r--r--arch/openrisc/kernel/idle.c73
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/openrisc/kernel/traps.c11
-rw-r--r--arch/openrisc/mm/init.c27
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/Kconfig.debug14
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/include/asm/unistd.h8
-rw-r--r--arch/parisc/kernel/process.c22
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/parisc/kernel/sys_parisc32.c41
-rw-r--r--arch/parisc/kernel/syscall_table.S8
-rw-r--r--arch/parisc/kernel/traps.c10
-rw-r--r--arch/parisc/mm/init.c25
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/hugetlb.h1
-rw-r--r--arch/powerpc/include/asm/linkage.h13
-rw-r--r--arch/powerpc/include/asm/systbl.h10
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h6
-rw-r--r--arch/powerpc/include/asm/uprobes.h1
-rw-r--r--arch/powerpc/include/uapi/asm/linkage.h6
-rw-r--r--arch/powerpc/kernel/crash_dump.c5
-rw-r--r--arch/powerpc/kernel/fadump.c5
-rw-r--r--arch/powerpc/kernel/idle.c89
-rw-r--r--arch/powerpc/kernel/kvm.c7
-rw-r--r--arch/powerpc/kernel/nvram_64.c3
-rw-r--r--arch/powerpc/kernel/process.c14
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c109
-rw-r--r--arch/powerpc/kernel/udbg.c6
-rw-r--r--arch/powerpc/kernel/uprobes.c29
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/mm/init_64.c11
-rw-r--r--arch/powerpc/mm/mem.c35
-rw-r--r--arch/powerpc/mm/numa.c10
-rw-r--r--arch/powerpc/platforms/44x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c5
-rw-r--r--arch/powerpc/platforms/cell/Kconfig26
-rw-r--r--arch/powerpc/platforms/cell/Makefile3
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.c209
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.h24
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c115
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c156
-rw-r--r--arch/powerpc/platforms/pasemi/cpufreq.c5
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_32.c14
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_64.c5
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c12
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c34
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/Kconfig.debug14
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c4
-rw-r--r--arch/s390/include/asm/bitops.h117
-rw-r--r--arch/s390/include/asm/ccwdev.h3
-rw-r--r--arch/s390/include/asm/cio.h2
-rw-r--r--arch/s390/include/asm/compat.h57
-rw-r--r--arch/s390/include/asm/elf.h23
-rw-r--r--arch/s390/include/asm/hugetlb.h56
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/pci_debug.h9
-rw-r--r--arch/s390/include/asm/pci_insn.h203
-rw-r--r--arch/s390/include/asm/pci_io.h16
-rw-r--r--arch/s390/include/asm/pgtable.h97
-rw-r--r--arch/s390/include/asm/processor.h3
-rw-r--r--arch/s390/include/asm/ptrace.h6
-rw-r--r--arch/s390/include/asm/syscall.h1
-rw-r--r--arch/s390/include/asm/thread_info.h6
-rw-r--r--arch/s390/include/asm/unistd.h8
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h20
-rw-r--r--arch/s390/include/uapi/asm/statfs.h63
-rw-r--r--arch/s390/kernel/Makefile17
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/compat_linux.c86
-rw-r--r--arch/s390/kernel/compat_linux.h5
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/compat_wrapper.S64
-rw-r--r--arch/s390/kernel/dis.c9
-rw-r--r--arch/s390/kernel/dumpstack.c212
-rw-r--r--arch/s390/kernel/entry.S39
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/entry64.S43
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/machine_kexec.c30
-rw-r--r--arch/s390/kernel/process.c32
-rw-r--r--arch/s390/kernel/setup.c9
-rw-r--r--arch/s390/kernel/smp.c18
-rw-r--r--arch/s390/kernel/suspend.c31
-rw-r--r--arch/s390/kernel/swsusp_asm64.S29
-rw-r--r--arch/s390/kernel/sys_s390.c14
-rw-r--r--arch/s390/kernel/syscalls.S18
-rw-r--r--arch/s390/kernel/traps.c250
-rw-r--r--arch/s390/kernel/vtime.c5
-rw-r--r--arch/s390/kvm/trace.h4
-rw-r--r--arch/s390/lib/Makefile1
-rw-r--r--arch/s390/lib/usercopy.c8
-rw-r--r--arch/s390/mm/cmm.c8
-rw-r--r--arch/s390/mm/fault.c9
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/init.c45
-rw-r--r--arch/s390/mm/pageattr.c24
-rw-r--r--arch/s390/mm/pgtable.c235
-rw-r--r--arch/s390/mm/vmem.c15
-rw-r--r--arch/s390/net/bpf_jit_comp.c3
-rw-r--r--arch/s390/oprofile/init.c1
-rw-r--r--arch/s390/pci/Makefile4
-rw-r--r--arch/s390/pci/pci.c153
-rw-r--r--arch/s390/pci/pci_clp.c13
-rw-r--r--arch/s390/pci/pci_debug.c7
-rw-r--r--arch/s390/pci/pci_dma.c9
-rw-r--r--arch/s390/pci/pci_insn.c202
-rw-r--r--arch/s390/pci/pci_msi.c10
-rw-r--r--arch/score/kernel/process.c18
-rw-r--r--arch/score/kernel/traps.c12
-rw-r--r--arch/score/mm/init.c33
-rw-r--r--arch/sh/Kconfig22
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c4
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c2
-rw-r--r--arch/sh/include/asm/hugetlb.h1
-rw-r--r--arch/sh/include/asm/suspend.h4
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/include/asm/unistd.h8
-rw-r--r--arch/sh/kernel/Makefile1
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c101
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm.c3
-rw-r--r--arch/sh/kernel/cpufreq.c201
-rw-r--r--arch/sh/kernel/dumpstack.c6
-rw-r--r--arch/sh/kernel/idle.c101
-rw-r--r--arch/sh/kernel/process_32.c6
-rw-r--r--arch/sh/kernel/process_64.c1
-rw-r--r--arch/sh/kernel/sh_bios.c4
-rw-r--r--arch/sh/kernel/smp.c2
-rw-r--r--arch/sh/mm/init.c26
-rw-r--r--arch/sparc/Kconfig26
-rw-r--r--arch/sparc/include/asm/hugetlb.h1
-rw-r--r--arch/sparc/include/asm/thread_info_32.h2
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/unistd.h8
-rw-r--r--arch/sparc/kernel/Makefile3
-rw-r--r--arch/sparc/kernel/hvtramp.S3
-rw-r--r--arch/sparc/kernel/process_32.c44
-rw-r--r--arch/sparc/kernel/process_64.c55
-rw-r--r--arch/sparc/kernel/smp_32.c2
-rw-r--r--arch/sparc/kernel/smp_64.c2
-rw-r--r--arch/sparc/kernel/sys32.S4
-rw-r--r--arch/sparc/kernel/sys_sparc32.c75
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c6
-rw-r--r--arch/sparc/kernel/systbls_64.S8
-rw-r--r--arch/sparc/kernel/trampoline_64.S3
-rw-r--r--arch/sparc/kernel/traps_64.c7
-rw-r--r--arch/sparc/kernel/us2e_cpufreq.c413
-rw-r--r--arch/sparc/kernel/us3_cpufreq.c274
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/usercopy.c9
-rw-r--r--arch/sparc/mm/init_32.c12
-rw-r--r--arch/sparc/mm/init_64.c7
-rw-r--r--arch/tile/Kconfig9
-rw-r--r--arch/tile/include/asm/hugetlb.h1
-rw-r--r--arch/tile/include/asm/thread_info.h2
-rw-r--r--arch/tile/include/asm/uaccess.h7
-rw-r--r--arch/tile/kernel/compat.c6
-rw-r--r--arch/tile/kernel/early_printk.c27
-rw-r--r--arch/tile/kernel/process.c68
-rw-r--r--arch/tile/kernel/smpboot.c4
-rw-r--r--arch/tile/lib/uaccess.c8
-rw-r--r--arch/tile/mm/pgtable.c7
-rw-r--r--arch/um/drivers/chan_kern.c6
-rw-r--r--arch/um/drivers/line.c8
-rw-r--r--arch/um/kernel/early_printk.c8
-rw-r--r--arch/um/kernel/mem.c26
-rw-r--r--arch/um/kernel/process.c27
-rw-r--r--arch/um/kernel/sysrq.c12
-rw-r--r--arch/um/sys-ppc/sysrq.c2
-rw-r--r--arch/unicore32/kernel/Makefile1
-rw-r--r--arch/unicore32/kernel/cpu-ucv2.c93
-rw-r--r--arch/unicore32/kernel/early_printk.c12
-rw-r--r--arch/unicore32/kernel/process.c27
-rw-r--r--arch/unicore32/kernel/traps.c6
-rw-r--r--arch/unicore32/mm/init.c31
-rw-r--r--arch/unicore32/mm/ioremap.c17
-rw-r--r--arch/x86/Kconfig94
-rw-r--r--arch/x86/Kconfig.debug16
-rw-r--r--arch/x86/boot/compressed/head_64.S2
-rw-r--r--arch/x86/ia32/Makefile3
-rw-r--r--arch/x86/ia32/ia32_aout.c1
-rw-r--r--arch/x86/ia32/ipc32.c54
-rw-r--r--arch/x86/ia32/sys_ia32.c37
-rw-r--r--arch/x86/include/asm/bug.h3
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/context_tracking.h21
-rw-r--r--arch/x86/include/asm/cpufeature.h23
-rw-r--r--arch/x86/include/asm/fixmap.h7
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/include/asm/hypervisor.h16
-rw-r--r--arch/x86/include/asm/mce.h4
-rw-r--r--arch/x86/include/asm/msr.h14
-rw-r--r--arch/x86/include/asm/page_64_types.h1
-rw-r--r--arch/x86/include/asm/paravirt.h4
-rw-r--r--arch/x86/include/asm/paravirt_types.h2
-rw-r--r--arch/x86/include/asm/perf_event_p4.h62
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/processor.h25
-rw-r--r--arch/x86/include/asm/suspend_32.h1
-rw-r--r--arch/x86/include/asm/suspend_64.h3
-rw-r--r--arch/x86/include/asm/sys_ia32.h12
-rw-r--r--arch/x86/include/asm/syscalls.h4
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/asm/unistd.h8
-rw-r--r--arch/x86/include/asm/uprobes.h1
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h5
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S5
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/amd_nb.c5
-rw-r--r--arch/x86/kernel/aperture_64.c2
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/cpu/Makefile9
-rw-r--r--arch/x86/kernel/cpu/amd.c48
-rw-r--r--arch/x86/kernel/cpu/bugs.c34
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/cyrix.c5
-rw-r--r--arch/x86/kernel/cpu/intel.c34
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c39
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c25
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.pl48
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh41
-rw-r--r--arch/x86/kernel/cpu/perf_event.c89
-rw-r--r--arch/x86/kernel/cpu/perf_event.h56
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c138
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c547
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c38
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c182
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c876
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h64
-rw-r--r--arch/x86/kernel/cpu/perf_event_knc.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c2
-rw-r--r--arch/x86/kernel/cpu/proc.c6
-rw-r--r--arch/x86/kernel/cpu/scattered.c3
-rw-r--r--arch/x86/kernel/doublefault_32.c2
-rw-r--r--arch/x86/kernel/dumpstack.c28
-rw-r--r--arch/x86/kernel/dumpstack_32.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c6
-rw-r--r--arch/x86/kernel/early_printk.c21
-rw-r--r--arch/x86/kernel/head64.c6
-rw-r--r--arch/x86/kernel/kprobes/core.c6
-rw-r--r--arch/x86/kernel/kvm.c8
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/process.c131
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/quirks.c18
-rw-r--r--arch/x86/kernel/rtc.c69
-rw-r--r--arch/x86/kernel/setup.c4
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/tls.c14
-rw-r--r--arch/x86/kernel/traps.c77
-rw-r--r--arch/x86/kernel/tsc.c6
-rw-r--r--arch/x86/kernel/uprobes.c29
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/lguest/Kconfig3
-rw-r--r--arch/x86/lib/checksum_32.S2
-rw-r--r--arch/x86/lib/memcpy_32.c6
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S6
-rw-r--r--arch/x86/lib/usercopy_32.c6
-rw-r--r--arch/x86/mm/amdtopology.c3
-rw-r--r--arch/x86/mm/fault.c10
-rw-r--r--arch/x86/mm/highmem_32.c1
-rw-r--r--arch/x86/mm/init.c5
-rw-r--r--arch/x86/mm/init_32.c10
-rw-r--r--arch/x86/mm/init_64.c75
-rw-r--r--arch/x86/mm/ioremap.c7
-rw-r--r--arch/x86/mm/numa.c9
-rw-r--r--arch/x86/mm/pageattr-test.c5
-rw-r--r--arch/x86/mm/pageattr.c9
-rw-r--r--arch/x86/pci/common.c11
-rw-r--r--arch/x86/pci/xen.c6
-rw-r--r--arch/x86/platform/efi/efi.c24
-rw-r--r--arch/x86/platform/mrst/mrst.c3
-rw-r--r--arch/x86/platform/mrst/vrtc.c44
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c4
-rw-r--r--arch/x86/platform/uv/uv_time.c3
-rw-r--r--arch/x86/power/cpu.c13
-rw-r--r--arch/x86/syscalls/syscall_32.tbl10
-rw-r--r--arch/x86/tools/Makefile1
-rw-r--r--arch/x86/tools/relocs.c783
-rw-r--r--arch/x86/tools/relocs.h36
-rw-r--r--arch/x86/tools/relocs_32.c17
-rw-r--r--arch/x86/tools/relocs_64.c17
-rw-r--r--arch/x86/tools/relocs_common.c76
-rw-r--r--arch/x86/um/tls_32.c5
-rw-r--r--arch/x86/xen/Kconfig2
-rw-r--r--arch/x86/xen/enlighten.c58
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/x86/xen/smp.c23
-rw-r--r--arch/x86/xen/spinlock.c25
-rw-r--r--arch/x86/xen/time.c13
-rw-r--r--arch/xtensa/include/asm/unistd.h8
-rw-r--r--arch/xtensa/kernel/process.c14
-rw-r--r--arch/xtensa/kernel/traps.c10
-rw-r--r--arch/xtensa/mm/init.c21
676 files changed, 9525 insertions, 12448 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 1455579791ec..99f0e17df429 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -157,9 +157,6 @@ config ARCH_USE_BUILTIN_BSWAP
 	  instructions should set this. And it shouldn't hurt to set it
 	  on architectures that don't have such instructions.
 
-config HAVE_SYSCALL_WRAPPERS
-	bool
-
 config KRETPROBES
 	def_bool y
 	depends on KPROBES && HAVE_KRETPROBES
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8a33ba01301f..8629127640cf 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -4,7 +4,6 @@ config ALPHA
 	select HAVE_AOUT
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
diff --git a/arch/alpha/include/asm/linkage.h b/arch/alpha/include/asm/linkage.h
index 291c2d01c44f..7cfd06e8c935 100644
--- a/arch/alpha/include/asm/linkage.h
+++ b/arch/alpha/include/asm/linkage.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
-/* Nothing to see here... */
+#define cond_syscall(x)  asm(".weak\t" #x "\n" #x " = sys_ni_syscall")
+#define SYSCALL_ALIAS(alias, name) \
+	asm ( #alias " = " #name "\n\t.globl " #alias)
 
 #endif
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 1f8c72959fb6..52cd2a4a3ff4 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -95,8 +95,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TS_POLLING		0x0010	/* idle task polling need_resched,
 					   skip sending interrupt */
 
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK	1
 static inline void set_restore_sigmask(void)
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 6d6fe7ab5473..43baee17acdf 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -18,16 +18,4 @@
 #define __ARCH_WANT_SYS_VFORK
 #define __ARCH_WANT_SYS_CLONE
 
-/* "Conditional" syscalls.  What we want is
-
-	__attribute__((weak,alias("sys_ni_syscall")))
-
-   but that raises the problem of what type to give the symbol.  If we use
-   a prototype, it'll conflict with the definition given in this file and
-   others.  If we use __typeof, we discover that not all symbols actually
-   have declarations.  If we use no prototype, then we get warnings from
-   -Wstrict-prototypes.  Ho hum.  */
-
-#define cond_syscall(x)  asm(".weak\t" #x "\n" #x " = sys_ni_syscall")
-
 #endif /* _ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 63d27fb9b023..ab80a80d38a2 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -46,25 +46,6 @@
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL(pm_power_off);
 
-void
-cpu_idle(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		/* FIXME -- EV6 and LCA45 know how to power down
-		   the CPU.  */
-
-		rcu_idle_enter();
-		while (!need_resched())
-			cpu_relax();
-
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
-
 struct halt_info {
 	int mode;
 	char *restart_cmd;
@@ -194,6 +175,7 @@ machine_power_off(void)
 void
 show_regs(struct pt_regs *regs)
 {
+	show_regs_print_info(KERN_DEFAULT);
 	dik_show_regs(regs, NULL);
 }
 
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9603bc234b47..7b60834fb4b2 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -167,8 +167,7 @@ smp_callin(void)
 	      cpuid, current, current->active_mm));
 
 	preempt_disable();
-	/* Do nothing.  */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 1383f8601a93..1d4aabfcf9a1 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -185,7 +185,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
 	mb();
 }
 
-extern void free_reserved_mem(void *, void *);
 extern void pcibios_claim_one_bus(struct pci_bus *);
 
 static struct resource irongate_io = {
@@ -239,8 +238,8 @@ nautilus_init_pci(void)
 	if (pci_mem < memtop)
 		memtop = pci_mem;
 	if (memtop > alpha_mv.min_mem_address) {
-		free_reserved_mem(__va(alpha_mv.min_mem_address),
-				  __va(memtop));
+		free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address),
+				   (unsigned long)__va(memtop), 0, NULL);
 		printk("nautilus_init_pci: %ldk freed\n",
 			(memtop - alpha_mv.min_mem_address) >> 10);
 	}
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 4037461a6493..affccb959a9e 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -169,13 +169,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 	dik_show_trace(sp);
 }
 
-void dump_stack(void)
-{
-	show_stack(NULL, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
 void
 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
 {
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 1ad6ca74bed2..0ba85ee4a466 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -31,6 +31,7 @@
 #include <asm/console.h>
 #include <asm/tlb.h>
 #include <asm/setup.h>
+#include <asm/sections.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long);
 
@@ -281,8 +282,6 @@ printk_memory_info(void)
 {
 	unsigned long codesize, reservedpages, datasize, initsize, tmp;
 	extern int page_is_ram(unsigned long) __init;
-	extern char _text, _etext, _data, _edata;
-	extern char __init_begin, __init_end;
 
 	/* printk all informations */
 	reservedpages = 0;
@@ -318,32 +317,15 @@ mem_init(void)
 #endif /* CONFIG_DISCONTIGMEM */
 
 void
-free_reserved_mem(void *start, void *end)
-{
-	void *__start = start;
-	for (; __start < end; __start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(__start));
-		init_page_count(virt_to_page(__start));
-		free_page((long)__start);
-		totalram_pages++;
-	}
-}
-
-void
 free_initmem(void)
 {
-	extern char __init_begin, __init_end;
-
-	free_reserved_mem(&__init_begin, &__init_end);
-	printk ("Freeing unused kernel memory: %ldk freed\n",
-		(&__init_end - &__init_begin) >> 10);
+	free_initmem_default(0);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void
 free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_reserved_mem((void *)start, (void *)end);
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+	free_reserved_area(start, end, 0, "initrd");
 }
 #endif
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 3973ae395772..33885048fa36 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -17,6 +17,7 @@
 
 #include <asm/hwrpb.h>
 #include <asm/pgalloc.h>
+#include <asm/sections.h>
 
 pg_data_t node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
@@ -325,8 +326,6 @@ void __init mem_init(void)
 {
 	unsigned long codesize, reservedpages, datasize, initsize, pfn;
 	extern int page_is_ram(unsigned long) __init;
-	extern char _text, _etext, _data, _edata;
-	extern char __init_begin, __init_end;
 	unsigned long nid, i;
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index 2f390289a792..d14764ae2c60 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -535,4 +535,4 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
 	return instr.is_branch;
 }
 
-#endif  /* CONFIG_KGDB || CONFIG_MISALIGN_ACCESS || CONFIG_KPROBES */
+#endif  /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0a7531d99294..cad66851e0c4 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,37 +41,12 @@ SYSCALL_DEFINE0(arc_gettls)
 	return task_thread_info(current)->thr_ptr;
 }
 
-static inline void arch_idle(void)
+void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
 	__asm__("sleep 0x3");
 }
 
-void cpu_idle(void)
-{
-	/* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-doze:
-		local_irq_disable();
-		if (!need_resched()) {
-			arch_idle();
-			goto doze;
-		} else {
-			local_irq_enable();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
-		schedule_preempt_disabled();
-	}
-}
-
 asmlinkage void ret_from_fork(void);
 
 /* Layout of Child kernel mode stack as setup at the end of this function is
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 3af3e06dcf02..5c7fd603d216 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -141,7 +141,7 @@ void __cpuinit start_kernel_secondary(void)
 
 	local_irq_enable();
 	preempt_disable();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /*
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index a63ff842564b..ca0207b9d5b6 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -220,13 +220,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 	show_stacktrace(tsk, NULL);
 }
 
-/* Expected by Rest of kernel code */
-void dump_stack(void)
-{
-	show_stacktrace(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
 /* Another API expected by schedular, shows up in "ps" as Wait Channel
  * Ofcourse just returning schedule( ) would be pointless so unwind until
  * the function is not in schedular code
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7c10873c311f..0aec01985bf9 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -71,7 +71,7 @@ void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 	}
 
 done:
-	pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
+	pr_info("Path: %s\n", path_nm);
 }
 EXPORT_SYMBOL(print_task_path_n_nm);
 
@@ -163,6 +163,7 @@ void show_regs(struct pt_regs *regs)
 		return;
 
 	print_task_path_n_nm(tsk, buf);
+	show_regs_print_info(KERN_INFO);
 
 	if (current->thread.cause_code)
 		show_ecr_verbose(regs);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index caf797de23fc..727d4794ea0f 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -144,37 +144,18 @@ void __init mem_init(void)
 		PAGES_TO_KB(reserved_pages));
 }
 
-static void __init free_init_pages(const char *what, unsigned long begin,
-				   unsigned long end)
-{
-	unsigned long addr;
-
-	pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
-		what, TO_KB(end - begin), begin, end);
-
-	/* need to check that the page we free is not a partial page */
-	for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		free_page(addr);
-		totalram_pages++;
-	}
-}
-
 /*
  * free_initmem: Free all the __init memory.
  */
 void __init_refok free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
-			(unsigned long)__init_begin,
-			(unsigned long)__init_end);
+	free_initmem_default(0);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	free_reserved_area(start, end, 0, "initrd");
 }
 #endif
 
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
index b41e786cdbc0..295cefeb25d3 100644
--- a/arch/arc/plat-arcfpga/Kconfig
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -53,7 +53,7 @@ menuconfig ARC_HAS_BVCI_LAT_UNIT
53 bool "BVCI Bus Latency Unit" 53 bool "BVCI Bus Latency Unit"
54 depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4 54 depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
55 help 55 help
56 IP to add artifical latency to BVCI Bus Based FPGA builds. 56 IP to add artificial latency to BVCI Bus Based FPGA builds.
57 The default latency (even worst case) for FPGA is non-realistic 57 The default latency (even worst case) for FPGA is non-realistic
58 (~10 SDRAM, ~5 SSRAM). 58 (~10 SDRAM, ~5 SSRAM).
59 59
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1cacda426a0e..006f9838dd43 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,6 +15,7 @@ config ARM
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HARDIRQS_SW_RESEND
@@ -549,6 +550,8 @@ config ARCH_IXP4XX
 	select GENERIC_CLOCKEVENTS
 	select MIGHT_HAVE_PCI
 	select NEED_MACH_IO_H
+	select USB_EHCI_BIG_ENDIAN_MMIO
+	select USB_EHCI_BIG_ENDIAN_DESC
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
 
@@ -2160,7 +2163,6 @@ endmenu
 menu "CPU Power Management"
 
 if ARCH_HAS_CPUFREQ
-
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_IMX
@@ -2170,30 +2172,6 @@ config CPU_FREQ_IMX
 	help
 	  This enables the CPUfreq driver for i.MX CPUs.
 
-config CPU_FREQ_SA1100
-	bool
-
-config CPU_FREQ_SA1110
-	bool
-
-config CPU_FREQ_INTEGRATOR
-	tristate "CPUfreq driver for ARM Integrator CPUs"
-	depends on ARCH_INTEGRATOR && CPU_FREQ
-	default y
-	help
-	  This enables the CPUfreq driver for ARM Integrator CPUs.
-
-	  For details, take a look at <file:Documentation/cpu-freq>.
-
-	  If in doubt, say Y.
-
-config CPU_FREQ_PXA
-	bool
-	depends on CPU_FREQ && ARCH_PXA && PXA25x
-	default y
-	select CPU_FREQ_DEFAULT_GOV_USERSPACE
-	select CPU_FREQ_TABLE
-
 config CPU_FREQ_S3C
 	bool
 	help
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb7bcc51608d..39253b9aedd1 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -322,6 +322,24 @@
 			};
 		};
 
+		spi0 {
+			pinctrl_spi0: spi0-0 {
+				atmel,pins =
+					<0 0 0x1 0x0	/* PA0 periph A SPI0_MISO pin */
+					 0 1 0x1 0x0	/* PA1 periph A SPI0_MOSI pin */
+					 0 2 0x1 0x0>;	/* PA2 periph A SPI0_SPCK pin */
+			};
+		};
+
+		spi1 {
+			pinctrl_spi1: spi1-0 {
+				atmel,pins =
+					<1 0 0x1 0x0	/* PB0 periph A SPI1_MISO pin */
+					 1 1 0x1 0x0	/* PB1 periph A SPI1_MOSI pin */
+					 1 2 0x1 0x0>;	/* PB2 periph A SPI1_SPCK pin */
+			};
+		};
+
 		pioA: gpio@fffff400 {
 			compatible = "atmel,at91rm9200-gpio";
 			reg = <0xfffff400 0x200>;
@@ -471,6 +489,28 @@
 			status = "disabled";
 		};
 
+		spi0: spi@fffc8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xfffc8000 0x200>;
+			interrupts = <12 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi0>;
+			status = "disabled";
+		};
+
+		spi1: spi@fffcc000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xfffcc000 0x200>;
+			interrupts = <13 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi1>;
+			status = "disabled";
+		};
+
 		adc0: adc@fffe0000 {
 			compatible = "atmel,at91sam9260-adc";
 			reg = <0xfffe0000 0x100>;
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 271d4de026e9..94b58ab2cc08 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -303,6 +303,24 @@
 			};
 		};
 
+		spi0 {
+			pinctrl_spi0: spi0-0 {
+				atmel,pins =
+					<0 0 0x2 0x0	/* PA0 periph B SPI0_MISO pin */
+					 0 1 0x2 0x0	/* PA1 periph B SPI0_MOSI pin */
+					 0 2 0x2 0x0>;	/* PA2 periph B SPI0_SPCK pin */
+			};
+		};
+
+		spi1 {
+			pinctrl_spi1: spi1-0 {
+				atmel,pins =
+					<1 12 0x1 0x0	/* PB12 periph A SPI1_MISO pin */
+					 1 13 0x1 0x0	/* PB13 periph A SPI1_MOSI pin */
+					 1 14 0x1 0x0>;	/* PB14 periph A SPI1_SPCK pin */
+			};
+		};
+
 		pioA: gpio@fffff200 {
 			compatible = "atmel,at91rm9200-gpio";
 			reg = <0xfffff200 0x200>;
@@ -462,6 +480,28 @@
 			reg = <0xfffffd40 0x10>;
 			status = "disabled";
 		};
+
+		spi0: spi@fffa4000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xfffa4000 0x200>;
+			interrupts = <14 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi0>;
+			status = "disabled";
+		};
+
+		spi1: spi@fffa8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xfffa8000 0x200>;
+			interrupts = <15 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi1>;
+			status = "disabled";
+		};
 	};
 
 	nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 1eb08728f527..a14e424b2e81 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -79,6 +79,16 @@
 			};
 		};
 	};
+
+	spi0: spi@fffa4000 {
+		status = "okay";
+		cs-gpios = <&pioA 5 0>, <0>, <0>, <0>;
+		mtd_dataflash@0 {
+			compatible = "atmel,at45", "atmel,dataflash";
+			spi-max-frequency = <50000000>;
+			reg = <0>;
+		};
+	};
 };
 
 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index da15e83e7f17..23d1f468f27f 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -96,6 +96,16 @@
 			status = "okay";
 			pinctrl-0 = <&pinctrl_ssc0_tx>;
 		};
+
+		spi0: spi@fffc8000 {
+			status = "okay";
+			cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
+			mtd_dataflash@0 {
+				compatible = "atmel,at45", "atmel,dataflash";
+				spi-max-frequency = <50000000>;
+				reg = <1>;
+			};
+		};
 	};
 
 	nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 6b1d4cab24c2..cfdf429578b5 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -322,6 +322,24 @@
 			};
 		};
 
+		spi0 {
+			pinctrl_spi0: spi0-0 {
+				atmel,pins =
+					<1 0 0x1 0x0	/* PB0 periph A SPI0_MISO pin */
+					 1 1 0x1 0x0	/* PB1 periph A SPI0_MOSI pin */
+					 1 2 0x1 0x0>;	/* PB2 periph A SPI0_SPCK pin */
+			};
+		};
+
+		spi1 {
+			pinctrl_spi1: spi1-0 {
+				atmel,pins =
+					<1 14 0x1 0x0	/* PB14 periph A SPI1_MISO pin */
+					 1 15 0x1 0x0	/* PB15 periph A SPI1_MOSI pin */
+					 1 16 0x1 0x0>;	/* PB16 periph A SPI1_SPCK pin */
+			};
+		};
+
 		pioA: gpio@fffff200 {
 			compatible = "atmel,at91rm9200-gpio";
 			reg = <0xfffff200 0x200>;
@@ -531,6 +549,28 @@
 			reg = <0xfffffd40 0x10>;
 			status = "disabled";
 		};
+
+		spi0: spi@fffa4000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xfffa4000 0x200>;
+			interrupts = <14 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi0>;
+			status = "disabled";
+		};
+
+		spi1: spi@fffa8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xfffa8000 0x200>;
+			interrupts = <15 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi1>;
+			status = "disabled";
+		};
 	};
 
 	nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 20c31913c270..92c52a7d70bc 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -102,6 +102,16 @@
 			};
 		};
 	};
+
+	spi0: spi@fffa4000{
+		status = "okay";
+		cs-gpios = <&pioB 3 0>, <0>, <0>, <0>;
+		mtd_dataflash@0 {
+			compatible = "atmel,at45", "atmel,dataflash";
+			spi-max-frequency = <13000000>;
+			reg = <0>;
+		};
+	};
 };
 
 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 7750f98dd764..b2961f1ea51b 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -261,6 +261,24 @@
 			};
 		};
 
+		spi0 {
+			pinctrl_spi0: spi0-0 {
+				atmel,pins =
+					<0 11 0x1 0x0	/* PA11 periph A SPI0_MISO pin */
+					 0 12 0x1 0x0	/* PA12 periph A SPI0_MOSI pin */
+					 0 13 0x1 0x0>;	/* PA13 periph A SPI0_SPCK pin */
+			};
+		};
+
+		spi1 {
+			pinctrl_spi1: spi1-0 {
+				atmel,pins =
+					<0 21 0x2 0x0	/* PA21 periph B SPI1_MISO pin */
+					 0 22 0x2 0x0	/* PA22 periph B SPI1_MOSI pin */
+					 0 23 0x2 0x0>;	/* PA23 periph B SPI1_SPCK pin */
+			};
+		};
+
 		pioA: gpio@fffff400 {
 			compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
 			reg = <0xfffff400 0x200>;
@@ -373,6 +391,28 @@
 			#size-cells = <0>;
 			status = "disabled";
 		};
+
+		spi0: spi@f0000000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xf0000000 0x100>;
+			interrupts = <13 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi0>;
+			status = "disabled";
+		};
+
+		spi1: spi@f0004000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xf0004000 0x100>;
+			interrupts = <14 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi1>;
+			status = "disabled";
+		};
 	};
 
 	nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d400f8de4387..34c842b1efb2 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -67,6 +67,16 @@
 			};
 		};
 	};
+
+	spi0: spi@f0000000 {
+		status = "okay";
+		cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
+		m25p80@0 {
+			compatible = "atmel,at25df321a";
+			spi-max-frequency = <50000000>;
+			reg = <0>;
+		};
+	};
 };
 
 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index a98c0d50fbbe..347b438d47fa 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -343,6 +343,24 @@
 			};
 		};
 
+		spi0 {
+			pinctrl_spi0: spi0-0 {
+				atmel,pins =
+					<0 11 0x1 0x0	/* PA11 periph A SPI0_MISO pin */
+					 0 12 0x1 0x0	/* PA12 periph A SPI0_MOSI pin */
+					 0 13 0x1 0x0>;	/* PA13 periph A SPI0_SPCK pin */
+			};
+		};
+
+		spi1 {
+			pinctrl_spi1: spi1-0 {
+				atmel,pins =
+					<0 21 0x2 0x0	/* PA21 periph B SPI1_MISO pin */
+					 0 22 0x2 0x0	/* PA22 periph B SPI1_MOSI pin */
+					 0 23 0x2 0x0>;	/* PA23 periph B SPI1_SPCK pin */
+			};
+		};
+
 		pioA: gpio@fffff400 {
 			compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
 			reg = <0xfffff400 0x200>;
@@ -529,6 +547,28 @@
 				trigger-value = <0x6>;
 			};
 		};
+
+		spi0: spi@f0000000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xf0000000 0x100>;
+			interrupts = <13 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi0>;
+			status = "disabled";
+		};
+
+		spi1: spi@f0004000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,at91rm9200-spi";
+			reg = <0xf0004000 0x100>;
+			interrupts = <14 4 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_spi1>;
+			status = "disabled";
+		};
 	};
 
 	nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 8a7cf1d9cf5d..09f5e667ca7a 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -84,6 +84,16 @@
84 }; 84 };
85 }; 85 };
86 }; 86 };
87
88 spi0: spi@f0000000 {
89 status = "okay";
90 cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
91 m25p80@0 {
92 compatible = "atmel,at25df321a";
93 spi-max-frequency = <50000000>;
94 reg = <0>;
95 };
96 };
87 }; 97 };
88 98
89 usb0: ohci@00600000 { 99 usb0: ohci@00600000 {
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index 31f2157cd7d7..67f8670c4d6a 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -38,4 +38,10 @@
38 <0x19c00000 0x1000>; 38 <0x19c00000 0x1000>;
39 interrupts = <0 195 0x0>; 39 interrupts = <0 195 0x0>;
40 }; 40 };
41
42 qcom,ssbi@500000 {
43 compatible = "qcom,ssbi";
44 reg = <0x500000 0x1000>;
45 qcom,controller-type = "pmic-arbiter";
46 };
41}; 47};
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index 9e621b5ad3dd..c9b09a813a4b 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -38,4 +38,10 @@
38 <0x16400000 0x1000>; 38 <0x16400000 0x1000>;
39 interrupts = <0 154 0x0>; 39 interrupts = <0 154 0x0>;
40 }; 40 };
41
42 qcom,ssbi@500000 {
43 compatible = "qcom,ssbi";
44 reg = <0x500000 0x1000>;
45 qcom,controller-type = "pmic-arbiter";
46 };
41}; 47};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 1513c1927cc8..122ae94076c8 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -89,7 +89,7 @@
89 pinmux: pinmux@e0700000 { 89 pinmux: pinmux@e0700000 {
90 compatible = "st,spear1310-pinmux"; 90 compatible = "st,spear1310-pinmux";
91 reg = <0xe0700000 0x1000>; 91 reg = <0xe0700000 0x1000>;
92 #gpio-range-cells = <2>; 92 #gpio-range-cells = <3>;
93 }; 93 };
94 94
95 apb { 95 apb {
@@ -212,7 +212,7 @@
212 interrupt-controller; 212 interrupt-controller;
213 gpio-controller; 213 gpio-controller;
214 #gpio-cells = <2>; 214 #gpio-cells = <2>;
215 gpio-ranges = <&pinmux 0 246>; 215 gpio-ranges = <&pinmux 0 0 246>;
216 status = "disabled"; 216 status = "disabled";
217 217
218 st-plgpio,ngpio = <246>; 218 st-plgpio,ngpio = <246>;
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 34da11aa6795..c511c4772efd 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -63,7 +63,7 @@
63 pinmux: pinmux@e0700000 { 63 pinmux: pinmux@e0700000 {
64 compatible = "st,spear1340-pinmux"; 64 compatible = "st,spear1340-pinmux";
65 reg = <0xe0700000 0x1000>; 65 reg = <0xe0700000 0x1000>;
66 #gpio-range-cells = <2>; 66 #gpio-range-cells = <3>;
67 }; 67 };
68 68
69 pwm: pwm@e0180000 { 69 pwm: pwm@e0180000 {
@@ -127,7 +127,7 @@
127 interrupt-controller; 127 interrupt-controller;
128 gpio-controller; 128 gpio-controller;
129 #gpio-cells = <2>; 129 #gpio-cells = <2>;
130 gpio-ranges = <&pinmux 0 252>; 130 gpio-ranges = <&pinmux 0 0 252>;
131 status = "disabled"; 131 status = "disabled";
132 132
133 st-plgpio,ngpio = <250>; 133 st-plgpio,ngpio = <250>;
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index ab45b8c81982..95372080eea6 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -25,7 +25,7 @@
25 pinmux: pinmux@b4000000 { 25 pinmux: pinmux@b4000000 {
26 compatible = "st,spear310-pinmux"; 26 compatible = "st,spear310-pinmux";
27 reg = <0xb4000000 0x1000>; 27 reg = <0xb4000000 0x1000>;
28 #gpio-range-cells = <2>; 28 #gpio-range-cells = <3>;
29 }; 29 };
30 30
31 fsmc: flash@44000000 { 31 fsmc: flash@44000000 {
@@ -102,7 +102,7 @@
102 interrupt-controller; 102 interrupt-controller;
103 gpio-controller; 103 gpio-controller;
104 #gpio-cells = <2>; 104 #gpio-cells = <2>;
105 gpio-ranges = <&pinmux 0 102>; 105 gpio-ranges = <&pinmux 0 0 102>;
106 status = "disabled"; 106 status = "disabled";
107 107
108 st-plgpio,ngpio = <102>; 108 st-plgpio,ngpio = <102>;
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index caa5520b1fd4..ffea342aeec9 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -24,7 +24,7 @@
24 pinmux: pinmux@b3000000 { 24 pinmux: pinmux@b3000000 {
25 compatible = "st,spear320-pinmux"; 25 compatible = "st,spear320-pinmux";
26 reg = <0xb3000000 0x1000>; 26 reg = <0xb3000000 0x1000>;
27 #gpio-range-cells = <2>; 27 #gpio-range-cells = <3>;
28 }; 28 };
29 29
30 clcd@90000000 { 30 clcd@90000000 {
@@ -130,7 +130,7 @@
130 interrupt-controller; 130 interrupt-controller;
131 gpio-controller; 131 gpio-controller;
132 #gpio-cells = <2>; 132 #gpio-cells = <2>;
133 gpio-ranges = <&pinmux 0 102>; 133 gpio-ranges = <&pinmux 0 0 102>;
134 status = "disabled"; 134 status = "disabled";
135 135
136 st-plgpio,ngpio = <102>; 136 st-plgpio,ngpio = <102>;
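The SPEAr pinmux hunks above bump #gpio-range-cells from 2 to 3 and insert an explicit pin offset, so each entry now reads <&pinmux gpio-offset pin-offset count> instead of <&pinmux offset count>. A rough standalone C model of that triple follows; the structure and function names are invented for illustration and are not part of the kernel's gpiolib.

#include <stdio.h>

/* Toy model of one 3-cell gpio-ranges entry: <&pinmux gpio_offset pin_offset npins>. */
struct gpio_range {
	unsigned int gpio_offset;	/* first GPIO covered, relative to the GPIO chip */
	unsigned int pin_offset;	/* matching first pin inside the pin controller */
	unsigned int npins;		/* number of consecutive GPIOs/pins in the range */
};

/* Translate a chip-relative GPIO number to a pin-controller pin number, or -1. */
static int gpio_to_pin(const struct gpio_range *r, unsigned int gpio)
{
	if (gpio < r->gpio_offset || gpio >= r->gpio_offset + r->npins)
		return -1;
	return (int)(r->pin_offset + (gpio - r->gpio_offset));
}

int main(void)
{
	/* Mirrors gpio-ranges = <&pinmux 0 0 246> from the spear1310.dtsi hunk above. */
	struct gpio_range plgpio = { .gpio_offset = 0, .pin_offset = 0, .npins = 246 };

	printf("GPIO 5 maps to pin %d\n", gpio_to_pin(&plgpio, 5));
	printf("GPIO 250 maps to pin %d\n", gpio_to_pin(&plgpio, 250)); /* outside the range */
	return 0;
}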
diff --git a/arch/arm/boot/dts/vt8500-bv07.dts b/arch/arm/boot/dts/vt8500-bv07.dts
index 567cf4e8ab84..877b33afa7ed 100644
--- a/arch/arm/boot/dts/vt8500-bv07.dts
+++ b/arch/arm/boot/dts/vt8500-bv07.dts
@@ -11,26 +11,22 @@
11 11
12/ { 12/ {
13 model = "Benign BV07 Netbook"; 13 model = "Benign BV07 Netbook";
14};
14 15
15 /* 16&fb {
16 * Display node is based on Sascha Hauer's patch on dri-devel. 17 bits-per-pixel = <16>;
17 * Added a bpp property to calculate the size of the framebuffer 18 display-timings {
18 * until the binding is formalized. 19 native-mode = <&timing0>;
19 */ 20 timing0: 800x480 {
20 display: display@0 { 21 clock-frequency = <0>; /* unused but required */
21 modes { 22 hactive = <800>;
22 mode0: mode@0 { 23 vactive = <480>;
23 hactive = <800>; 24 hfront-porch = <40>;
24 vactive = <480>; 25 hback-porch = <88>;
25 hback-porch = <88>; 26 hsync-len = <0>;
26 hfront-porch = <40>; 27 vback-porch = <32>;
27 hsync-len = <0>; 28 vfront-porch = <11>;
28 vback-porch = <32>; 29 vsync-len = <1>;
29 vfront-porch = <11>;
30 vsync-len = <1>;
31 clock = <0>; /* unused but required */
32 bpp = <16>; /* non-standard but required */
33 };
34 }; 30 };
35 }; 31 };
36}; 32};
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index cf31ced46602..68c8dc644383 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -98,12 +98,10 @@
98 interrupts = <43>; 98 interrupts = <43>;
99 }; 99 };
100 100
101 fb@d800e400 { 101 fb: fb@d8050800 {
102 compatible = "via,vt8500-fb"; 102 compatible = "via,vt8500-fb";
103 reg = <0xd800e400 0x400>; 103 reg = <0xd800e400 0x400>;
104 interrupts = <12>; 104 interrupts = <12>;
105 display = <&display>;
106 default-mode = <&mode0>;
107 }; 105 };
108 106
109 ge_rops@d8050400 { 107 ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8505-ref.dts b/arch/arm/boot/dts/wm8505-ref.dts
index fd4e248074c6..edd2cec3d37f 100644
--- a/arch/arm/boot/dts/wm8505-ref.dts
+++ b/arch/arm/boot/dts/wm8505-ref.dts
@@ -11,26 +11,22 @@
11 11
12/ { 12/ {
13 model = "Wondermedia WM8505 Netbook"; 13 model = "Wondermedia WM8505 Netbook";
14};
14 15
15 /* 16&fb {
16 * Display node is based on Sascha Hauer's patch on dri-devel. 17 bits-per-pixel = <32>;
17 * Added a bpp property to calculate the size of the framebuffer 18 display-timings {
18 * until the binding is formalized. 19 native-mode = <&timing0>;
19 */ 20 timing0: 800x480 {
20 display: display@0 { 21 clock-frequency = <0>; /* unused but required */
21 modes { 22 hactive = <800>;
22 mode0: mode@0 { 23 vactive = <480>;
23 hactive = <800>; 24 hfront-porch = <40>;
24 vactive = <480>; 25 hback-porch = <88>;
25 hback-porch = <88>; 26 hsync-len = <0>;
26 hfront-porch = <40>; 27 vback-porch = <32>;
27 hsync-len = <0>; 28 vfront-porch = <11>;
28 vback-porch = <32>; 29 vsync-len = <1>;
29 vfront-porch = <11>;
30 vsync-len = <1>;
31 clock = <0>; /* unused but required */
32 bpp = <32>; /* non-standard but required */
33 };
34 }; 30 };
35 }; 31 };
36}; 32};
diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi
index e74a1c0fb9a2..bcf668d31b28 100644
--- a/arch/arm/boot/dts/wm8505.dtsi
+++ b/arch/arm/boot/dts/wm8505.dtsi
@@ -128,11 +128,9 @@
128 interrupts = <0>; 128 interrupts = <0>;
129 }; 129 };
130 130
131 fb@d8050800 { 131 fb: fb@d8050800 {
132 compatible = "wm,wm8505-fb"; 132 compatible = "wm,wm8505-fb";
133 reg = <0xd8050800 0x200>; 133 reg = <0xd8050800 0x200>;
134 display = <&display>;
135 default-mode = <&mode0>;
136 }; 134 };
137 135
138 ge_rops@d8050400 { 136 ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8650-mid.dts b/arch/arm/boot/dts/wm8650-mid.dts
index cefd938f842f..61671a0d9ede 100644
--- a/arch/arm/boot/dts/wm8650-mid.dts
+++ b/arch/arm/boot/dts/wm8650-mid.dts
@@ -11,26 +11,24 @@
11 11
12/ { 12/ {
13 model = "Wondermedia WM8650-MID Tablet"; 13 model = "Wondermedia WM8650-MID Tablet";
14};
15
16&fb {
17 bits-per-pixel = <16>;
14 18
15 /* 19 display-timings {
16 * Display node is based on Sascha Hauer's patch on dri-devel. 20 native-mode = <&timing0>;
17 * Added a bpp property to calculate the size of the framebuffer 21 timing0: 800x480 {
18 * until the binding is formalized. 22 clock-frequency = <0>; /* unused but required */
19 */ 23 hactive = <800>;
20 display: display@0 { 24 vactive = <480>;
21 modes { 25 hfront-porch = <40>;
22 mode0: mode@0 { 26 hback-porch = <88>;
23 hactive = <800>; 27 hsync-len = <0>;
24 vactive = <480>; 28 vback-porch = <32>;
25 hback-porch = <88>; 29 vfront-porch = <11>;
26 hfront-porch = <40>; 30 vsync-len = <1>;
27 hsync-len = <0>;
28 vback-porch = <32>;
29 vfront-porch = <11>;
30 vsync-len = <1>;
31 clock = <0>; /* unused but required */
32 bpp = <16>; /* non-standard but required */
33 };
34 }; 31 };
35 }; 32 };
36}; 33};
34
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index db3c0a12e052..9313407bbc30 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -128,11 +128,9 @@
128 interrupts = <43>; 128 interrupts = <43>;
129 }; 129 };
130 130
131 fb@d8050800 { 131 fb: fb@d8050800 {
132 compatible = "wm,wm8505-fb"; 132 compatible = "wm,wm8505-fb";
133 reg = <0xd8050800 0x200>; 133 reg = <0xd8050800 0x200>;
134 display = <&display>;
135 default-mode = <&mode0>;
136 }; 134 };
137 135
138 ge_rops@d8050400 { 136 ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts
index fcc660c89540..32d22532cd6c 100644
--- a/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -15,28 +15,6 @@
15/ { 15/ {
16 model = "Wondermedia WM8850-W70v2 Tablet"; 16 model = "Wondermedia WM8850-W70v2 Tablet";
17 17
18 /*
19 * Display node is based on Sascha Hauer's patch on dri-devel.
20 * Added a bpp property to calculate the size of the framebuffer
21 * until the binding is formalized.
22 */
23 display: display@0 {
24 modes {
25 mode0: mode@0 {
26 hactive = <800>;
27 vactive = <480>;
28 hback-porch = <88>;
29 hfront-porch = <40>;
30 hsync-len = <0>;
31 vback-porch = <32>;
32 vfront-porch = <11>;
33 vsync-len = <1>;
34 clock = <0>; /* unused but required */
35 bpp = <16>; /* non-standard but required */
36 };
37 };
38 };
39
40 backlight { 18 backlight {
41 compatible = "pwm-backlight"; 19 compatible = "pwm-backlight";
42 pwms = <&pwm 0 50000 1>; /* duty inverted */ 20 pwms = <&pwm 0 50000 1>; /* duty inverted */
@@ -45,3 +23,21 @@
45 default-brightness-level = <5>; 23 default-brightness-level = <5>;
46 }; 24 };
47}; 25};
26
27&fb {
28 bits-per-pixel = <16>;
29 display-timings {
30 native-mode = <&timing0>;
31 timing0: 800x480 {
32 clock-frequency = <0>; /* unused but required */
33 hactive = <800>;
34 vactive = <480>;
35 hfront-porch = <40>;
36 hback-porch = <88>;
37 hsync-len = <0>;
38 vback-porch = <32>;
39 vfront-porch = <11>;
40 vsync-len = <1>;
41 };
42 };
43};
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index e8cbfdc87bba..7149cd13e3b9 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -135,11 +135,9 @@
135 }; 135 };
136 }; 136 };
137 137
138 fb@d8051700 { 138 fb: fb@d8051700 {
139 compatible = "wm,wm8505-fb"; 139 compatible = "wm,wm8505-fb";
140 reg = <0xd8051700 0x200>; 140 reg = <0xd8051700 0x200>;
141 display = <&display>;
142 default-mode = <&mode0>;
143 }; 141 };
144 142
145 ge_rops@d8050400 { 143 ge_rops@d8050400 {
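The VT8500/WM8xxx board patches above replace the ad-hoc display/modes nodes (whose removed comment said the bpp property existed only to size the framebuffer) with the generic display-timings binding, keeping bits-per-pixel on the fb node. A small self-contained C sketch of that size calculation; the helper name and the single packed-plane assumption are illustrative only, not taken from the driver.

#include <stdio.h>
#include <stddef.h>

/* Bytes needed for one packed frame of hactive x vactive pixels at the given
 * bits-per-pixel, rounding each line up to whole bytes. */
static size_t fb_size_bytes(unsigned int hactive, unsigned int vactive,
			    unsigned int bits_per_pixel)
{
	size_t line_bytes = ((size_t)hactive * bits_per_pixel + 7) / 8;

	return line_bytes * vactive;
}

int main(void)
{
	/* 800x480 at 16 bpp, as in the wm8650-mid timing above. */
	printf("800x480 @ 16 bpp: %zu bytes\n", fb_size_bytes(800, 480, 16));
	/* 800x480 at 32 bpp, as in the wm8505-ref timing above. */
	printf("800x480 @ 32 bpp: %zu bytes\n", fb_size_bytes(800, 480, 32));
	return 0;
}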
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e36b01025321..088d6c11a0fa 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -188,6 +188,7 @@ CONFIG_USB_EHCI_HCD=y
188CONFIG_USB_EHCI_MXC=y 188CONFIG_USB_EHCI_MXC=y
189CONFIG_USB_CHIPIDEA=y 189CONFIG_USB_CHIPIDEA=y
190CONFIG_USB_CHIPIDEA_HOST=y 190CONFIG_USB_CHIPIDEA_HOST=y
191CONFIG_USB_PHY=y
191CONFIG_USB_MXS_PHY=y 192CONFIG_USB_MXS_PHY=y
192CONFIG_USB_STORAGE=y 193CONFIG_USB_STORAGE=y
193CONFIG_MMC=y 194CONFIG_MMC=y
diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig
index 13482ea58b09..93f3794ba5cb 100644
--- a/arch/arm/configs/kirkwood_defconfig
+++ b/arch/arm/configs/kirkwood_defconfig
@@ -56,7 +56,6 @@ CONFIG_AEABI=y
56CONFIG_ZBOOT_ROM_TEXT=0x0 56CONFIG_ZBOOT_ROM_TEXT=0x0
57CONFIG_ZBOOT_ROM_BSS=0x0 57CONFIG_ZBOOT_ROM_BSS=0x0
58CONFIG_CPU_IDLE=y 58CONFIG_CPU_IDLE=y
59CONFIG_CPU_IDLE_KIRKWOOD=y
60CONFIG_NET=y 59CONFIG_NET=y
61CONFIG_PACKET=y 60CONFIG_PACKET=y
62CONFIG_UNIX=y 61CONFIG_UNIX=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 92386b20bd09..afa7249fac6e 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -134,6 +134,7 @@ CONFIG_SND_DEBUG_VERBOSE=y
134# CONFIG_SND_SPI is not set 134# CONFIG_SND_SPI is not set
135CONFIG_SND_SOC=y 135CONFIG_SND_SOC=y
136CONFIG_USB=y 136CONFIG_USB=y
137CONFIG_USB_PHY=y
137CONFIG_USB_OHCI_HCD=y 138CONFIG_USB_OHCI_HCD=y
138CONFIG_USB_STORAGE=y 139CONFIG_USB_STORAGE=y
139CONFIG_USB_GADGET=y 140CONFIG_USB_GADGET=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 6a99e30f81d2..87924d671115 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -120,6 +120,7 @@ CONFIG_USB_EHCI_HCD=y
120CONFIG_USB_CHIPIDEA=y 120CONFIG_USB_CHIPIDEA=y
121CONFIG_USB_CHIPIDEA_HOST=y 121CONFIG_USB_CHIPIDEA_HOST=y
122CONFIG_USB_STORAGE=y 122CONFIG_USB_STORAGE=y
123CONFIG_USB_PHY=y
123CONFIG_USB_MXS_PHY=y 124CONFIG_USB_MXS_PHY=y
124CONFIG_MMC=y 125CONFIG_MMC=y
125CONFIG_MMC_MXS=y 126CONFIG_MMC_MXS=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 42eab9a2a0fd..7e0ebb64a7f9 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -195,6 +195,7 @@ CONFIG_SND_SOC=y
195CONFIG_SND_OMAP_SOC=y 195CONFIG_SND_OMAP_SOC=y
196# CONFIG_USB_HID is not set 196# CONFIG_USB_HID is not set
197CONFIG_USB=y 197CONFIG_USB=y
198CONFIG_USB_PHY=y
198CONFIG_USB_DEBUG=y 199CONFIG_USB_DEBUG=y
199CONFIG_USB_DEVICEFS=y 200CONFIG_USB_DEVICEFS=y
200# CONFIG_USB_DEVICE_CLASS is not set 201# CONFIG_USB_DEVICE_CLASS is not set
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4a..9bcd262a9008 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
61#define FIRST_USER_ADDRESS PAGE_SIZE 61#define FIRST_USER_ADDRESS PAGE_SIZE
62 62
63/* 63/*
64 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
65 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
66 * page shared between user and kernel).
67 */
68#ifdef CONFIG_ARM_LPAE
69#define USER_PGTABLES_CEILING TASK_SIZE
70#endif
71
72/*
64 * The pgprot_* and protection_map entries will be fixed up in runtime 73 * The pgprot_* and protection_map entries will be fixed up in runtime
65 * to include the cachable and bufferable bits based on memory policy, 74 * to include the cachable and bufferable bits based on memory policy,
66 * as well as any architecture dependent bits like global/ASID and SMP 75 * as well as any architecture dependent bits like global/ASID and SMP
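The comment added to pgtable.h above explains that, with LPAE, the top-level entry covering the modules area is shared between user and kernel, so TASK_SIZE is used as the ceiling for free_pgtables()/free_pgd_range(). The toy program below is only a loose illustration of that ceiling check under invented assumptions (four 1 GiB top-level slots, user space ending at 3 GiB); it bears no resemblance to the real page-table code.

#include <stdio.h>

#define SLOT_SPAN	0x40000000ull	/* each toy top-level slot covers 1 GiB */
#define NSLOTS		4
#define TOY_TASK_SIZE	0xC0000000ull	/* user space ends here; slot 3 also covers kernel mappings */

/* Free only the slots whose whole span lies below the ceiling. */
static void free_user_slots(int present[NSLOTS], unsigned long long ceiling)
{
	for (unsigned int i = 0; i < NSLOTS; i++) {
		unsigned long long end = (unsigned long long)(i + 1) * SLOT_SPAN;

		if (present[i] && end <= ceiling) {
			present[i] = 0;
			printf("freed slot %u\n", i);
		}
	}
}

int main(void)
{
	int present[NSLOTS] = { 1, 1, 1, 1 };

	/* With the ceiling at TASK_SIZE, the shared slot 3 is left alone. */
	free_user_slots(present, TOY_TASK_SIZE);
	printf("slot 3 still present: %d\n", present[3]);
	return 0;
}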
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 5a85f148b607..21a23e378bbe 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void);
21 21
22extern unsigned int user_debug; 22extern unsigned int user_debug;
23 23
24extern void disable_hlt(void);
25extern void enable_hlt(void);
26
27#endif /* !__ASSEMBLY__ */ 24#endif /* !__ASSEMBLY__ */
28 25
29#endif /* __ASM_ARM_SYSTEM_MISC_H */ 26#endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index e4ddfb39ca34..141baa3f9a72 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -44,14 +44,6 @@
44#define __ARCH_WANT_SYS_CLONE 44#define __ARCH_WANT_SYS_CLONE
45 45
46/* 46/*
47 * "Conditional" syscalls
48 *
49 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
50 * but it doesn't work on all toolchains, so we just do it by hand
51 */
52#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
53
54/*
55 * Unimplemented (or alternatively implemented) syscalls 47 * Unimplemented (or alternatively implemented) syscalls
56 */ 48 */
57#define __IGNORE_fadvise64_64 49#define __IGNORE_fadvise64_64
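The cond_syscall() definition removed above hand-rolled a weak alias in assembler because, as its own comment noted, __attribute__((weak, alias(...))) was not reliable on every toolchain at the time; the arch copy is dropped here in favour of a common definition. The userspace demo below (GCC or Clang on an ELF target assumed, names invented) shows the attribute form of the same idea: the call resolves to the fallback unless a strong definition is linked in.

#include <stdio.h>
#include <errno.h>

/* Fallback used when a syscall is not implemented. */
long sys_ni_syscall(void)
{
	return -ENOSYS;
}

/* Weak alias: sys_frobnicate() resolves to sys_ni_syscall() unless another
 * translation unit provides a strong sys_frobnicate() definition. */
long sys_frobnicate(void) __attribute__((weak, alias("sys_ni_syscall")));

int main(void)
{
	printf("sys_frobnicate() returned %ld\n", sys_frobnicate());
	return 0;
}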
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 85aa2b292692..43076536965c 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
29 early_write(s, n); 29 early_write(s, n);
30} 30}
31 31
32static struct console early_console = { 32static struct console early_console_dev = {
33 .name = "earlycon", 33 .name = "earlycon",
34 .write = early_console_write, 34 .write = early_console_write,
35 .flags = CON_PRINTBUFFER | CON_BOOT, 35 .flags = CON_PRINTBUFFER | CON_BOOT,
36 .index = -1, 36 .index = -1,
37}; 37};
38 38
39asmlinkage void early_printk(const char *fmt, ...)
40{
41 char buf[512];
42 int n;
43 va_list ap;
44
45 va_start(ap, fmt);
46 n = vscnprintf(buf, sizeof(buf), fmt, ap);
47 early_write(buf, n);
48 va_end(ap);
49}
50
51static int __init setup_early_printk(char *buf) 39static int __init setup_early_printk(char *buf)
52{ 40{
53 register_console(&early_console); 41 early_console = &early_console_dev;
42 register_console(&early_console_dev);
54 return 0; 43 return 0;
55} 44}
56 45
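The early_printk.c hunk above deletes the ARM-local early_printk() body, which formatted into a stack buffer and pushed the result to the boot console, and instead exposes the console as early_console_dev through the generic early_console pointer so common code does the formatting. A standalone sketch of the removed buffer-and-write pattern, with invented names and plain vsnprintf()/fwrite() standing in for the kernel helpers:

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for the low-level console write the real code would use. */
static void early_write(const char *s, unsigned int n)
{
	fwrite(s, 1, n, stderr);
}

/* Format into a fixed buffer, then hand the bytes to the console. */
static void early_printk_demo(const char *fmt, ...)
{
	char buf[512];
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	if (n > (int)sizeof(buf) - 1)
		n = sizeof(buf) - 1;	/* vsnprintf() reports the untruncated length */
	if (n > 0)
		early_write(buf, (unsigned int)n);
}

int main(void)
{
	early_printk_demo("early console up, %d byte buffer\n", 512);
	return 0;
}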
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 9b6de8c988f3..8ff0ecdc637f 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -254,7 +254,7 @@ static void sysrq_etm_dump(int key)
254 254
255static struct sysrq_key_op sysrq_etm_op = { 255static struct sysrq_key_op sysrq_etm_op = {
256 .handler = sysrq_etm_dump, 256 .handler = sysrq_etm_dump,
257 .help_msg = "ETM buffer dump", 257 .help_msg = "etm-buffer-dump(v)",
258 .action_msg = "etm", 258 .action_msg = "etm",
259}; 259};
260 260
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 047d3e40e470..ae58d3b37d9d 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,38 +57,6 @@ static const char *isa_modes[] = {
57 "ARM" , "Thumb" , "Jazelle", "ThumbEE" 57 "ARM" , "Thumb" , "Jazelle", "ThumbEE"
58}; 58};
59 59
60static volatile int hlt_counter;
61
62void disable_hlt(void)
63{
64 hlt_counter++;
65}
66
67EXPORT_SYMBOL(disable_hlt);
68
69void enable_hlt(void)
70{
71 hlt_counter--;
72 BUG_ON(hlt_counter < 0);
73}
74
75EXPORT_SYMBOL(enable_hlt);
76
77static int __init nohlt_setup(char *__unused)
78{
79 hlt_counter = 1;
80 return 1;
81}
82
83static int __init hlt_setup(char *__unused)
84{
85 hlt_counter = 0;
86 return 1;
87}
88
89__setup("nohlt", nohlt_setup);
90__setup("hlt", hlt_setup);
91
92extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); 60extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
93typedef void (*phys_reset_t)(unsigned long); 61typedef void (*phys_reset_t)(unsigned long);
94 62
@@ -172,54 +140,38 @@ static void default_idle(void)
172 local_irq_enable(); 140 local_irq_enable();
173} 141}
174 142
175/* 143void arch_cpu_idle_prepare(void)
176 * The idle thread.
177 * We always respect 'hlt_counter' to prevent low power idle.
178 */
179void cpu_idle(void)
180{ 144{
181 local_fiq_enable(); 145 local_fiq_enable();
146}
182 147
183 /* endless idle loop with no priority at all */ 148void arch_cpu_idle_enter(void)
184 while (1) { 149{
185 tick_nohz_idle_enter(); 150 ledtrig_cpu(CPU_LED_IDLE_START);
186 rcu_idle_enter(); 151#ifdef CONFIG_PL310_ERRATA_769419
187 ledtrig_cpu(CPU_LED_IDLE_START); 152 wmb();
188 while (!need_resched()) {
189#ifdef CONFIG_HOTPLUG_CPU
190 if (cpu_is_offline(smp_processor_id()))
191 cpu_die();
192#endif 153#endif
154}
193 155
194 /* 156void arch_cpu_idle_exit(void)
195 * We need to disable interrupts here 157{
196 * to ensure we don't miss a wakeup call. 158 ledtrig_cpu(CPU_LED_IDLE_END);
197 */ 159}
198 local_irq_disable(); 160
199#ifdef CONFIG_PL310_ERRATA_769419 161#ifdef CONFIG_HOTPLUG_CPU
200 wmb(); 162void arch_cpu_idle_dead(void)
163{
164 cpu_die();
165}
201#endif 166#endif
202 if (hlt_counter) { 167
203 local_irq_enable(); 168/*
204 cpu_relax(); 169 * Called from the core idle loop.
205 } else if (!need_resched()) { 170 */
206 stop_critical_timings(); 171void arch_cpu_idle(void)
207 if (cpuidle_idle_call()) 172{
208 default_idle(); 173 if (cpuidle_idle_call())
209 start_critical_timings(); 174 default_idle();
210 /*
211 * default_idle functions must always
212 * return with IRQs enabled.
213 */
214 WARN_ON(irqs_disabled());
215 } else
216 local_irq_enable();
217 }
218 ledtrig_cpu(CPU_LED_IDLE_END);
219 rcu_idle_exit();
220 tick_nohz_idle_exit();
221 schedule_preempt_disabled();
222 }
223} 175}
224 176
225static char reboot_mode = 'h'; 177static char reboot_mode = 'h';
@@ -273,11 +225,8 @@ void __show_regs(struct pt_regs *regs)
273 unsigned long flags; 225 unsigned long flags;
274 char buf[64]; 226 char buf[64];
275 227
276 printk("CPU: %d %s (%s %.*s)\n", 228 show_regs_print_info(KERN_DEFAULT);
277 raw_smp_processor_id(), print_tainted(), 229
278 init_utsname()->release,
279 (int)strcspn(init_utsname()->version, " "),
280 init_utsname()->version);
281 print_symbol("PC is at %s\n", instruction_pointer(regs)); 230 print_symbol("PC is at %s\n", instruction_pointer(regs));
282 print_symbol("LR is at %s\n", regs->ARM_lr); 231 print_symbol("LR is at %s\n", regs->ARM_lr);
283 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" 232 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
@@ -332,7 +281,6 @@ void __show_regs(struct pt_regs *regs)
332void show_regs(struct pt_regs * regs) 281void show_regs(struct pt_regs * regs)
333{ 282{
334 printk("\n"); 283 printk("\n");
335 printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
336 __show_regs(regs); 284 __show_regs(regs);
337 dump_stack(); 285 dump_stack();
338} 286}
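The process.c rewrite above removes the open-coded cpu_idle() loop together with the hlt/nohlt counter, leaving only per-architecture hooks (arch_cpu_idle_prepare(), arch_cpu_idle_enter(), arch_cpu_idle(), arch_cpu_idle_exit()) that generic code calls from its own loop. The sketch below is a userspace caricature of that split, using made-up stubs and a fixed iteration count in place of the scheduler-driven loop:

#include <stdio.h>

/* Architecture hooks: in the kernel these are the arch_cpu_idle_*() functions;
 * here they are plain stubs that only log what they would do. */
static void arch_idle_prepare(void) { puts("prepare: enable fast interrupts"); }
static void arch_idle_enter(void)   { puts("enter: idle LED on, errata barrier"); }
static void arch_idle(void)         { puts("idle: wait for interrupt"); }
static void arch_idle_exit(void)    { puts("exit: idle LED off"); }

/* Toy stand-in for the generic idle loop that now owns the control flow. */
static void generic_idle_loop(int iterations)
{
	arch_idle_prepare();
	for (int i = 0; i < iterations; i++) {
		arch_idle_enter();
		arch_idle();	/* generic code decides when this point is reached */
		arch_idle_exit();
	}
}

int main(void)
{
	generic_idle_loop(2);
	return 0;
}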
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1f2ccccaf009..4619177bcfe6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -336,7 +336,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
336 /* 336 /*
337 * OK, it's off to the idle thread for us 337 * OK, it's off to the idle thread for us
338 */ 338 */
339 cpu_idle(); 339 cpu_startup_entry(CPUHP_ONLINE);
340} 340}
341 341
342void __init smp_cpus_done(unsigned int max_cpus) 342void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 79282ebcd939..f10316b4ecdc 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -100,7 +100,7 @@ static void __init parse_dt_topology(void)
100 int alloc_size, cpu = 0; 100 int alloc_size, cpu = 0;
101 101
102 alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); 102 alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
103 cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT); 103 cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
104 104
105 while ((cn = of_find_node_by_type(cn, "cpu"))) { 105 while ((cn = of_find_node_by_type(cn, "cpu"))) {
106 const u32 *rate, *reg; 106 const u32 *rate, *reg;
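The topology.c change above only drops a redundant cast: in C, the void * returned by the allocator converts to any object pointer implicitly. A tiny illustration with standard calloc(); the struct layout here is a placeholder, not the kernel's.

#include <stdlib.h>

struct cpu_capacity { unsigned long hwid; unsigned long capacity; };

int main(void)
{
	/* No cast needed: void * converts implicitly to struct cpu_capacity *. */
	struct cpu_capacity *caps = calloc(4, sizeof(*caps));

	free(caps);
	return 0;
}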
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1c089119b2d7..18b32e8e4497 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -204,13 +204,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
204} 204}
205#endif 205#endif
206 206
207void dump_stack(void)
208{
209 dump_backtrace(NULL, NULL);
210}
211
212EXPORT_SYMBOL(dump_stack);
213
214void show_stack(struct task_struct *tsk, unsigned long *sp) 207void show_stack(struct task_struct *tsk, unsigned long *sp)
215{ 208{
216 dump_backtrace(NULL, tsk); 209 dump_backtrace(NULL, tsk);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c1fe498983ac..842098d78f58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -614,7 +614,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
614 614
615 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) 615 if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
616 || !arm_exit_handlers[hsr_ec]) { 616 || !arm_exit_handlers[hsr_ec]) {
617 kvm_err("Unkown exception class: %#08lx, " 617 kvm_err("Unknown exception class: %#08lx, "
618 "hsr: %#08x\n", hsr_ec, 618 "hsr: %#08x\n", hsr_ec,
619 (unsigned int)vcpu->arch.hsr); 619 (unsigned int)vcpu->arch.hsr);
620 BUG(); 620 BUG();
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b67cd5374117..44199bc2c665 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -232,6 +232,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
232 CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk), 232 CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk),
233 CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk), 233 CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk),
234 CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk), 234 CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk),
235 CLKDEV_CON_DEV_ID("spi_clk", "fffc8000.spi", &spi0_clk),
236 CLKDEV_CON_DEV_ID("spi_clk", "fffcc000.spi", &spi1_clk),
235 /* fake hclk clock */ 237 /* fake hclk clock */
236 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk), 238 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
237 CLKDEV_CON_ID("pioA", &pioA_clk), 239 CLKDEV_CON_ID("pioA", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index d3addee43d8d..2ec5efea3f03 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -262,6 +262,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
262 CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk), 262 CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk),
263 CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk), 263 CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk),
264 CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk), 264 CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk),
265 CLKDEV_CON_DEV_ID("spi_clk", "fffa4000.spi", &spi0_clk),
266 CLKDEV_CON_DEV_ID("spi_clk", "fffa8000.spi", &spi1_clk),
265 /* fake hclk clock */ 267 /* fake hclk clock */
266 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk), 268 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk),
267 CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk), 269 CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index 5dfc8fd87103..ccd078355eed 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -172,6 +172,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
172 CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk), 172 CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk),
173 CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk), 173 CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
174 CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk), 174 CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
175 CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
176 CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
175 CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk), 177 CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
176 CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk), 178 CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
177 CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk), 179 CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 44a9a62dcc13..a200d8a17123 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -237,6 +237,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
237 CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk), 237 CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
238 CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk), 238 CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
239 CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk), 239 CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk),
240 CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
241 CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
240 CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk), 242 CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
241 CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk), 243 CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
242 CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk), 244 CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index 0c6381516a5a..48f1228c611c 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -27,8 +27,6 @@
27 27
28#define AT91_MAX_STATES 2 28#define AT91_MAX_STATES 2
29 29
30static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
31
32/* Actual code that puts the SoC in different idle states */ 30/* Actual code that puts the SoC in different idle states */
33static int at91_enter_idle(struct cpuidle_device *dev, 31static int at91_enter_idle(struct cpuidle_device *dev,
34 struct cpuidle_driver *drv, 32 struct cpuidle_driver *drv,
@@ -47,7 +45,6 @@ static int at91_enter_idle(struct cpuidle_device *dev,
47static struct cpuidle_driver at91_idle_driver = { 45static struct cpuidle_driver at91_idle_driver = {
48 .name = "at91_idle", 46 .name = "at91_idle",
49 .owner = THIS_MODULE, 47 .owner = THIS_MODULE,
50 .en_core_tk_irqen = 1,
51 .states[0] = ARM_CPUIDLE_WFI_STATE, 48 .states[0] = ARM_CPUIDLE_WFI_STATE,
52 .states[1] = { 49 .states[1] = {
53 .enter = at91_enter_idle, 50 .enter = at91_enter_idle,
@@ -61,20 +58,9 @@ static struct cpuidle_driver at91_idle_driver = {
61}; 58};
62 59
63/* Initialize CPU idle by registering the idle states */ 60/* Initialize CPU idle by registering the idle states */
64static int at91_init_cpuidle(void) 61static int __init at91_init_cpuidle(void)
65{ 62{
66 struct cpuidle_device *device; 63 return cpuidle_register(&at91_idle_driver, NULL);
67
68 device = &per_cpu(at91_cpuidle_device, smp_processor_id());
69 device->state_count = AT91_MAX_STATES;
70
71 cpuidle_register_driver(&at91_idle_driver);
72
73 if (cpuidle_register_device(device)) {
74 printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
75 return -EIO;
76 }
77 return 0;
78} 64}
79 65
80device_initcall(at91_init_cpuidle); 66device_initcall(at91_init_cpuidle);
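The at91 cpuidle patch above (and the DaVinci one later in this series) replaces the pairing of cpuidle_register_driver() with a per-CPU cpuidle_register_device() by a single cpuidle_register() call. A toy model of why the combined helper shrinks the platform code; the structures and return-code handling here are invented, not the cpuidle API.

#include <stdio.h>

struct toy_driver { const char *name; };
struct toy_device { int registered; };

static struct toy_device boot_cpu_dev;	/* stand-in for the per-CPU device */

static int register_driver(const struct toy_driver *drv)
{
	printf("driver %s registered\n", drv->name);
	return 0;
}

static int register_device(struct toy_device *dev)
{
	dev->registered = 1;
	return 0;
}

/* Combined helper: one call performs both steps and leaves nothing half-done
 * for the caller to unwind, which is the shape of the consolidation above. */
static int toy_cpuidle_register(const struct toy_driver *drv)
{
	int ret = register_driver(drv);

	if (ret)
		return ret;
	return register_device(&boot_cpu_dev);
}

int main(void)
{
	struct toy_driver at91 = { .name = "at91_idle" };

	return toy_cpuidle_register(&at91) ? 1 : 0;
}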
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index bf02471d7e7c..f11289519c39 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -6,6 +6,7 @@ config ARCH_BCM
6 select ARM_ERRATA_764369 if SMP 6 select ARM_ERRATA_764369 if SMP
7 select ARM_GIC 7 select ARM_GIC
8 select CPU_V7 8 select CPU_V7
9 select CLKSRC_OF
9 select GENERIC_CLOCKEVENTS 10 select GENERIC_CLOCKEVENTS
10 select GENERIC_TIME 11 select GENERIC_TIME
11 select GPIO_BCM 12 select GPIO_BCM
diff --git a/arch/arm/mach-bcm/board_bcm.c b/arch/arm/mach-bcm/board_bcm.c
index f0f9abafad29..259593540477 100644
--- a/arch/arm/mach-bcm/board_bcm.c
+++ b/arch/arm/mach-bcm/board_bcm.c
@@ -16,14 +16,11 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/irqchip.h> 18#include <linux/irqchip.h>
19#include <linux/clocksource.h>
19 20
20#include <asm/mach/arch.h> 21#include <asm/mach/arch.h>
21#include <asm/mach/time.h> 22#include <asm/mach/time.h>
22 23
23static void timer_init(void)
24{
25}
26
27 24
28static void __init board_init(void) 25static void __init board_init(void)
29{ 26{
@@ -35,7 +32,7 @@ static const char * const bcm11351_dt_compat[] = { "bcm,bcm11351", NULL, };
35 32
36DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor") 33DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor")
37 .init_irq = irqchip_init, 34 .init_irq = irqchip_init,
38 .init_time = timer_init, 35 .init_time = clocksource_of_init,
39 .init_machine = board_init, 36 .init_machine = board_init,
40 .dt_compat = bcm11351_dt_compat, 37 .dt_compat = bcm11351_dt_compat,
41MACHINE_END 38MACHINE_END
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index fb5c1aa98a63..dd1ffccc75e9 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -37,7 +37,6 @@ obj-$(CONFIG_MACH_MITYOMAPL138) += board-mityomapl138.o
37obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o 37obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o
38 38
39# Power Management 39# Power Management
40obj-$(CONFIG_CPU_FREQ) += cpufreq.o
41obj-$(CONFIG_CPU_IDLE) += cpuidle.o 40obj-$(CONFIG_CPU_IDLE) += cpuidle.o
42obj-$(CONFIG_SUSPEND) += pm.o sleep.o 41obj-$(CONFIG_SUSPEND) += pm.o sleep.o
43obj-$(CONFIG_HAVE_CLK) += pm_domain.o 42obj-$(CONFIG_HAVE_CLK) += pm_domain.o
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 147b8e1a4407..886481c12173 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -242,6 +242,73 @@ static struct vpfe_config vpfe_cfg = {
242 .ccdc = "DM355 CCDC", 242 .ccdc = "DM355 CCDC",
243}; 243};
244 244
245/* venc standards timings */
246static struct vpbe_enc_mode_info dm355evm_enc_preset_timing[] = {
247 {
248 .name = "ntsc",
249 .timings_type = VPBE_ENC_STD,
250 .std_id = V4L2_STD_NTSC,
251 .interlaced = 1,
252 .xres = 720,
253 .yres = 480,
254 .aspect = {11, 10},
255 .fps = {30000, 1001},
256 .left_margin = 0x79,
257 .upper_margin = 0x10,
258 },
259 {
260 .name = "pal",
261 .timings_type = VPBE_ENC_STD,
262 .std_id = V4L2_STD_PAL,
263 .interlaced = 1,
264 .xres = 720,
265 .yres = 576,
266 .aspect = {54, 59},
267 .fps = {25, 1},
268 .left_margin = 0x7E,
269 .upper_margin = 0x16
270 },
271};
272
273#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
274
275/*
 276 * The outputs available from the VPBE and the encoders. Keep the
 277 * order the same as that of the encoders: first those from the venc, followed by
 278 * those from the encoders. The index in the output refers to the index on a particular
 279 * encoder. The driver uses this index to pass it to the encoder when it supports more than
280 * one output. Application uses index of the array to set an output.
281 */
282static struct vpbe_output dm355evm_vpbe_outputs[] = {
283 {
284 .output = {
285 .index = 0,
286 .name = "Composite",
287 .type = V4L2_OUTPUT_TYPE_ANALOG,
288 .std = VENC_STD_ALL,
289 .capabilities = V4L2_OUT_CAP_STD,
290 },
291 .subdev_name = DM355_VPBE_VENC_SUBDEV_NAME,
292 .default_mode = "ntsc",
293 .num_modes = ARRAY_SIZE(dm355evm_enc_preset_timing),
294 .modes = dm355evm_enc_preset_timing,
295 .if_params = V4L2_MBUS_FMT_FIXED,
296 },
297};
298
299static struct vpbe_config dm355evm_display_cfg = {
300 .module_name = "dm355-vpbe-display",
301 .i2c_adapter_id = 1,
302 .osd = {
303 .module_name = DM355_VPBE_OSD_SUBDEV_NAME,
304 },
305 .venc = {
306 .module_name = DM355_VPBE_VENC_SUBDEV_NAME,
307 },
308 .num_outputs = ARRAY_SIZE(dm355evm_vpbe_outputs),
309 .outputs = dm355evm_vpbe_outputs,
310};
311
245static struct platform_device *davinci_evm_devices[] __initdata = { 312static struct platform_device *davinci_evm_devices[] __initdata = {
246 &dm355evm_dm9000, 313 &dm355evm_dm9000,
247 &davinci_nand_device, 314 &davinci_nand_device,
@@ -253,8 +320,6 @@ static struct davinci_uart_config uart_config __initdata = {
253 320
254static void __init dm355_evm_map_io(void) 321static void __init dm355_evm_map_io(void)
255{ 322{
256 /* setup input configuration for VPFE input devices */
257 dm355_set_vpfe_config(&vpfe_cfg);
258 dm355_init(); 323 dm355_init();
259} 324}
260 325
@@ -344,6 +409,8 @@ static __init void dm355_evm_init(void)
344 davinci_setup_mmc(0, &dm355evm_mmc_config); 409 davinci_setup_mmc(0, &dm355evm_mmc_config);
345 davinci_setup_mmc(1, &dm355evm_mmc_config); 410 davinci_setup_mmc(1, &dm355evm_mmc_config);
346 411
412 dm355_init_video(&vpfe_cfg, &dm355evm_display_cfg);
413
347 dm355_init_spi0(BIT(0), dm355_evm_spi_info, 414 dm355_init_spi0(BIT(0), dm355_evm_spi_info,
348 ARRAY_SIZE(dm355_evm_spi_info)); 415 ARRAY_SIZE(dm355_evm_spi_info));
349 416
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index c2d4958a0cb6..2a6674356585 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -27,6 +27,7 @@
27#include <linux/input.h> 27#include <linux/input.h>
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/spi/eeprom.h> 29#include <linux/spi/eeprom.h>
30#include <linux/v4l2-dv-timings.h>
30 31
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 33#include <asm/mach/arch.h>
@@ -39,6 +40,7 @@
39#include <linux/platform_data/mtd-davinci.h> 40#include <linux/platform_data/mtd-davinci.h>
40#include <linux/platform_data/keyscan-davinci.h> 41#include <linux/platform_data/keyscan-davinci.h>
41 42
43#include <media/ths7303.h>
42#include <media/tvp514x.h> 44#include <media/tvp514x.h>
43 45
44#include "davinci.h" 46#include "davinci.h"
@@ -374,6 +376,166 @@ static struct vpfe_config vpfe_cfg = {
374 .ccdc = "ISIF", 376 .ccdc = "ISIF",
375}; 377};
376 378
379/* venc standards timings */
380static struct vpbe_enc_mode_info dm365evm_enc_std_timing[] = {
381 {
382 .name = "ntsc",
383 .timings_type = VPBE_ENC_STD,
384 .std_id = V4L2_STD_NTSC,
385 .interlaced = 1,
386 .xres = 720,
387 .yres = 480,
388 .aspect = {11, 10},
389 .fps = {30000, 1001},
390 .left_margin = 0x79,
391 .upper_margin = 0x10,
392 },
393 {
394 .name = "pal",
395 .timings_type = VPBE_ENC_STD,
396 .std_id = V4L2_STD_PAL,
397 .interlaced = 1,
398 .xres = 720,
399 .yres = 576,
400 .aspect = {54, 59},
401 .fps = {25, 1},
402 .left_margin = 0x7E,
403 .upper_margin = 0x16,
404 },
405};
406
407/* venc dv timings */
408static struct vpbe_enc_mode_info dm365evm_enc_preset_timing[] = {
409 {
410 .name = "480p59_94",
411 .timings_type = VPBE_ENC_DV_TIMINGS,
412 .dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
413 .interlaced = 0,
414 .xres = 720,
415 .yres = 480,
416 .aspect = {1, 1},
417 .fps = {5994, 100},
418 .left_margin = 0x8F,
419 .upper_margin = 0x2D,
420 },
421 {
422 .name = "576p50",
423 .timings_type = VPBE_ENC_DV_TIMINGS,
424 .dv_timings = V4L2_DV_BT_CEA_720X576P50,
425 .interlaced = 0,
426 .xres = 720,
427 .yres = 576,
428 .aspect = {1, 1},
429 .fps = {50, 1},
430 .left_margin = 0x8C,
431 .upper_margin = 0x36,
432 },
433 {
434 .name = "720p60",
435 .timings_type = VPBE_ENC_DV_TIMINGS,
436 .dv_timings = V4L2_DV_BT_CEA_1280X720P60,
437 .interlaced = 0,
438 .xres = 1280,
439 .yres = 720,
440 .aspect = {1, 1},
441 .fps = {60, 1},
442 .left_margin = 0x117,
443 .right_margin = 70,
444 .upper_margin = 38,
445 .lower_margin = 3,
446 .hsync_len = 80,
447 .vsync_len = 5,
448 },
449 {
450 .name = "1080i60",
451 .timings_type = VPBE_ENC_DV_TIMINGS,
452 .dv_timings = V4L2_DV_BT_CEA_1920X1080I60,
453 .interlaced = 1,
454 .xres = 1920,
455 .yres = 1080,
456 .aspect = {1, 1},
457 .fps = {30, 1},
458 .left_margin = 0xc9,
459 .right_margin = 80,
460 .upper_margin = 30,
461 .lower_margin = 3,
462 .hsync_len = 88,
463 .vsync_len = 5,
464 },
465};
466
467#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
468
469/*
 470 * The outputs available from the VPBE and the encoders. Keep the
 471 * order the same as that of the encoders: first those from the venc, followed by
 472 * those from the encoders. The index in the output refers to the index on a particular
 473 * encoder. The driver uses this index to pass it to the encoder when it supports more
474 * than one output. Application uses index of the array to set an output.
475 */
476static struct vpbe_output dm365evm_vpbe_outputs[] = {
477 {
478 .output = {
479 .index = 0,
480 .name = "Composite",
481 .type = V4L2_OUTPUT_TYPE_ANALOG,
482 .std = VENC_STD_ALL,
483 .capabilities = V4L2_OUT_CAP_STD,
484 },
485 .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME,
486 .default_mode = "ntsc",
487 .num_modes = ARRAY_SIZE(dm365evm_enc_std_timing),
488 .modes = dm365evm_enc_std_timing,
489 .if_params = V4L2_MBUS_FMT_FIXED,
490 },
491 {
492 .output = {
493 .index = 1,
494 .name = "Component",
495 .type = V4L2_OUTPUT_TYPE_ANALOG,
496 .capabilities = V4L2_OUT_CAP_DV_TIMINGS,
497 },
498 .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME,
499 .default_mode = "480p59_94",
500 .num_modes = ARRAY_SIZE(dm365evm_enc_preset_timing),
501 .modes = dm365evm_enc_preset_timing,
502 .if_params = V4L2_MBUS_FMT_FIXED,
503 },
504};
505
506/*
507 * Amplifiers on the board
508 */
509struct ths7303_platform_data ths7303_pdata = {
510 .ch_1 = 3,
511 .ch_2 = 3,
512 .ch_3 = 3,
513 .init_enable = 1,
514};
515
516static struct amp_config_info vpbe_amp = {
517 .module_name = "ths7303",
518 .is_i2c = 1,
519 .board_info = {
520 I2C_BOARD_INFO("ths7303", 0x2c),
521 .platform_data = &ths7303_pdata,
522 }
523};
524
525static struct vpbe_config dm365evm_display_cfg = {
526 .module_name = "dm365-vpbe-display",
527 .i2c_adapter_id = 1,
528 .amp = &vpbe_amp,
529 .osd = {
530 .module_name = DM365_VPBE_OSD_SUBDEV_NAME,
531 },
532 .venc = {
533 .module_name = DM365_VPBE_VENC_SUBDEV_NAME,
534 },
535 .num_outputs = ARRAY_SIZE(dm365evm_vpbe_outputs),
536 .outputs = dm365evm_vpbe_outputs,
537};
538
377static void __init evm_init_i2c(void) 539static void __init evm_init_i2c(void)
378{ 540{
379 davinci_init_i2c(&i2c_pdata); 541 davinci_init_i2c(&i2c_pdata);
@@ -564,8 +726,6 @@ static struct davinci_uart_config uart_config __initdata = {
564 726
565static void __init dm365_evm_map_io(void) 727static void __init dm365_evm_map_io(void)
566{ 728{
567 /* setup input configuration for VPFE input devices */
568 dm365_set_vpfe_config(&vpfe_cfg);
569 dm365_init(); 729 dm365_init();
570} 730}
571 731
@@ -597,6 +757,8 @@ static __init void dm365_evm_init(void)
597 757
598 davinci_setup_mmc(0, &dm365evm_mmc_config); 758 davinci_setup_mmc(0, &dm365evm_mmc_config);
599 759
760 dm365_init_video(&vpfe_cfg, &dm365evm_display_cfg);
761
600 /* maybe setup mmc1/etc ... _after_ mmc0 */ 762 /* maybe setup mmc1/etc ... _after_ mmc0 */
601 evm_init_cpld(); 763 evm_init_cpld();
602 764
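The vpbe_enc_mode_info tables added above describe each output mode by active size, margins and sync lengths. For the progressive DV timings the fields can be summed into a total line and frame, which gives a rough pixel-clock estimate; the sketch below does that arithmetic with invented field names mirroring the table. The margin fields may not map one-to-one onto CEA blanking intervals, so treat the printed number as an estimate only.

#include <stdio.h>

/* Minimal copy of the timing fields used below; names mirror the table above. */
struct toy_timing {
	unsigned int xres, yres;
	unsigned int left_margin, right_margin, hsync_len;
	unsigned int upper_margin, lower_margin, vsync_len;
	unsigned int fps_num, fps_den;
};

/* Rough pixel clock in Hz: total pixels per line * total lines * frame rate. */
static unsigned long long approx_pixclock(const struct toy_timing *t)
{
	unsigned long long htotal = t->xres + t->left_margin + t->right_margin + t->hsync_len;
	unsigned long long vtotal = t->yres + t->upper_margin + t->lower_margin + t->vsync_len;

	return htotal * vtotal * t->fps_num / t->fps_den;
}

int main(void)
{
	/* Values copied from the 720p60 entry in dm365evm_enc_preset_timing above. */
	struct toy_timing t720p60 = {
		.xres = 1280, .yres = 720,
		.left_margin = 0x117, .right_margin = 70, .hsync_len = 80,
		.upper_margin = 38, .lower_margin = 3, .vsync_len = 5,
		.fps_num = 60, .fps_den = 1,
	};

	printf("approximate pixel clock: %llu Hz\n", approx_pixclock(&t720p60));
	return 0;
}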
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 71735e7797cc..745280d4144c 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -622,7 +622,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = {
622 { 622 {
623 .name = "ntsc", 623 .name = "ntsc",
624 .timings_type = VPBE_ENC_STD, 624 .timings_type = VPBE_ENC_STD,
625 .std_id = V4L2_STD_525_60, 625 .std_id = V4L2_STD_NTSC,
626 .interlaced = 1, 626 .interlaced = 1,
627 .xres = 720, 627 .xres = 720,
628 .yres = 480, 628 .yres = 480,
@@ -634,7 +634,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = {
634 { 634 {
635 .name = "pal", 635 .name = "pal",
636 .timings_type = VPBE_ENC_STD, 636 .timings_type = VPBE_ENC_STD,
637 .std_id = V4L2_STD_625_50, 637 .std_id = V4L2_STD_PAL,
638 .interlaced = 1, 638 .interlaced = 1,
639 .xres = 720, 639 .xres = 720,
640 .yres = 576, 640 .yres = 576,
@@ -649,7 +649,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = {
649static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = { 649static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = {
650 { 650 {
651 .name = "480p59_94", 651 .name = "480p59_94",
652 .timings_type = VPBE_ENC_CUSTOM_TIMINGS, 652 .timings_type = VPBE_ENC_DV_TIMINGS,
653 .dv_timings = V4L2_DV_BT_CEA_720X480P59_94, 653 .dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
654 .interlaced = 0, 654 .interlaced = 0,
655 .xres = 720, 655 .xres = 720,
@@ -661,7 +661,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = {
661 }, 661 },
662 { 662 {
663 .name = "576p50", 663 .name = "576p50",
664 .timings_type = VPBE_ENC_CUSTOM_TIMINGS, 664 .timings_type = VPBE_ENC_DV_TIMINGS,
665 .dv_timings = V4L2_DV_BT_CEA_720X576P50, 665 .dv_timings = V4L2_DV_BT_CEA_720X576P50,
666 .interlaced = 0, 666 .interlaced = 0,
667 .xres = 720, 667 .xres = 720,
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index de7adff324dc..fc4871ac1c2c 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -514,7 +514,7 @@ static const struct vpif_output dm6467_ch0_outputs[] = {
514 .index = 1, 514 .index = 1,
515 .name = "Component", 515 .name = "Component",
516 .type = V4L2_OUTPUT_TYPE_ANALOG, 516 .type = V4L2_OUTPUT_TYPE_ANALOG,
517 .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS, 517 .capabilities = V4L2_OUT_CAP_DV_TIMINGS,
518 }, 518 },
519 .subdev_name = "adv7343", 519 .subdev_name = "adv7343",
520 .output_route = ADV7343_COMPONENT_ID, 520 .output_route = ADV7343_COMPONENT_ID,
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
deleted file mode 100644
index 4729eaab0f40..000000000000
--- a/arch/arm/mach-davinci/cpufreq.c
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2 * CPU frequency scaling for DaVinci
3 *
4 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
7 *
8 * Copyright (C) 2005 Nokia Corporation
9 * Written by Tony Lindgren <tony@atomide.com>
10 *
11 * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
12 *
13 * Copyright (C) 2007-2008 Texas Instruments, Inc.
14 * Updated to support OMAP3
15 * Rajendra Nayak <rnayak@ti.com>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation.
20 */
21#include <linux/types.h>
22#include <linux/cpufreq.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/clk.h>
26#include <linux/platform_device.h>
27#include <linux/export.h>
28
29#include <mach/hardware.h>
30#include <mach/cpufreq.h>
31#include <mach/common.h>
32
33#include "clock.h"
34
35struct davinci_cpufreq {
36 struct device *dev;
37 struct clk *armclk;
38 struct clk *asyncclk;
39 unsigned long asyncrate;
40};
41static struct davinci_cpufreq cpufreq;
42
43static int davinci_verify_speed(struct cpufreq_policy *policy)
44{
45 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
46 struct cpufreq_frequency_table *freq_table = pdata->freq_table;
47 struct clk *armclk = cpufreq.armclk;
48
49 if (freq_table)
50 return cpufreq_frequency_table_verify(policy, freq_table);
51
52 if (policy->cpu)
53 return -EINVAL;
54
55 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
56 policy->cpuinfo.max_freq);
57
58 policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
59 policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
60 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
61 policy->cpuinfo.max_freq);
62 return 0;
63}
64
65static unsigned int davinci_getspeed(unsigned int cpu)
66{
67 if (cpu)
68 return 0;
69
70 return clk_get_rate(cpufreq.armclk) / 1000;
71}
72
73static int davinci_target(struct cpufreq_policy *policy,
74 unsigned int target_freq, unsigned int relation)
75{
76 int ret = 0;
77 unsigned int idx;
78 struct cpufreq_freqs freqs;
79 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
80 struct clk *armclk = cpufreq.armclk;
81
82 /*
83 * Ensure desired rate is within allowed range. Some govenors
84 * (ondemand) will just pass target_freq=0 to get the minimum.
85 */
86 if (target_freq < policy->cpuinfo.min_freq)
87 target_freq = policy->cpuinfo.min_freq;
88 if (target_freq > policy->cpuinfo.max_freq)
89 target_freq = policy->cpuinfo.max_freq;
90
91 freqs.old = davinci_getspeed(0);
92 freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
93 freqs.cpu = 0;
94
95 if (freqs.old == freqs.new)
96 return ret;
97
98 dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
99
100 ret = cpufreq_frequency_table_target(policy, pdata->freq_table,
101 freqs.new, relation, &idx);
102 if (ret)
103 return -EINVAL;
104
105 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
106
107 /* if moving to higher frequency, up the voltage beforehand */
108 if (pdata->set_voltage && freqs.new > freqs.old) {
109 ret = pdata->set_voltage(idx);
110 if (ret)
111 goto out;
112 }
113
114 ret = clk_set_rate(armclk, idx);
115 if (ret)
116 goto out;
117
118 if (cpufreq.asyncclk) {
119 ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
120 if (ret)
121 goto out;
122 }
123
124 /* if moving to lower freq, lower the voltage after lowering freq */
125 if (pdata->set_voltage && freqs.new < freqs.old)
126 pdata->set_voltage(idx);
127
128out:
129 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
130
131 return ret;
132}
133
134static int davinci_cpu_init(struct cpufreq_policy *policy)
135{
136 int result = 0;
137 struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
138 struct cpufreq_frequency_table *freq_table = pdata->freq_table;
139
140 if (policy->cpu != 0)
141 return -EINVAL;
142
143 /* Finish platform specific initialization */
144 if (pdata->init) {
145 result = pdata->init();
146 if (result)
147 return result;
148 }
149
150 policy->cur = policy->min = policy->max = davinci_getspeed(0);
151
152 if (freq_table) {
153 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
154 if (!result)
155 cpufreq_frequency_table_get_attr(freq_table,
156 policy->cpu);
157 } else {
158 policy->cpuinfo.min_freq = policy->min;
159 policy->cpuinfo.max_freq = policy->max;
160 }
161
162 policy->min = policy->cpuinfo.min_freq;
163 policy->max = policy->cpuinfo.max_freq;
164 policy->cur = davinci_getspeed(0);
165
166 /*
167 * Time measurement across the target() function yields ~1500-1800us
168 * time taken with no drivers on notification list.
169 * Setting the latency to 2000 us to accommodate addition of drivers
170 * to pre/post change notification list.
171 */
172 policy->cpuinfo.transition_latency = 2000 * 1000;
173 return 0;
174}
175
176static int davinci_cpu_exit(struct cpufreq_policy *policy)
177{
178 cpufreq_frequency_table_put_attr(policy->cpu);
179 return 0;
180}
181
182static struct freq_attr *davinci_cpufreq_attr[] = {
183 &cpufreq_freq_attr_scaling_available_freqs,
184 NULL,
185};
186
187static struct cpufreq_driver davinci_driver = {
188 .flags = CPUFREQ_STICKY,
189 .verify = davinci_verify_speed,
190 .target = davinci_target,
191 .get = davinci_getspeed,
192 .init = davinci_cpu_init,
193 .exit = davinci_cpu_exit,
194 .name = "davinci",
195 .attr = davinci_cpufreq_attr,
196};
197
198static int __init davinci_cpufreq_probe(struct platform_device *pdev)
199{
200 struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
201 struct clk *asyncclk;
202
203 if (!pdata)
204 return -EINVAL;
205 if (!pdata->freq_table)
206 return -EINVAL;
207
208 cpufreq.dev = &pdev->dev;
209
210 cpufreq.armclk = clk_get(NULL, "arm");
211 if (IS_ERR(cpufreq.armclk)) {
212 dev_err(cpufreq.dev, "Unable to get ARM clock\n");
213 return PTR_ERR(cpufreq.armclk);
214 }
215
216 asyncclk = clk_get(cpufreq.dev, "async");
217 if (!IS_ERR(asyncclk)) {
218 cpufreq.asyncclk = asyncclk;
219 cpufreq.asyncrate = clk_get_rate(asyncclk);
220 }
221
222 return cpufreq_register_driver(&davinci_driver);
223}
224
225static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
226{
227 clk_put(cpufreq.armclk);
228
229 if (cpufreq.asyncclk)
230 clk_put(cpufreq.asyncclk);
231
232 return cpufreq_unregister_driver(&davinci_driver);
233}
234
235static struct platform_driver davinci_cpufreq_driver = {
236 .driver = {
237 .name = "cpufreq-davinci",
238 .owner = THIS_MODULE,
239 },
240 .remove = __exit_p(davinci_cpufreq_remove),
241};
242
243int __init davinci_cpufreq_init(void)
244{
245 return platform_driver_probe(&davinci_cpufreq_driver,
246 davinci_cpufreq_probe);
247}
248
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 5ac9e9384b15..36aef3a7dedb 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,7 +25,6 @@
25 25
26#define DAVINCI_CPUIDLE_MAX_STATES 2 26#define DAVINCI_CPUIDLE_MAX_STATES 2
27 27
28static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
29static void __iomem *ddr2_reg_base; 28static void __iomem *ddr2_reg_base;
30static bool ddr2_pdown; 29static bool ddr2_pdown;
31 30
@@ -50,14 +49,10 @@ static void davinci_save_ddr_power(int enter, bool pdown)
50 49
51/* Actual code that puts the SoC in different idle states */ 50/* Actual code that puts the SoC in different idle states */
52static int davinci_enter_idle(struct cpuidle_device *dev, 51static int davinci_enter_idle(struct cpuidle_device *dev,
53 struct cpuidle_driver *drv, 52 struct cpuidle_driver *drv, int index)
54 int index)
55{ 53{
56 davinci_save_ddr_power(1, ddr2_pdown); 54 davinci_save_ddr_power(1, ddr2_pdown);
57 55 cpu_do_idle();
58 index = cpuidle_wrap_enter(dev, drv, index,
59 arm_cpuidle_simple_enter);
60
61 davinci_save_ddr_power(0, ddr2_pdown); 56 davinci_save_ddr_power(0, ddr2_pdown);
62 57
63 return index; 58 return index;
@@ -66,7 +61,6 @@ static int davinci_enter_idle(struct cpuidle_device *dev,
66static struct cpuidle_driver davinci_idle_driver = { 61static struct cpuidle_driver davinci_idle_driver = {
67 .name = "cpuidle-davinci", 62 .name = "cpuidle-davinci",
68 .owner = THIS_MODULE, 63 .owner = THIS_MODULE,
69 .en_core_tk_irqen = 1,
70 .states[0] = ARM_CPUIDLE_WFI_STATE, 64 .states[0] = ARM_CPUIDLE_WFI_STATE,
71 .states[1] = { 65 .states[1] = {
72 .enter = davinci_enter_idle, 66 .enter = davinci_enter_idle,
@@ -81,12 +75,8 @@ static struct cpuidle_driver davinci_idle_driver = {
81 75
82static int __init davinci_cpuidle_probe(struct platform_device *pdev) 76static int __init davinci_cpuidle_probe(struct platform_device *pdev)
83{ 77{
84 int ret;
85 struct cpuidle_device *device;
86 struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; 78 struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
87 79
88 device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
89
90 if (!pdata) { 80 if (!pdata) {
91 dev_err(&pdev->dev, "cannot get platform data\n"); 81 dev_err(&pdev->dev, "cannot get platform data\n");
92 return -ENOENT; 82 return -ENOENT;
@@ -96,20 +86,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
96 86
97 ddr2_pdown = pdata->ddr2_pdown; 87 ddr2_pdown = pdata->ddr2_pdown;
98 88
99 ret = cpuidle_register_driver(&davinci_idle_driver); 89 return cpuidle_register(&davinci_idle_driver, NULL);
100 if (ret) {
101 dev_err(&pdev->dev, "failed to register driver\n");
102 return ret;
103 }
104
105 ret = cpuidle_register_device(device);
106 if (ret) {
107 dev_err(&pdev->dev, "failed to register device\n");
108 cpuidle_unregister_driver(&davinci_idle_driver);
109 return ret;
110 }
111
112 return 0;
113} 90}
114 91
115static struct platform_driver davinci_cpuidle_driver = { 92static struct platform_driver davinci_cpuidle_driver = {
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index 12d544befcfa..1ab3df423dac 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -36,12 +36,19 @@
36#include <media/davinci/vpbe_osd.h> 36#include <media/davinci/vpbe_osd.h>
37 37
38#define DAVINCI_SYSTEM_MODULE_BASE 0x01c40000 38#define DAVINCI_SYSTEM_MODULE_BASE 0x01c40000
39#define SYSMOD_VDAC_CONFIG 0x2c
39#define SYSMOD_VIDCLKCTL 0x38 40#define SYSMOD_VIDCLKCTL 0x38
40#define SYSMOD_VPSS_CLKCTL 0x44 41#define SYSMOD_VPSS_CLKCTL 0x44
41#define SYSMOD_VDD3P3VPWDN 0x48 42#define SYSMOD_VDD3P3VPWDN 0x48
42#define SYSMOD_VSCLKDIS 0x6c 43#define SYSMOD_VSCLKDIS 0x6c
43#define SYSMOD_PUPDCTL1 0x7c 44#define SYSMOD_PUPDCTL1 0x7c
44 45
46/* VPSS CLKCTL bit definitions */
47#define VPSS_MUXSEL_EXTCLK_ENABLE BIT(1)
48#define VPSS_VENCCLKEN_ENABLE BIT(3)
49#define VPSS_DACCLKEN_ENABLE BIT(4)
50#define VPSS_PLLC2SYSCLK5_ENABLE BIT(5)
51
45extern void __iomem *davinci_sysmod_base; 52extern void __iomem *davinci_sysmod_base;
46#define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x)) 53#define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x))
47void davinci_map_sysmod(void); 54void davinci_map_sysmod(void);
@@ -74,7 +81,7 @@ void __init dm355_init(void);
74void dm355_init_spi0(unsigned chipselect_mask, 81void dm355_init_spi0(unsigned chipselect_mask,
75 const struct spi_board_info *info, unsigned len); 82 const struct spi_board_info *info, unsigned len);
76void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata); 83void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata);
77void dm355_set_vpfe_config(struct vpfe_config *cfg); 84int dm355_init_video(struct vpfe_config *, struct vpbe_config *);
78 85
79/* DM365 function declarations */ 86/* DM365 function declarations */
80void __init dm365_init(void); 87void __init dm365_init(void);
@@ -84,7 +91,7 @@ void __init dm365_init_ks(struct davinci_ks_platform_data *pdata);
84void __init dm365_init_rtc(void); 91void __init dm365_init_rtc(void);
85void dm365_init_spi0(unsigned chipselect_mask, 92void dm365_init_spi0(unsigned chipselect_mask,
86 const struct spi_board_info *info, unsigned len); 93 const struct spi_board_info *info, unsigned len);
87void dm365_set_vpfe_config(struct vpfe_config *cfg); 94int dm365_init_video(struct vpfe_config *, struct vpbe_config *);
88 95
89/* DM644x function declarations */ 96/* DM644x function declarations */
90void __init dm644x_init(void); 97void __init dm644x_init(void);
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index b49c3b77d55e..bf9a9d4ad9f5 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -35,6 +35,8 @@
35#include "asp.h" 35#include "asp.h"
36 36
37#define DM355_UART2_BASE (IO_PHYS + 0x206000) 37#define DM355_UART2_BASE (IO_PHYS + 0x206000)
38#define DM355_OSD_BASE (IO_PHYS + 0x70200)
39#define DM355_VENC_BASE (IO_PHYS + 0x70400)
38 40
39/* 41/*
40 * Device specific clocks 42 * Device specific clocks
@@ -345,8 +347,8 @@ static struct clk_lookup dm355_clks[] = {
345 CLK(NULL, "pll1_aux", &pll1_aux_clk), 347 CLK(NULL, "pll1_aux", &pll1_aux_clk),
346 CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp), 348 CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
347 CLK(NULL, "vpss_dac", &vpss_dac_clk), 349 CLK(NULL, "vpss_dac", &vpss_dac_clk),
348 CLK(NULL, "vpss_master", &vpss_master_clk), 350 CLK("vpss", "master", &vpss_master_clk),
349 CLK(NULL, "vpss_slave", &vpss_slave_clk), 351 CLK("vpss", "slave", &vpss_slave_clk),
350 CLK(NULL, "clkout1", &clkout1_clk), 352 CLK(NULL, "clkout1", &clkout1_clk),
351 CLK(NULL, "clkout2", &clkout2_clk), 353 CLK(NULL, "clkout2", &clkout2_clk),
352 CLK(NULL, "pll2", &pll2_clk), 354 CLK(NULL, "pll2", &pll2_clk),
@@ -744,11 +746,146 @@ static struct platform_device vpfe_capture_dev = {
744 }, 746 },
745}; 747};
746 748
747void dm355_set_vpfe_config(struct vpfe_config *cfg) 749static struct resource dm355_osd_resources[] = {
750 {
751 .start = DM355_OSD_BASE,
752 .end = DM355_OSD_BASE + 0x17f,
753 .flags = IORESOURCE_MEM,
754 },
755};
756
757static struct platform_device dm355_osd_dev = {
758 .name = DM355_VPBE_OSD_SUBDEV_NAME,
759 .id = -1,
760 .num_resources = ARRAY_SIZE(dm355_osd_resources),
761 .resource = dm355_osd_resources,
762 .dev = {
763 .dma_mask = &vpfe_capture_dma_mask,
764 .coherent_dma_mask = DMA_BIT_MASK(32),
765 },
766};
767
768static struct resource dm355_venc_resources[] = {
769 {
770 .start = IRQ_VENCINT,
771 .end = IRQ_VENCINT,
772 .flags = IORESOURCE_IRQ,
773 },
774 /* venc registers io space */
775 {
776 .start = DM355_VENC_BASE,
777 .end = DM355_VENC_BASE + 0x17f,
778 .flags = IORESOURCE_MEM,
779 },
780 /* VDAC config register io space */
781 {
782 .start = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG,
783 .end = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG + 3,
784 .flags = IORESOURCE_MEM,
785 },
786};
787
788static struct resource dm355_v4l2_disp_resources[] = {
789 {
790 .start = IRQ_VENCINT,
791 .end = IRQ_VENCINT,
792 .flags = IORESOURCE_IRQ,
793 },
794 /* venc registers io space */
795 {
796 .start = DM355_VENC_BASE,
797 .end = DM355_VENC_BASE + 0x17f,
798 .flags = IORESOURCE_MEM,
799 },
800};
801
802static int dm355_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type,
803 int field)
804{
805 switch (if_type) {
806 case V4L2_MBUS_FMT_SGRBG8_1X8:
807 davinci_cfg_reg(DM355_VOUT_FIELD_G70);
808 break;
809 case V4L2_MBUS_FMT_YUYV10_1X20:
810 if (field)
811 davinci_cfg_reg(DM355_VOUT_FIELD);
812 else
813 davinci_cfg_reg(DM355_VOUT_FIELD_G70);
814 break;
815 default:
816 return -EINVAL;
817 }
818
819 davinci_cfg_reg(DM355_VOUT_COUTL_EN);
820 davinci_cfg_reg(DM355_VOUT_COUTH_EN);
821
822 return 0;
823}
824
825static int dm355_venc_setup_clock(enum vpbe_enc_timings_type type,
826 unsigned int pclock)
748{ 827{
749 vpfe_capture_dev.dev.platform_data = cfg; 828 void __iomem *vpss_clk_ctrl_reg;
829
830 vpss_clk_ctrl_reg = DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL);
831
832 switch (type) {
833 case VPBE_ENC_STD:
834 writel(VPSS_DACCLKEN_ENABLE | VPSS_VENCCLKEN_ENABLE,
835 vpss_clk_ctrl_reg);
836 break;
837 case VPBE_ENC_DV_TIMINGS:
838 if (pclock > 27000000)
839 /*
840 * For HD, use external clock source since we cannot
841 * support HD mode with internal clocks.
842 */
843 writel(VPSS_MUXSEL_EXTCLK_ENABLE, vpss_clk_ctrl_reg);
844 break;
845 default:
846 return -EINVAL;
847 }
848
849 return 0;
750} 850}
751 851
852static struct platform_device dm355_vpbe_display = {
853 .name = "vpbe-v4l2",
854 .id = -1,
855 .num_resources = ARRAY_SIZE(dm355_v4l2_disp_resources),
856 .resource = dm355_v4l2_disp_resources,
857 .dev = {
858 .dma_mask = &vpfe_capture_dma_mask,
859 .coherent_dma_mask = DMA_BIT_MASK(32),
860 },
861};
862
863struct venc_platform_data dm355_venc_pdata = {
864 .setup_pinmux = dm355_vpbe_setup_pinmux,
865 .setup_clock = dm355_venc_setup_clock,
866};
867
868static struct platform_device dm355_venc_dev = {
869 .name = DM355_VPBE_VENC_SUBDEV_NAME,
870 .id = -1,
871 .num_resources = ARRAY_SIZE(dm355_venc_resources),
872 .resource = dm355_venc_resources,
873 .dev = {
874 .dma_mask = &vpfe_capture_dma_mask,
875 .coherent_dma_mask = DMA_BIT_MASK(32),
876 .platform_data = (void *)&dm355_venc_pdata,
877 },
878};
879
880static struct platform_device dm355_vpbe_dev = {
881 .name = "vpbe_controller",
882 .id = -1,
883 .dev = {
884 .dma_mask = &vpfe_capture_dma_mask,
885 .coherent_dma_mask = DMA_BIT_MASK(32),
886 },
887};
888
752/*----------------------------------------------------------------------*/ 889/*----------------------------------------------------------------------*/
753 890
754static struct map_desc dm355_io_desc[] = { 891static struct map_desc dm355_io_desc[] = {
@@ -868,19 +1005,36 @@ void __init dm355_init(void)
868 davinci_map_sysmod(); 1005 davinci_map_sysmod();
869} 1006}
870 1007
1008int __init dm355_init_video(struct vpfe_config *vpfe_cfg,
1009 struct vpbe_config *vpbe_cfg)
1010{
1011 if (vpfe_cfg || vpbe_cfg)
1012 platform_device_register(&dm355_vpss_device);
1013
1014 if (vpfe_cfg) {
1015 vpfe_capture_dev.dev.platform_data = vpfe_cfg;
1016 platform_device_register(&dm355_ccdc_dev);
1017 platform_device_register(&vpfe_capture_dev);
1018 }
1019
1020 if (vpbe_cfg) {
1021 dm355_vpbe_dev.dev.platform_data = vpbe_cfg;
1022 platform_device_register(&dm355_osd_dev);
1023 platform_device_register(&dm355_venc_dev);
1024 platform_device_register(&dm355_vpbe_dev);
1025 platform_device_register(&dm355_vpbe_display);
1026 }
1027
1028 return 0;
1029}
1030
871static int __init dm355_init_devices(void) 1031static int __init dm355_init_devices(void)
872{ 1032{
873 if (!cpu_is_davinci_dm355()) 1033 if (!cpu_is_davinci_dm355())
874 return 0; 1034 return 0;
875 1035
876 /* Add ccdc clock aliases */
877 clk_add_alias("master", dm355_ccdc_dev.name, "vpss_master", NULL);
878 clk_add_alias("slave", dm355_ccdc_dev.name, "vpss_master", NULL);
879 davinci_cfg_reg(DM355_INT_EDMA_CC); 1036 davinci_cfg_reg(DM355_INT_EDMA_CC);
880 platform_device_register(&dm355_edma_device); 1037 platform_device_register(&dm355_edma_device);
881 platform_device_register(&dm355_vpss_device);
882 platform_device_register(&dm355_ccdc_dev);
883 platform_device_register(&vpfe_capture_dev);
884 1038
885 return 0; 1039 return 0;
886} 1040}
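
dm355_set_vpfe_config() is replaced by dm355_init_video(), which takes both a capture (VPFE) and a display (VPBE) configuration and registers the corresponding platform devices only for the configurations a board actually provides. A sketch of how a board file might call it follows; the config names and header paths are illustrative assumptions, not taken from this patch.

	#include <linux/init.h>
	#include <media/davinci/vpfe_capture.h>	/* struct vpfe_config (assumed path) */
	#include <media/davinci/vpbe.h>		/* struct vpbe_config (assumed path) */

	#include "davinci.h"			/* dm355_init_video() */

	/* illustrative, empty configurations; a real board fills in
	 * sub-devices, routing and output timings here */
	static struct vpfe_config board_vpfe_cfg;
	static struct vpbe_config board_vpbe_cfg;

	static void __init board_init_video(void)
	{
		/* registers VPSS, VPFE and VPBE devices for the configs given;
		 * passing NULL for either argument skips that half entirely */
		dm355_init_video(&board_vpfe_cfg, &board_vpbe_cfg);
	}
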
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6c3980540be0..ff771ceac3f1 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -39,16 +39,13 @@
39#include "asp.h" 39#include "asp.h"
40 40
41#define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */ 41#define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */
42
43/* Base of key scan register bank */
44#define DM365_KEYSCAN_BASE 0x01c69400
45
46#define DM365_RTC_BASE 0x01c69000 42#define DM365_RTC_BASE 0x01c69000
47 43#define DM365_KEYSCAN_BASE 0x01c69400
44#define DM365_OSD_BASE 0x01c71c00
45#define DM365_VENC_BASE 0x01c71e00
48#define DAVINCI_DM365_VC_BASE 0x01d0c000 46#define DAVINCI_DM365_VC_BASE 0x01d0c000
49#define DAVINCI_DMA_VC_TX 2 47#define DAVINCI_DMA_VC_TX 2
50#define DAVINCI_DMA_VC_RX 3 48#define DAVINCI_DMA_VC_RX 3
51
52#define DM365_EMAC_BASE 0x01d07000 49#define DM365_EMAC_BASE 0x01d07000
53#define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000) 50#define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000)
54#define DM365_EMAC_CNTRL_OFFSET 0x0000 51#define DM365_EMAC_CNTRL_OFFSET 0x0000
@@ -257,6 +254,12 @@ static struct clk vpss_master_clk = {
257 .flags = CLK_PSC, 254 .flags = CLK_PSC,
258}; 255};
259 256
257static struct clk vpss_slave_clk = {
258 .name = "vpss_slave",
259 .parent = &pll1_sysclk5,
260 .lpsc = DAVINCI_LPSC_VPSSSLV,
261};
262
260static struct clk arm_clk = { 263static struct clk arm_clk = {
261 .name = "arm_clk", 264 .name = "arm_clk",
262 .parent = &pll2_sysclk2, 265 .parent = &pll2_sysclk2,
@@ -449,7 +452,8 @@ static struct clk_lookup dm365_clks[] = {
449 CLK(NULL, "pll2_sysclk8", &pll2_sysclk8), 452 CLK(NULL, "pll2_sysclk8", &pll2_sysclk8),
450 CLK(NULL, "pll2_sysclk9", &pll2_sysclk9), 453 CLK(NULL, "pll2_sysclk9", &pll2_sysclk9),
451 CLK(NULL, "vpss_dac", &vpss_dac_clk), 454 CLK(NULL, "vpss_dac", &vpss_dac_clk),
452 CLK(NULL, "vpss_master", &vpss_master_clk), 455 CLK("vpss", "master", &vpss_master_clk),
456 CLK("vpss", "slave", &vpss_slave_clk),
453 CLK(NULL, "arm", &arm_clk), 457 CLK(NULL, "arm", &arm_clk),
454 CLK(NULL, "uart0", &uart0_clk), 458 CLK(NULL, "uart0", &uart0_clk),
455 CLK(NULL, "uart1", &uart1_clk), 459 CLK(NULL, "uart1", &uart1_clk),
@@ -1226,6 +1230,173 @@ static struct platform_device dm365_isif_dev = {
1226 }, 1230 },
1227}; 1231};
1228 1232
1233static struct resource dm365_osd_resources[] = {
1234 {
1235 .start = DM365_OSD_BASE,
1236 .end = DM365_OSD_BASE + 0xff,
1237 .flags = IORESOURCE_MEM,
1238 },
1239};
1240
1241static u64 dm365_video_dma_mask = DMA_BIT_MASK(32);
1242
1243static struct platform_device dm365_osd_dev = {
1244 .name = DM365_VPBE_OSD_SUBDEV_NAME,
1245 .id = -1,
1246 .num_resources = ARRAY_SIZE(dm365_osd_resources),
1247 .resource = dm365_osd_resources,
1248 .dev = {
1249 .dma_mask = &dm365_video_dma_mask,
1250 .coherent_dma_mask = DMA_BIT_MASK(32),
1251 },
1252};
1253
1254static struct resource dm365_venc_resources[] = {
1255 {
1256 .start = IRQ_VENCINT,
1257 .end = IRQ_VENCINT,
1258 .flags = IORESOURCE_IRQ,
1259 },
1260 /* venc registers io space */
1261 {
1262 .start = DM365_VENC_BASE,
1263 .end = DM365_VENC_BASE + 0x177,
1264 .flags = IORESOURCE_MEM,
1265 },
1266 /* vdaccfg registers io space */
1267 {
1268 .start = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG,
1269 .end = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG + 3,
1270 .flags = IORESOURCE_MEM,
1271 },
1272};
1273
1274static struct resource dm365_v4l2_disp_resources[] = {
1275 {
1276 .start = IRQ_VENCINT,
1277 .end = IRQ_VENCINT,
1278 .flags = IORESOURCE_IRQ,
1279 },
1280 /* venc registers io space */
1281 {
1282 .start = DM365_VENC_BASE,
1283 .end = DM365_VENC_BASE + 0x177,
1284 .flags = IORESOURCE_MEM,
1285 },
1286};
1287
1288static int dm365_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type,
1289 int field)
1290{
1291 switch (if_type) {
1292 case V4L2_MBUS_FMT_SGRBG8_1X8:
1293 davinci_cfg_reg(DM365_VOUT_FIELD_G81);
1294 davinci_cfg_reg(DM365_VOUT_COUTL_EN);
1295 davinci_cfg_reg(DM365_VOUT_COUTH_EN);
1296 break;
1297 case V4L2_MBUS_FMT_YUYV10_1X20:
1298 if (field)
1299 davinci_cfg_reg(DM365_VOUT_FIELD);
1300 else
1301 davinci_cfg_reg(DM365_VOUT_FIELD_G81);
1302 davinci_cfg_reg(DM365_VOUT_COUTL_EN);
1303 davinci_cfg_reg(DM365_VOUT_COUTH_EN);
1304 break;
1305 default:
1306 return -EINVAL;
1307 }
1308
1309 return 0;
1310}
1311
1312static int dm365_venc_setup_clock(enum vpbe_enc_timings_type type,
1313 unsigned int pclock)
1314{
1315 void __iomem *vpss_clkctl_reg;
1316 u32 val;
1317
1318 vpss_clkctl_reg = DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL);
1319
1320 switch (type) {
1321 case VPBE_ENC_STD:
1322 val = VPSS_VENCCLKEN_ENABLE | VPSS_DACCLKEN_ENABLE;
1323 break;
1324 case VPBE_ENC_DV_TIMINGS:
1325 if (pclock <= 27000000) {
1326 val = VPSS_VENCCLKEN_ENABLE | VPSS_DACCLKEN_ENABLE;
1327 } else {
1328 /* set sysclk4 to output 74.25 MHz from pll1 */
1329 val = VPSS_PLLC2SYSCLK5_ENABLE | VPSS_DACCLKEN_ENABLE |
1330 VPSS_VENCCLKEN_ENABLE;
1331 }
1332 break;
1333 default:
1334 return -EINVAL;
1335 }
1336 writel(val, vpss_clkctl_reg);
1337
1338 return 0;
1339}
1340
1341static struct platform_device dm365_vpbe_display = {
1342 .name = "vpbe-v4l2",
1343 .id = -1,
1344 .num_resources = ARRAY_SIZE(dm365_v4l2_disp_resources),
1345 .resource = dm365_v4l2_disp_resources,
1346 .dev = {
1347 .dma_mask = &dm365_video_dma_mask,
1348 .coherent_dma_mask = DMA_BIT_MASK(32),
1349 },
1350};
1351
1352struct venc_platform_data dm365_venc_pdata = {
1353 .setup_pinmux = dm365_vpbe_setup_pinmux,
1354 .setup_clock = dm365_venc_setup_clock,
1355};
1356
1357static struct platform_device dm365_venc_dev = {
1358 .name = DM365_VPBE_VENC_SUBDEV_NAME,
1359 .id = -1,
1360 .num_resources = ARRAY_SIZE(dm365_venc_resources),
1361 .resource = dm365_venc_resources,
1362 .dev = {
1363 .dma_mask = &dm365_video_dma_mask,
1364 .coherent_dma_mask = DMA_BIT_MASK(32),
1365 .platform_data = (void *)&dm365_venc_pdata,
1366 },
1367};
1368
1369static struct platform_device dm365_vpbe_dev = {
1370 .name = "vpbe_controller",
1371 .id = -1,
1372 .dev = {
1373 .dma_mask = &dm365_video_dma_mask,
1374 .coherent_dma_mask = DMA_BIT_MASK(32),
1375 },
1376};
1377
1378int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
1379 struct vpbe_config *vpbe_cfg)
1380{
1381 if (vpfe_cfg || vpbe_cfg)
1382 platform_device_register(&dm365_vpss_device);
1383
1384 if (vpfe_cfg) {
1385 vpfe_capture_dev.dev.platform_data = vpfe_cfg;
1386 platform_device_register(&dm365_isif_dev);
1387 platform_device_register(&vpfe_capture_dev);
1388 }
1389 if (vpbe_cfg) {
1390 dm365_vpbe_dev.dev.platform_data = vpbe_cfg;
1391 platform_device_register(&dm365_osd_dev);
1392 platform_device_register(&dm365_venc_dev);
1393 platform_device_register(&dm365_vpbe_dev);
1394 platform_device_register(&dm365_vpbe_display);
1395 }
1396
1397 return 0;
1398}
1399
1229static int __init dm365_init_devices(void) 1400static int __init dm365_init_devices(void)
1230{ 1401{
1231 if (!cpu_is_davinci_dm365()) 1402 if (!cpu_is_davinci_dm365())
@@ -1239,16 +1410,6 @@ static int __init dm365_init_devices(void)
1239 clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev), 1410 clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev),
1240 NULL, &dm365_emac_device.dev); 1411 NULL, &dm365_emac_device.dev);
1241 1412
1242 /* Add isif clock alias */
1243 clk_add_alias("master", dm365_isif_dev.name, "vpss_master", NULL);
1244 platform_device_register(&dm365_vpss_device);
1245 platform_device_register(&dm365_isif_dev);
1246 platform_device_register(&vpfe_capture_dev);
1247 return 0; 1413 return 0;
1248} 1414}
1249postcore_initcall(dm365_init_devices); 1415postcore_initcall(dm365_init_devices);
1250
1251void dm365_set_vpfe_config(struct vpfe_config *cfg)
1252{
1253 vpfe_capture_dev.dev.platform_data = cfg;
1254}
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index db1dd92e00af..c2a9273330bf 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -300,8 +300,8 @@ static struct clk_lookup dm644x_clks[] = {
300 CLK(NULL, "dsp", &dsp_clk), 300 CLK(NULL, "dsp", &dsp_clk),
301 CLK(NULL, "arm", &arm_clk), 301 CLK(NULL, "arm", &arm_clk),
302 CLK(NULL, "vicp", &vicp_clk), 302 CLK(NULL, "vicp", &vicp_clk),
303 CLK(NULL, "vpss_master", &vpss_master_clk), 303 CLK("vpss", "master", &vpss_master_clk),
304 CLK(NULL, "vpss_slave", &vpss_slave_clk), 304 CLK("vpss", "slave", &vpss_slave_clk),
305 CLK(NULL, "arm", &arm_clk), 305 CLK(NULL, "arm", &arm_clk),
306 CLK(NULL, "uart0", &uart0_clk), 306 CLK(NULL, "uart0", &uart0_clk),
307 CLK(NULL, "uart1", &uart1_clk), 307 CLK(NULL, "uart1", &uart1_clk),
@@ -706,7 +706,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
706 v |= DM644X_VPSS_DACCLKEN; 706 v |= DM644X_VPSS_DACCLKEN;
707 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); 707 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
708 break; 708 break;
709 case VPBE_ENC_CUSTOM_TIMINGS: 709 case VPBE_ENC_DV_TIMINGS:
710 if (pclock <= 27000000) { 710 if (pclock <= 27000000) {
711 v |= DM644X_VPSS_DACCLKEN; 711 v |= DM644X_VPSS_DACCLKEN;
712 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); 712 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
@@ -901,11 +901,6 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
901 dm644x_vpfe_dev.dev.platform_data = vpfe_cfg; 901 dm644x_vpfe_dev.dev.platform_data = vpfe_cfg;
902 platform_device_register(&dm644x_ccdc_dev); 902 platform_device_register(&dm644x_ccdc_dev);
903 platform_device_register(&dm644x_vpfe_dev); 903 platform_device_register(&dm644x_vpfe_dev);
904 /* Add ccdc clock aliases */
905 clk_add_alias("master", dm644x_ccdc_dev.name,
906 "vpss_master", NULL);
907 clk_add_alias("slave", dm644x_ccdc_dev.name,
908 "vpss_slave", NULL);
909 } 904 }
910 905
911 if (vpbe_cfg) { 906 if (vpbe_cfg) {
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
index c90250e3bef8..6b98413cebd6 100644
--- a/arch/arm/mach-davinci/pm_domain.c
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -53,7 +53,7 @@ static struct dev_pm_domain davinci_pm_domain = {
53 53
54static struct pm_clk_notifier_block platform_bus_notifier = { 54static struct pm_clk_notifier_block platform_bus_notifier = {
55 .pm_domain = &davinci_pm_domain, 55 .pm_domain = &davinci_pm_domain,
56 .con_ids = { "fck", NULL, }, 56 .con_ids = { "fck", "master", "slave", NULL },
57}; 57};
58 58
59static int __init davinci_pm_runtime_init(void) 59static int __init davinci_pm_runtime_init(void)
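
Renaming the clkdev entries to CLK("vpss", "master", ...) / CLK("vpss", "slave", ...) and listing "master" and "slave" in the pm_clk notifier's con_ids lets both the VPSS driver and the DaVinci runtime-PM domain find these clocks by connection id, which is why the clk_add_alias() calls disappear from the SoC files. A sketch of the lookup on the driver side, assuming a platform device named "vpss" (probe name illustrative, not from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* illustrative probe; clkdev matches the "vpss" dev_id against
	 * dev_name(&pdev->dev), so no alias entries are needed */
	static int example_vpss_probe(struct platform_device *pdev)
	{
		struct clk *master, *slave;

		master = clk_get(&pdev->dev, "master");
		if (IS_ERR(master))
			return PTR_ERR(master);

		slave = clk_get(&pdev->dev, "slave");
		if (IS_ERR(slave)) {
			clk_put(master);
			return PTR_ERR(slave);
		}

		clk_prepare_enable(master);
		clk_prepare_enable(slave);
		return 0;
	}
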
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 70f94c87479d..d5dde0727339 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -72,10 +72,12 @@ config SOC_EXYNOS5440
72 bool "SAMSUNG EXYNOS5440" 72 bool "SAMSUNG EXYNOS5440"
73 default y 73 default y
74 depends on ARCH_EXYNOS5 74 depends on ARCH_EXYNOS5
75 select ARCH_HAS_OPP
75 select ARM_ARCH_TIMER 76 select ARM_ARCH_TIMER
76 select AUTO_ZRELADDR 77 select AUTO_ZRELADDR
77 select PINCTRL 78 select PINCTRL
78 select PINCTRL_EXYNOS5440 79 select PINCTRL_EXYNOS5440
80 select PM_OPP
79 help 81 help
80 Enable EXYNOS5440 SoC support 82 Enable EXYNOS5440 SoC support
81 83
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index fcfe0251aa3e..498a7a23e260 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -58,7 +58,6 @@ static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device);
58static struct cpuidle_driver exynos4_idle_driver = { 58static struct cpuidle_driver exynos4_idle_driver = {
59 .name = "exynos4_idle", 59 .name = "exynos4_idle",
60 .owner = THIS_MODULE, 60 .owner = THIS_MODULE,
61 .en_core_tk_irqen = 1,
62}; 61};
63 62
64/* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */ 63/* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 497fcb793dc1..d28c7fbaba2d 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -97,6 +97,19 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
97static struct regulator_consumer_supply max8952_consumer = 97static struct regulator_consumer_supply max8952_consumer =
98 REGULATOR_SUPPLY("vdd_arm", NULL); 98 REGULATOR_SUPPLY("vdd_arm", NULL);
99 99
100static struct regulator_init_data universal_max8952_reg_data = {
101 .constraints = {
102 .name = "VARM_1.2V",
103 .min_uV = 770000,
104 .max_uV = 1400000,
105 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
106 .always_on = 1,
107 .boot_on = 1,
108 },
109 .num_consumer_supplies = 1,
110 .consumer_supplies = &max8952_consumer,
111};
112
100static struct max8952_platform_data universal_max8952_pdata __initdata = { 113static struct max8952_platform_data universal_max8952_pdata __initdata = {
101 .gpio_vid0 = EXYNOS4_GPX0(3), 114 .gpio_vid0 = EXYNOS4_GPX0(3),
102 .gpio_vid1 = EXYNOS4_GPX0(4), 115 .gpio_vid1 = EXYNOS4_GPX0(4),
@@ -105,19 +118,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
105 .dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */ 118 .dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */
106 .sync_freq = 0, /* default: fastest */ 119 .sync_freq = 0, /* default: fastest */
107 .ramp_speed = 0, /* default: fastest */ 120 .ramp_speed = 0, /* default: fastest */
108 121 .reg_data = &universal_max8952_reg_data,
109 .reg_data = {
110 .constraints = {
111 .name = "VARM_1.2V",
112 .min_uV = 770000,
113 .max_uV = 1400000,
114 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
115 .always_on = 1,
116 .boot_on = 1,
117 },
118 .num_consumer_supplies = 1,
119 .consumer_supplies = &max8952_consumer,
120 },
121}; 122};
122 123
123static struct regulator_consumer_supply lp3974_buck1_consumer = 124static struct regulator_consumer_supply lp3974_buck1_consumer =
diff --git a/arch/arm/mach-exynos/setup-usb-phy.c b/arch/arm/mach-exynos/setup-usb-phy.c
index b81cc569a8dd..6af40662a449 100644
--- a/arch/arm/mach-exynos/setup-usb-phy.c
+++ b/arch/arm/mach-exynos/setup-usb-phy.c
@@ -204,9 +204,9 @@ static int exynos4210_usb_phy1_exit(struct platform_device *pdev)
204 204
205int s5p_usb_phy_init(struct platform_device *pdev, int type) 205int s5p_usb_phy_init(struct platform_device *pdev, int type)
206{ 206{
207 if (type == S5P_USB_PHY_DEVICE) 207 if (type == USB_PHY_TYPE_DEVICE)
208 return exynos4210_usb_phy0_init(pdev); 208 return exynos4210_usb_phy0_init(pdev);
209 else if (type == S5P_USB_PHY_HOST) 209 else if (type == USB_PHY_TYPE_HOST)
210 return exynos4210_usb_phy1_init(pdev); 210 return exynos4210_usb_phy1_init(pdev);
211 211
212 return -EINVAL; 212 return -EINVAL;
@@ -214,9 +214,9 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
214 214
215int s5p_usb_phy_exit(struct platform_device *pdev, int type) 215int s5p_usb_phy_exit(struct platform_device *pdev, int type)
216{ 216{
217 if (type == S5P_USB_PHY_DEVICE) 217 if (type == USB_PHY_TYPE_DEVICE)
218 return exynos4210_usb_phy0_exit(pdev); 218 return exynos4210_usb_phy0_exit(pdev);
219 else if (type == S5P_USB_PHY_HOST) 219 else if (type == USB_PHY_TYPE_HOST)
220 return exynos4210_usb_phy1_exit(pdev); 220 return exynos4210_usb_phy1_exit(pdev);
221 221
222 return -EINVAL; 222 return -EINVAL;
diff --git a/arch/arm/mach-gemini/idle.c b/arch/arm/mach-gemini/idle.c
index 92bbd6bb600a..87dff4f5059e 100644
--- a/arch/arm/mach-gemini/idle.c
+++ b/arch/arm/mach-gemini/idle.c
@@ -13,9 +13,11 @@ static void gemini_idle(void)
13 * will never wake up... Actually it is not very good to enable 13 * will never wake up... Actually it is not very good to enable
14 * interrupts first since scheduler can miss a tick, but there is 14 * interrupts first since scheduler can miss a tick, but there is
15 * no other way around this. Platforms that need it for power saving 15 * no other way around this. Platforms that need it for power saving
16 * should call enable_hlt() in init code, since by default it is 16 * should enable it in init code, since by default it is
17 * disabled. 17 * disabled.
18 */ 18 */
19
20 /* FIXME: Enabling interrupts here is racy! */
19 local_irq_enable(); 21 local_irq_enable();
20 cpu_do_idle(); 22 cpu_do_idle();
21} 23}
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 020852d3bdd8..6d8f6d1669ff 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -15,6 +15,8 @@
15#include <linux/stddef.h> 15#include <linux/stddef.h>
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/cpu.h>
19
18#include <asm/irq.h> 20#include <asm/irq.h>
19#include <asm/mach/irq.h> 21#include <asm/mach/irq.h>
20#include <asm/system_misc.h> 22#include <asm/system_misc.h>
@@ -77,7 +79,7 @@ void __init gemini_init_irq(void)
77 * Disable the idle handler by default since it is buggy 79 * Disable the idle handler by default since it is buggy
78 * For more info see arch/arm/mach-gemini/idle.c 80 * For more info see arch/arm/mach-gemini/idle.c
79 */ 81 */
80 disable_hlt(); 82 cpu_idle_poll_ctrl(true);
81 83
82 request_resource(&iomem_resource, &irq_resource); 84 request_resource(&iomem_resource, &irq_resource);
83 85
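
disable_hlt()/enable_hlt() are gone; with the generic idle loop the equivalent is the refcounted cpu_idle_poll_ctrl(), which forces the idle loop to poll instead of entering a low-power state. A minimal sketch of the replacement (function names illustrative):

	#include <linux/cpu.h>

	static void example_block_low_power_idle(void)
	{
		cpu_idle_poll_ctrl(true);	/* was disable_hlt() */
	}

	static void example_allow_low_power_idle(void)
	{
		cpu_idle_poll_ctrl(false);	/* was enable_hlt() */
	}
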
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index c4ce0906d76a..cb70961b6239 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
30obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o 30obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o
31 31
32ifeq ($(CONFIG_CPU_IDLE),y) 32ifeq ($(CONFIG_CPU_IDLE),y)
33obj-y += cpuidle.o 33obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
34obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o 34obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
35endif 35endif
36 36
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
index 1ab91b5209e6..85b728cc27ab 100644
--- a/arch/arm/mach-imx/clk-busy.c
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -169,7 +169,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
169 169
170 busy->mux.reg = reg; 170 busy->mux.reg = reg;
171 busy->mux.shift = shift; 171 busy->mux.shift = shift;
172 busy->mux.width = width; 172 busy->mux.mask = BIT(width) - 1;
173 busy->mux.lock = &imx_ccm_lock; 173 busy->mux.lock = &imx_ccm_lock;
174 busy->mux_ops = &clk_mux_ops; 174 busy->mux_ops = &clk_mux_ops;
175 175
diff --git a/arch/arm/mach-imx/cpufreq.c b/arch/arm/mach-imx/cpufreq.c
index d8c75c3c925d..387dc4cceca2 100644
--- a/arch/arm/mach-imx/cpufreq.c
+++ b/arch/arm/mach-imx/cpufreq.c
@@ -87,13 +87,12 @@ static int mxc_set_target(struct cpufreq_policy *policy,
87 87
88 freqs.old = clk_get_rate(cpu_clk) / 1000; 88 freqs.old = clk_get_rate(cpu_clk) / 1000;
89 freqs.new = freq_Hz / 1000; 89 freqs.new = freq_Hz / 1000;
90 freqs.cpu = 0;
91 freqs.flags = 0; 90 freqs.flags = 0;
92 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 91 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
93 92
94 ret = set_cpu_freq(freq_Hz); 93 ret = set_cpu_freq(freq_Hz);
95 94
96 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 95 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
97 96
98 return ret; 97 return ret;
99} 98}
@@ -145,14 +144,11 @@ static int mxc_cpufreq_init(struct cpufreq_policy *policy)
145 imx_freq_table[i].frequency = CPUFREQ_TABLE_END; 144 imx_freq_table[i].frequency = CPUFREQ_TABLE_END;
146 145
147 policy->cur = clk_get_rate(cpu_clk) / 1000; 146 policy->cur = clk_get_rate(cpu_clk) / 1000;
148 policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min;
149 policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max;
150 147
151 /* Manual states, that PLL stabilizes in two CLK32 periods */ 148 /* Manual states, that PLL stabilizes in two CLK32 periods */
152 policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ; 149 policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ;
153 150
154 ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table); 151 ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table);
155
156 if (ret < 0) { 152 if (ret < 0) {
157 printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n", 153 printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n",
158 __func__, ret); 154 __func__, ret);
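
cpufreq_notify_transition() now takes the policy as its first argument, so drivers no longer fill in freqs.cpu themselves; the core derives the affected CPUs from the policy. A sketch of a set_target() path using the new signature (names and the PLL hook are illustrative assumptions, not from this patch):

	#include <linux/cpufreq.h>

	/* hypothetical hardware hook standing in for the real PLL reprogramming */
	static int example_program_pll(unsigned int new_khz)
	{
		return 0;
	}

	static int example_set_target(struct cpufreq_policy *policy,
				      unsigned int new_khz)
	{
		struct cpufreq_freqs freqs;
		int ret;

		freqs.old = policy->cur;
		freqs.new = new_khz;
		freqs.flags = 0;
		/* no freqs.cpu: the core works that out from the policy */

		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		ret = example_program_pll(new_khz);
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

		return ret;
	}
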
diff --git a/arch/arm/mach-imx/cpuidle-imx5.c b/arch/arm/mach-imx/cpuidle-imx5.c
new file mode 100644
index 000000000000..5a47e3c6172f
--- /dev/null
+++ b/arch/arm/mach-imx/cpuidle-imx5.c
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2012 Freescale Semiconductor, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/cpuidle.h>
10#include <linux/module.h>
11#include <asm/system_misc.h>
12
13static int imx5_cpuidle_enter(struct cpuidle_device *dev,
14 struct cpuidle_driver *drv, int index)
15{
16 arm_pm_idle();
17 return index;
18}
19
20static struct cpuidle_driver imx5_cpuidle_driver = {
21 .name = "imx5_cpuidle",
22 .owner = THIS_MODULE,
23 .states[0] = {
24 .enter = imx5_cpuidle_enter,
25 .exit_latency = 2,
26 .target_residency = 1,
27 .flags = CPUIDLE_FLAG_TIME_VALID,
28 .name = "IMX5 SRPG",
29 .desc = "CPU state retained,powered off",
30 },
31 .state_count = 1,
32};
33
34int __init imx5_cpuidle_init(void)
35{
36 return cpuidle_register(&imx5_cpuidle_driver, NULL);
37}
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index d533e2695f0e..23ddfb693b2d 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -6,7 +6,6 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/clockchips.h>
10#include <linux/cpuidle.h> 9#include <linux/cpuidle.h>
11#include <linux/module.h> 10#include <linux/module.h>
12#include <asm/cpuidle.h> 11#include <asm/cpuidle.h>
@@ -21,10 +20,6 @@ static DEFINE_SPINLOCK(master_lock);
21static int imx6q_enter_wait(struct cpuidle_device *dev, 20static int imx6q_enter_wait(struct cpuidle_device *dev,
22 struct cpuidle_driver *drv, int index) 21 struct cpuidle_driver *drv, int index)
23{ 22{
24 int cpu = dev->cpu;
25
26 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
27
28 if (atomic_inc_return(&master) == num_online_cpus()) { 23 if (atomic_inc_return(&master) == num_online_cpus()) {
29 /* 24 /*
30 * With this lock, we prevent other cpu to exit and enter 25 * With this lock, we prevent other cpu to exit and enter
@@ -43,26 +38,13 @@ idle:
43 cpu_do_idle(); 38 cpu_do_idle();
44done: 39done:
45 atomic_dec(&master); 40 atomic_dec(&master);
46 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
47 41
48 return index; 42 return index;
49} 43}
50 44
51/*
52 * For each cpu, setup the broadcast timer because local timer
53 * stops for the states other than WFI.
54 */
55static void imx6q_setup_broadcast_timer(void *arg)
56{
57 int cpu = smp_processor_id();
58
59 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
60}
61
62static struct cpuidle_driver imx6q_cpuidle_driver = { 45static struct cpuidle_driver imx6q_cpuidle_driver = {
63 .name = "imx6q_cpuidle", 46 .name = "imx6q_cpuidle",
64 .owner = THIS_MODULE, 47 .owner = THIS_MODULE,
65 .en_core_tk_irqen = 1,
66 .states = { 48 .states = {
67 /* WFI */ 49 /* WFI */
68 ARM_CPUIDLE_WFI_STATE, 50 ARM_CPUIDLE_WFI_STATE,
@@ -70,7 +52,8 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
70 { 52 {
71 .exit_latency = 50, 53 .exit_latency = 50,
72 .target_residency = 75, 54 .target_residency = 75,
73 .flags = CPUIDLE_FLAG_TIME_VALID, 55 .flags = CPUIDLE_FLAG_TIME_VALID |
56 CPUIDLE_FLAG_TIMER_STOP,
74 .enter = imx6q_enter_wait, 57 .enter = imx6q_enter_wait,
75 .name = "WAIT", 58 .name = "WAIT",
76 .desc = "Clock off", 59 .desc = "Clock off",
@@ -88,8 +71,5 @@ int __init imx6q_cpuidle_init(void)
88 /* Set chicken bit to get a reliable WAIT mode support */ 71 /* Set chicken bit to get a reliable WAIT mode support */
89 imx6q_set_chicken_bit(); 72 imx6q_set_chicken_bit();
90 73
91 /* Configure the broadcast timer on each cpu */ 74 return cpuidle_register(&imx6q_cpuidle_driver, NULL);
92 on_each_cpu(imx6q_setup_broadcast_timer, NULL, 1);
93
94 return imx_cpuidle_init(&imx6q_cpuidle_driver);
95} 75}
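
The open-coded CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT notifications and the per-CPU broadcast-timer setup are replaced by CPUIDLE_FLAG_TIMER_STOP: for states carrying this flag the cpuidle core switches the CPU to the broadcast clockevent around ->enter(). A short sketch of such a state definition (names illustrative, not from this patch):

	#include <linux/cpuidle.h>
	#include <asm/proc-fns.h>

	static int example_enter_wait(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
	{
		/* the local timer may stop here; the core has already moved
		 * this CPU to the broadcast clockevent */
		cpu_do_idle();
		return index;
	}

	/* illustrative deep state definition */
	static struct cpuidle_state example_wait_state = {
		.exit_latency		= 50,
		.target_residency	= 75,
		.flags			= CPUIDLE_FLAG_TIME_VALID |
					  CPUIDLE_FLAG_TIMER_STOP,
		.enter			= example_enter_wait,
		.name			= "WAIT",
		.desc			= "clock gated",
	};
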
diff --git a/arch/arm/mach-imx/cpuidle.c b/arch/arm/mach-imx/cpuidle.c
deleted file mode 100644
index d4cb511a44a8..000000000000
--- a/arch/arm/mach-imx/cpuidle.c
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2012 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/cpuidle.h>
14#include <linux/err.h>
15#include <linux/hrtimer.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/slab.h>
19
20static struct cpuidle_device __percpu * imx_cpuidle_devices;
21
22static void __init imx_cpuidle_devices_uninit(void)
23{
24 int cpu_id;
25 struct cpuidle_device *dev;
26
27 for_each_possible_cpu(cpu_id) {
28 dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
29 cpuidle_unregister_device(dev);
30 }
31
32 free_percpu(imx_cpuidle_devices);
33}
34
35int __init imx_cpuidle_init(struct cpuidle_driver *drv)
36{
37 struct cpuidle_device *dev;
38 int cpu_id, ret;
39
40 if (drv->state_count > CPUIDLE_STATE_MAX) {
41 pr_err("%s: state_count exceeds maximum\n", __func__);
42 return -EINVAL;
43 }
44
45 ret = cpuidle_register_driver(drv);
46 if (ret) {
47 pr_err("%s: Failed to register cpuidle driver with error: %d\n",
48 __func__, ret);
49 return ret;
50 }
51
52 imx_cpuidle_devices = alloc_percpu(struct cpuidle_device);
53 if (imx_cpuidle_devices == NULL) {
54 ret = -ENOMEM;
55 goto unregister_drv;
56 }
57
58 /* initialize state data for each cpuidle_device */
59 for_each_possible_cpu(cpu_id) {
60 dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
61 dev->cpu = cpu_id;
62 dev->state_count = drv->state_count;
63
64 ret = cpuidle_register_device(dev);
65 if (ret) {
66 pr_err("%s: Failed to register cpu %u, error: %d\n",
67 __func__, cpu_id, ret);
68 goto uninit;
69 }
70 }
71
72 return 0;
73
74uninit:
75 imx_cpuidle_devices_uninit();
76
77unregister_drv:
78 cpuidle_unregister_driver(drv);
79 return ret;
80}
diff --git a/arch/arm/mach-imx/cpuidle.h b/arch/arm/mach-imx/cpuidle.h
index e092d1359d94..786f98ecc145 100644
--- a/arch/arm/mach-imx/cpuidle.h
+++ b/arch/arm/mach-imx/cpuidle.h
@@ -10,18 +10,16 @@
10 * http://www.gnu.org/copyleft/gpl.html 10 * http://www.gnu.org/copyleft/gpl.html
11 */ 11 */
12 12
13#include <linux/cpuidle.h>
14
15#ifdef CONFIG_CPU_IDLE 13#ifdef CONFIG_CPU_IDLE
16extern int imx_cpuidle_init(struct cpuidle_driver *drv); 14extern int imx5_cpuidle_init(void);
17extern int imx6q_cpuidle_init(void); 15extern int imx6q_cpuidle_init(void);
18#else 16#else
19static inline int imx_cpuidle_init(struct cpuidle_driver *drv) 17static inline int imx5_cpuidle_init(void)
20{ 18{
21 return -ENODEV; 19 return 0;
22} 20}
23static inline int imx6q_cpuidle_init(void) 21static inline int imx6q_cpuidle_init(void)
24{ 22{
25 return -ENODEV; 23 return 0;
26} 24}
27#endif 25#endif
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index f67fd7ee8127..82e79c658eb2 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -149,33 +149,6 @@ static void imx5_pm_idle(void)
149 imx5_cpu_do_idle(); 149 imx5_cpu_do_idle();
150} 150}
151 151
152static int imx5_cpuidle_enter(struct cpuidle_device *dev,
153 struct cpuidle_driver *drv, int idx)
154{
155 int ret;
156
157 ret = imx5_cpu_do_idle();
158 if (ret < 0)
159 return ret;
160
161 return idx;
162}
163
164static struct cpuidle_driver imx5_cpuidle_driver = {
165 .name = "imx5_cpuidle",
166 .owner = THIS_MODULE,
167 .en_core_tk_irqen = 1,
168 .states[0] = {
169 .enter = imx5_cpuidle_enter,
170 .exit_latency = 2,
171 .target_residency = 1,
172 .flags = CPUIDLE_FLAG_TIME_VALID,
173 .name = "IMX5 SRPG",
174 .desc = "CPU state retained,powered off",
175 },
176 .state_count = 1,
177};
178
179static int __init imx5_pm_common_init(void) 152static int __init imx5_pm_common_init(void)
180{ 153{
181 int ret; 154 int ret;
@@ -193,8 +166,7 @@ static int __init imx5_pm_common_init(void)
193 /* Set the registers to the default cpu idle state. */ 166 /* Set the registers to the default cpu idle state. */
194 mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE); 167 mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);
195 168
196 imx_cpuidle_init(&imx5_cpuidle_driver); 169 return imx5_cpuidle_init();
197 return 0;
198} 170}
199 171
200void __init imx51_pm_init(void) 172void __init imx51_pm_init(void)
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 5521d18bf19a..d14d6b76f4c2 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -9,5 +9,4 @@ obj-$(CONFIG_ARCH_INTEGRATOR_AP) += integrator_ap.o
9obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o 9obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o
10 10
11obj-$(CONFIG_PCI) += pci_v3.o pci.o 11obj-$(CONFIG_PCI) += pci_v3.o pci.o
12obj-$(CONFIG_CPU_FREQ_INTEGRATOR) += cpu.o
13obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o 12obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
deleted file mode 100644
index 590c192cdf4d..000000000000
--- a/arch/arm/mach-integrator/cpu.c
+++ /dev/null
@@ -1,224 +0,0 @@
1/*
2 * linux/arch/arm/mach-integrator/cpu.c
3 *
4 * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * CPU support functions
11 */
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/cpufreq.h>
16#include <linux/sched.h>
17#include <linux/smp.h>
18#include <linux/init.h>
19#include <linux/io.h>
20
21#include <mach/hardware.h>
22#include <mach/platform.h>
23#include <asm/mach-types.h>
24#include <asm/hardware/icst.h>
25
26static struct cpufreq_driver integrator_driver;
27
28#define CM_ID __io_address(INTEGRATOR_HDR_ID)
29#define CM_OSC __io_address(INTEGRATOR_HDR_OSC)
30#define CM_STAT __io_address(INTEGRATOR_HDR_STAT)
31#define CM_LOCK __io_address(INTEGRATOR_HDR_LOCK)
32
33static const struct icst_params lclk_params = {
34 .ref = 24000000,
35 .vco_max = ICST525_VCO_MAX_5V,
36 .vco_min = ICST525_VCO_MIN,
37 .vd_min = 8,
38 .vd_max = 132,
39 .rd_min = 24,
40 .rd_max = 24,
41 .s2div = icst525_s2div,
42 .idx2s = icst525_idx2s,
43};
44
45static const struct icst_params cclk_params = {
46 .ref = 24000000,
47 .vco_max = ICST525_VCO_MAX_5V,
48 .vco_min = ICST525_VCO_MIN,
49 .vd_min = 12,
50 .vd_max = 160,
51 .rd_min = 24,
52 .rd_max = 24,
53 .s2div = icst525_s2div,
54 .idx2s = icst525_idx2s,
55};
56
57/*
58 * Validate the speed policy.
59 */
60static int integrator_verify_policy(struct cpufreq_policy *policy)
61{
62 struct icst_vco vco;
63
64 cpufreq_verify_within_limits(policy,
65 policy->cpuinfo.min_freq,
66 policy->cpuinfo.max_freq);
67
68 vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
69 policy->max = icst_hz(&cclk_params, vco) / 1000;
70
71 vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
72 policy->min = icst_hz(&cclk_params, vco) / 1000;
73
74 cpufreq_verify_within_limits(policy,
75 policy->cpuinfo.min_freq,
76 policy->cpuinfo.max_freq);
77
78 return 0;
79}
80
81
82static int integrator_set_target(struct cpufreq_policy *policy,
83 unsigned int target_freq,
84 unsigned int relation)
85{
86 cpumask_t cpus_allowed;
87 int cpu = policy->cpu;
88 struct icst_vco vco;
89 struct cpufreq_freqs freqs;
90 u_int cm_osc;
91
92 /*
93 * Save this threads cpus_allowed mask.
94 */
95 cpus_allowed = current->cpus_allowed;
96
97 /*
98 * Bind to the specified CPU. When this call returns,
99 * we should be running on the right CPU.
100 */
101 set_cpus_allowed(current, cpumask_of_cpu(cpu));
102 BUG_ON(cpu != smp_processor_id());
103
104 /* get current setting */
105 cm_osc = __raw_readl(CM_OSC);
106
107 if (machine_is_integrator()) {
108 vco.s = (cm_osc >> 8) & 7;
109 } else if (machine_is_cintegrator()) {
110 vco.s = 1;
111 }
112 vco.v = cm_osc & 255;
113 vco.r = 22;
114 freqs.old = icst_hz(&cclk_params, vco) / 1000;
115
116 /* icst_hz_to_vco rounds down -- so we need the next
117 * larger freq in case of CPUFREQ_RELATION_L.
118 */
119 if (relation == CPUFREQ_RELATION_L)
120 target_freq += 999;
121 if (target_freq > policy->max)
122 target_freq = policy->max;
123 vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
124 freqs.new = icst_hz(&cclk_params, vco) / 1000;
125
126 freqs.cpu = policy->cpu;
127
128 if (freqs.old == freqs.new) {
129 set_cpus_allowed(current, cpus_allowed);
130 return 0;
131 }
132
133 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
134
135 cm_osc = __raw_readl(CM_OSC);
136
137 if (machine_is_integrator()) {
138 cm_osc &= 0xfffff800;
139 cm_osc |= vco.s << 8;
140 } else if (machine_is_cintegrator()) {
141 cm_osc &= 0xffffff00;
142 }
143 cm_osc |= vco.v;
144
145 __raw_writel(0xa05f, CM_LOCK);
146 __raw_writel(cm_osc, CM_OSC);
147 __raw_writel(0, CM_LOCK);
148
149 /*
150 * Restore the CPUs allowed mask.
151 */
152 set_cpus_allowed(current, cpus_allowed);
153
154 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
155
156 return 0;
157}
158
159static unsigned int integrator_get(unsigned int cpu)
160{
161 cpumask_t cpus_allowed;
162 unsigned int current_freq;
163 u_int cm_osc;
164 struct icst_vco vco;
165
166 cpus_allowed = current->cpus_allowed;
167
168 set_cpus_allowed(current, cpumask_of_cpu(cpu));
169 BUG_ON(cpu != smp_processor_id());
170
171 /* detect memory etc. */
172 cm_osc = __raw_readl(CM_OSC);
173
174 if (machine_is_integrator()) {
175 vco.s = (cm_osc >> 8) & 7;
176 } else {
177 vco.s = 1;
178 }
179 vco.v = cm_osc & 255;
180 vco.r = 22;
181
182 current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
183
184 set_cpus_allowed(current, cpus_allowed);
185
186 return current_freq;
187}
188
189static int integrator_cpufreq_init(struct cpufreq_policy *policy)
190{
191
192 /* set default policy and cpuinfo */
193 policy->cpuinfo.max_freq = 160000;
194 policy->cpuinfo.min_freq = 12000;
195 policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
196 policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
197
198 return 0;
199}
200
201static struct cpufreq_driver integrator_driver = {
202 .verify = integrator_verify_policy,
203 .target = integrator_set_target,
204 .get = integrator_get,
205 .init = integrator_cpufreq_init,
206 .name = "integrator",
207};
208
209static int __init integrator_cpu_init(void)
210{
211 return cpufreq_register_driver(&integrator_driver);
212}
213
214static void __exit integrator_cpu_exit(void)
215{
216 cpufreq_unregister_driver(&integrator_driver);
217}
218
219MODULE_AUTHOR ("Russell M. King");
220MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
221MODULE_LICENSE ("GPL");
222
223module_init(integrator_cpu_init);
224module_exit(integrator_cpu_exit);
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 1dbeb7c99d58..6600cff6bd92 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -29,6 +29,7 @@
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/cpu.h>
32 33
33#include <mach/udc.h> 34#include <mach/udc.h>
34#include <mach/hardware.h> 35#include <mach/hardware.h>
@@ -239,7 +240,7 @@ void __init ixp4xx_init_irq(void)
239 * ixp4xx does not implement the XScale PWRMODE register 240 * ixp4xx does not implement the XScale PWRMODE register
240 * so it must not call cpu_do_idle(). 241 * so it must not call cpu_do_idle().
241 */ 242 */
242 disable_hlt(); 243 cpu_idle_poll_ctrl(true);
243 244
244 /* Route all sources to IRQ instead of FIQ */ 245 /* Route all sources to IRQ instead of FIQ */
245 *IXP4XX_ICLR = 0x0; 246 *IXP4XX_ICLR = 0x0;
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 9f64d5632e07..76901f4ce611 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -223,13 +223,7 @@ static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = {
223}; 223};
224 224
225#if defined(CONFIG_USB_EHCI_MV) 225#if defined(CONFIG_USB_EHCI_MV)
226static char *pxa168_sph_clock_name[] = {
227 [0] = "PXA168-USBCLK",
228};
229
230static struct mv_usb_platform_data pxa168_sph_pdata = { 226static struct mv_usb_platform_data pxa168_sph_pdata = {
231 .clknum = 1,
232 .clkname = pxa168_sph_clock_name,
233 .mode = MV_USB_MODE_HOST, 227 .mode = MV_USB_MODE_HOST,
234 .phy_init = pxa_usb_phy_init, 228 .phy_init = pxa_usb_phy_init,
235 .phy_deinit = pxa_usb_phy_deinit, 229 .phy_deinit = pxa_usb_phy_deinit,
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 22a9058f9f4d..6528a5fa6a26 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -162,13 +162,7 @@ static struct i2c_board_info ttc_dkb_i2c_info[] = {
162#ifdef CONFIG_USB_SUPPORT 162#ifdef CONFIG_USB_SUPPORT
163#if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O) 163#if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O)
164 164
165static char *pxa910_usb_clock_name[] = {
166 [0] = "U2OCLK",
167};
168
169static struct mv_usb_platform_data ttc_usb_pdata = { 165static struct mv_usb_platform_data ttc_usb_pdata = {
170 .clknum = 1,
171 .clkname = pxa910_usb_clock_name,
172 .vbus = NULL, 166 .vbus = NULL,
173 .mode = MV_USB_MODE_OTG, 167 .mode = MV_USB_MODE_OTG,
174 .otg_force_a_bus_req = 1, 168 .otg_force_a_bus_req = 1,
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index d5970f5a1e8d..830139a3e2ba 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -57,7 +57,7 @@ static struct irq_domain *armada_370_xp_mpic_domain;
57/* 57/*
58 * In SMP mode: 58 * In SMP mode:
59 * For shared global interrupts, mask/unmask global enable bit 59 * For shared global interrupts, mask/unmask global enable bit
60 * For CPU interrtups, mask/unmask the calling CPU's bit 60 * For CPU interrupts, mask/unmask the calling CPU's bit
61 */ 61 */
62static void armada_370_xp_irq_mask(struct irq_data *d) 62static void armada_370_xp_irq_mask(struct irq_data *d)
63{ 63{
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 7a7690ab6cb8..db37f49da5ac 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -43,6 +43,7 @@
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/io.h> 44#include <linux/io.h>
45#include <linux/atomic.h> 45#include <linux/atomic.h>
46#include <linux/cpu.h>
46 47
47#include <asm/fncpy.h> 48#include <asm/fncpy.h>
48#include <asm/system_misc.h> 49#include <asm/system_misc.h>
@@ -584,8 +585,7 @@ static void omap_pm_init_proc(void)
584static int omap_pm_prepare(void) 585static int omap_pm_prepare(void)
585{ 586{
586 /* We cannot sleep in idle until we have resumed */ 587 /* We cannot sleep in idle until we have resumed */
587 disable_hlt(); 588 cpu_idle_poll_ctrl(true);
588
589 return 0; 589 return 0;
590} 590}
591 591
@@ -621,7 +621,7 @@ static int omap_pm_enter(suspend_state_t state)
621 621
622static void omap_pm_finish(void) 622static void omap_pm_finish(void)
623{ 623{
624 enable_hlt(); 624 cpu_idle_poll_ctrl(false);
625} 625}
626 626
627 627
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d6ba13e1c540..14522d077c88 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -249,7 +249,6 @@ extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
249extern int omap4_finish_suspend(unsigned long cpu_state); 249extern int omap4_finish_suspend(unsigned long cpu_state);
250extern void omap4_cpu_resume(void); 250extern void omap4_cpu_resume(void);
251extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); 251extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
252extern u32 omap4_mpuss_read_prev_context_state(void);
253#else 252#else
254static inline int omap4_enter_lowpower(unsigned int cpu, 253static inline int omap4_enter_lowpower(unsigned int cpu,
255 unsigned int power_state) 254 unsigned int power_state)
@@ -277,10 +276,6 @@ static inline int omap4_finish_suspend(unsigned long cpu_state)
277static inline void omap4_cpu_resume(void) 276static inline void omap4_cpu_resume(void)
278{} 277{}
279 278
280static inline u32 omap4_mpuss_read_prev_context_state(void)
281{
282 return 0;
283}
284#endif 279#endif
285 280
286struct omap_sdrc_params; 281struct omap_sdrc_params;
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 80392fca86c6..cca045c95fbf 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -26,6 +26,7 @@
26#include <linux/cpuidle.h> 26#include <linux/cpuidle.h>
27#include <linux/export.h> 27#include <linux/export.h>
28#include <linux/cpu_pm.h> 28#include <linux/cpu_pm.h>
29#include <asm/cpuidle.h>
29 30
30#include "powerdomain.h" 31#include "powerdomain.h"
31#include "clockdomain.h" 32#include "clockdomain.h"
@@ -99,11 +100,15 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
99 }, 100 },
100}; 101};
101 102
102/* Private functions */ 103/**
103 104 * omap3_enter_idle - Programs OMAP3 to enter the specified state
104static int __omap3_enter_idle(struct cpuidle_device *dev, 105 * @dev: cpuidle device
105 struct cpuidle_driver *drv, 106 * @drv: cpuidle driver
106 int index) 107 * @index: the index of state to be entered
108 */
109static int omap3_enter_idle(struct cpuidle_device *dev,
110 struct cpuidle_driver *drv,
111 int index)
107{ 112{
108 struct omap3_idle_statedata *cx = &omap3_idle_data[index]; 113 struct omap3_idle_statedata *cx = &omap3_idle_data[index];
109 114
@@ -149,22 +154,6 @@ return_sleep_time:
149} 154}
150 155
151/** 156/**
152 * omap3_enter_idle - Programs OMAP3 to enter the specified state
153 * @dev: cpuidle device
154 * @drv: cpuidle driver
155 * @index: the index of state to be entered
156 *
157 * Called from the CPUidle framework to program the device to the
158 * specified target state selected by the governor.
159 */
160static inline int omap3_enter_idle(struct cpuidle_device *dev,
161 struct cpuidle_driver *drv,
162 int index)
163{
164 return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
165}
166
167/**
168 * next_valid_state - Find next valid C-state 157 * next_valid_state - Find next valid C-state
169 * @dev: cpuidle device 158 * @dev: cpuidle device
170 * @drv: cpuidle driver 159 * @drv: cpuidle driver
@@ -271,11 +260,9 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
271 return ret; 260 return ret;
272} 261}
273 262
274static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
275
276static struct cpuidle_driver omap3_idle_driver = { 263static struct cpuidle_driver omap3_idle_driver = {
277 .name = "omap3_idle", 264 .name = "omap3_idle",
278 .owner = THIS_MODULE, 265 .owner = THIS_MODULE,
279 .states = { 266 .states = {
280 { 267 {
281 .enter = omap3_enter_idle_bm, 268 .enter = omap3_enter_idle_bm,
@@ -348,8 +335,6 @@ static struct cpuidle_driver omap3_idle_driver = {
348 */ 335 */
349int __init omap3_idle_init(void) 336int __init omap3_idle_init(void)
350{ 337{
351 struct cpuidle_device *dev;
352
353 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 338 mpu_pd = pwrdm_lookup("mpu_pwrdm");
354 core_pd = pwrdm_lookup("core_pwrdm"); 339 core_pd = pwrdm_lookup("core_pwrdm");
355 per_pd = pwrdm_lookup("per_pwrdm"); 340 per_pd = pwrdm_lookup("per_pwrdm");
@@ -358,16 +343,5 @@ int __init omap3_idle_init(void)
358 if (!mpu_pd || !core_pd || !per_pd || !cam_pd) 343 if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
359 return -ENODEV; 344 return -ENODEV;
360 345
361 cpuidle_register_driver(&omap3_idle_driver); 346 return cpuidle_register(&omap3_idle_driver, NULL);
362
363 dev = &per_cpu(omap3_idle_dev, smp_processor_id());
364 dev->cpu = 0;
365
366 if (cpuidle_register_device(dev)) {
367 printk(KERN_ERR "%s: CPUidle register device failed\n",
368 __func__);
369 return -EIO;
370 }
371
372 return 0;
373} 347}
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index d639aef0deda..5a286b56205e 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * OMAP4 CPU idle Routines 2 * OMAP4+ CPU idle Routines
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments, Inc. 4 * Copyright (C) 2011-2013 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com> 5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 * Rajendra Nayak <rnayak@ti.com> 6 * Rajendra Nayak <rnayak@ti.com>
7 * 7 *
@@ -14,8 +14,8 @@
14#include <linux/cpuidle.h> 14#include <linux/cpuidle.h>
15#include <linux/cpu_pm.h> 15#include <linux/cpu_pm.h>
16#include <linux/export.h> 16#include <linux/export.h>
17#include <linux/clockchips.h>
18 17
18#include <asm/cpuidle.h>
19#include <asm/proc-fns.h> 19#include <asm/proc-fns.h>
20 20
21#include "common.h" 21#include "common.h"
@@ -24,13 +24,13 @@
24#include "clockdomain.h" 24#include "clockdomain.h"
25 25
26/* Machine specific information */ 26/* Machine specific information */
27struct omap4_idle_statedata { 27struct idle_statedata {
28 u32 cpu_state; 28 u32 cpu_state;
29 u32 mpu_logic_state; 29 u32 mpu_logic_state;
30 u32 mpu_state; 30 u32 mpu_state;
31}; 31};
32 32
33static struct omap4_idle_statedata omap4_idle_data[] = { 33static struct idle_statedata omap4_idle_data[] = {
34 { 34 {
35 .cpu_state = PWRDM_POWER_ON, 35 .cpu_state = PWRDM_POWER_ON,
36 .mpu_state = PWRDM_POWER_ON, 36 .mpu_state = PWRDM_POWER_ON,
@@ -53,11 +53,12 @@ static struct clockdomain *cpu_clkdm[NR_CPUS];
53 53
54static atomic_t abort_barrier; 54static atomic_t abort_barrier;
55static bool cpu_done[NR_CPUS]; 55static bool cpu_done[NR_CPUS];
56static struct idle_statedata *state_ptr = &omap4_idle_data[0];
56 57
57/* Private functions */ 58/* Private functions */
58 59
59/** 60/**
60 * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions 61 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
61 * @dev: cpuidle device 62 * @dev: cpuidle device
62 * @drv: cpuidle driver 63 * @drv: cpuidle driver
63 * @index: the index of state to be entered 64 * @index: the index of state to be entered
@@ -66,7 +67,7 @@ static bool cpu_done[NR_CPUS];
66 * specified low power state selected by the governor. 67 * specified low power state selected by the governor.
67 * Returns the amount of time spent in the low power state. 68 * Returns the amount of time spent in the low power state.
68 */ 69 */
69static int omap4_enter_idle_simple(struct cpuidle_device *dev, 70static int omap_enter_idle_simple(struct cpuidle_device *dev,
70 struct cpuidle_driver *drv, 71 struct cpuidle_driver *drv,
71 int index) 72 int index)
72{ 73{
@@ -77,12 +78,11 @@ static int omap4_enter_idle_simple(struct cpuidle_device *dev,
77 return index; 78 return index;
78} 79}
79 80
80static int omap4_enter_idle_coupled(struct cpuidle_device *dev, 81static int omap_enter_idle_coupled(struct cpuidle_device *dev,
81 struct cpuidle_driver *drv, 82 struct cpuidle_driver *drv,
82 int index) 83 int index)
83{ 84{
84 struct omap4_idle_statedata *cx = &omap4_idle_data[index]; 85 struct idle_statedata *cx = state_ptr + index;
85 int cpu_id = smp_processor_id();
86 86
87 local_fiq_disable(); 87 local_fiq_disable();
88 88
@@ -109,8 +109,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
109 } 109 }
110 } 110 }
111 111
112 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
113
114 /* 112 /*
115 * Call idle CPU PM enter notifier chain so that 113 * Call idle CPU PM enter notifier chain so that
116 * VFP and per CPU interrupt context is saved. 114 * VFP and per CPU interrupt context is saved.
@@ -149,11 +147,10 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
149 * Call idle CPU cluster PM exit notifier chain 147 * Call idle CPU cluster PM exit notifier chain
150 * to restore GIC and wakeupgen context. 148 * to restore GIC and wakeupgen context.
151 */ 149 */
152 if (omap4_mpuss_read_prev_context_state()) 150 if ((cx->mpu_state == PWRDM_POWER_RET) &&
151 (cx->mpu_logic_state == PWRDM_POWER_OFF))
153 cpu_cluster_pm_exit(); 152 cpu_cluster_pm_exit();
154 153
155 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
156
157fail: 154fail:
158 cpuidle_coupled_parallel_barrier(dev, &abort_barrier); 155 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
159 cpu_done[dev->cpu] = false; 156 cpu_done[dev->cpu] = false;
@@ -163,49 +160,38 @@ fail:
163 return index; 160 return index;
164} 161}
165 162
166/*
167 * For each cpu, setup the broadcast timer because local timers
168 * stops for the states above C1.
169 */
170static void omap_setup_broadcast_timer(void *arg)
171{
172 int cpu = smp_processor_id();
173 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
174}
175
176static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
177
178static struct cpuidle_driver omap4_idle_driver = { 163static struct cpuidle_driver omap4_idle_driver = {
179 .name = "omap4_idle", 164 .name = "omap4_idle",
180 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
181 .en_core_tk_irqen = 1,
182 .states = { 166 .states = {
183 { 167 {
184 /* C1 - CPU0 ON + CPU1 ON + MPU ON */ 168 /* C1 - CPU0 ON + CPU1 ON + MPU ON */
185 .exit_latency = 2 + 2, 169 .exit_latency = 2 + 2,
186 .target_residency = 5, 170 .target_residency = 5,
187 .flags = CPUIDLE_FLAG_TIME_VALID, 171 .flags = CPUIDLE_FLAG_TIME_VALID,
188 .enter = omap4_enter_idle_simple, 172 .enter = omap_enter_idle_simple,
189 .name = "C1", 173 .name = "C1",
190 .desc = "MPUSS ON" 174 .desc = "CPUx ON, MPUSS ON"
191 }, 175 },
192 { 176 {
193 /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ 177 /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
194 .exit_latency = 328 + 440, 178 .exit_latency = 328 + 440,
195 .target_residency = 960, 179 .target_residency = 960,
196 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, 180 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
197 .enter = omap4_enter_idle_coupled, 181 CPUIDLE_FLAG_TIMER_STOP,
182 .enter = omap_enter_idle_coupled,
198 .name = "C2", 183 .name = "C2",
199 .desc = "MPUSS CSWR", 184 .desc = "CPUx OFF, MPUSS CSWR",
200 }, 185 },
201 { 186 {
202 /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ 187 /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
203 .exit_latency = 460 + 518, 188 .exit_latency = 460 + 518,
204 .target_residency = 1100, 189 .target_residency = 1100,
205 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, 190 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
206 .enter = omap4_enter_idle_coupled, 191 CPUIDLE_FLAG_TIMER_STOP,
192 .enter = omap_enter_idle_coupled,
207 .name = "C3", 193 .name = "C3",
208 .desc = "MPUSS OSWR", 194 .desc = "CPUx OFF, MPUSS OSWR",
209 }, 195 },
210 }, 196 },
211 .state_count = ARRAY_SIZE(omap4_idle_data), 197 .state_count = ARRAY_SIZE(omap4_idle_data),
@@ -215,16 +201,13 @@ static struct cpuidle_driver omap4_idle_driver = {
215/* Public functions */ 201/* Public functions */
216 202
217/** 203/**
218 * omap4_idle_init - Init routine for OMAP4 idle 204 * omap4_idle_init - Init routine for OMAP4+ idle
219 * 205 *
220 * Registers the OMAP4 specific cpuidle driver to the cpuidle 206 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
221 * framework with the valid set of states. 207 * framework with the valid set of states.
222 */ 208 */
223int __init omap4_idle_init(void) 209int __init omap4_idle_init(void)
224{ 210{
225 struct cpuidle_device *dev;
226 unsigned int cpu_id = 0;
227
228 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 211 mpu_pd = pwrdm_lookup("mpu_pwrdm");
229 cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm"); 212 cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
230 cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm"); 213 cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
@@ -236,22 +219,5 @@ int __init omap4_idle_init(void)
236 if (!cpu_clkdm[0] || !cpu_clkdm[1]) 219 if (!cpu_clkdm[0] || !cpu_clkdm[1])
237 return -ENODEV; 220 return -ENODEV;
238 221
239 /* Configure the broadcast timer on each cpu */ 222 return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
240 on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
241
242 for_each_cpu(cpu_id, cpu_online_mask) {
243 dev = &per_cpu(omap4_idle_dev, cpu_id);
244 dev->cpu = cpu_id;
245#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
246 dev->coupled_cpus = *cpu_online_mask;
247#endif
248 cpuidle_register_driver(&omap4_idle_driver);
249
250 if (cpuidle_register_device(dev)) {
251 pr_err("%s: CPUidle register failed\n", __func__);
252 return -EIO;
253 }
254 }
255
256 return 0;
257} 223}
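
Note: the two changes above to cpuidle44xx.c show the pattern used throughout this diff: the open-coded per-CPU cpuidle_device loop and the explicit CLOCK_EVT_NOTIFY_BROADCAST_{ENTER,EXIT} notifications are replaced by the generic cpuidle_register() helper and the CPUIDLE_FLAG_TIMER_STOP state flag. A minimal hedged sketch of a driver written against the new interface follows; the example_* identifiers are illustrative and are not part of this patch.

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/proc-fns.h>

/* Illustrative enter hook; a real driver would program power domains here. */
static int example_enter_idle(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1: plain WFI, local timer keeps running */
			.exit_latency = 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = example_enter_idle,
			.name = "C1",
			.desc = "WFI",
		},
		{
			/* C2: CPUIDLE_FLAG_TIMER_STOP tells the cpuidle core
			 * to move this CPU to the broadcast clockevent around
			 * entry, replacing the clockevents_notify() calls
			 * removed above. */
			.exit_latency = 500,
			.target_residency = 1000,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP,
			.enter = example_enter_idle,
			.name = "C2",
			.desc = "deep state",
		},
	},
	.state_count = 2,
};

static int __init example_idle_init(void)
{
	/* Registers the driver and one cpuidle device per CPU in the mask;
	 * with coupled idle configured the mask also becomes coupled_cpus. */
	return cpuidle_register(&example_idle_driver, cpu_online_mask);
}
device_initcall(example_idle_init);
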
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 8bcb64bcdcdb..e80327b6c81f 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -139,20 +139,6 @@ static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
139 } 139 }
140} 140}
141 141
142/**
143 * omap4_mpuss_read_prev_context_state:
144 * Function returns the MPUSS previous context state
145 */
146u32 omap4_mpuss_read_prev_context_state(void)
147{
148 u32 reg;
149
150 reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
151 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
152 reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
153 return reg;
154}
155
156/* 142/*
157 * Store the CPU cluster state for L2X0 low power operations. 143 * Store the CPU cluster state for L2X0 low power operations.
158 */ 144 */
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index a202a4785104..e512253601c8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -138,6 +138,7 @@
138#include <linux/spinlock.h> 138#include <linux/spinlock.h>
139#include <linux/slab.h> 139#include <linux/slab.h>
140#include <linux/bootmem.h> 140#include <linux/bootmem.h>
141#include <linux/cpu.h>
141 142
142#include <asm/system_misc.h> 143#include <asm/system_misc.h>
143 144
@@ -2157,7 +2158,7 @@ static int _enable(struct omap_hwmod *oh)
2157 if (soc_ops.enable_module) 2158 if (soc_ops.enable_module)
2158 soc_ops.enable_module(oh); 2159 soc_ops.enable_module(oh);
2159 if (oh->flags & HWMOD_BLOCK_WFI) 2160 if (oh->flags & HWMOD_BLOCK_WFI)
2160 disable_hlt(); 2161 cpu_idle_poll_ctrl(true);
2161 2162
2162 if (soc_ops.update_context_lost) 2163 if (soc_ops.update_context_lost)
2163 soc_ops.update_context_lost(oh); 2164 soc_ops.update_context_lost(oh);
@@ -2221,7 +2222,7 @@ static int _idle(struct omap_hwmod *oh)
2221 _del_initiator_dep(oh, mpu_oh); 2222 _del_initiator_dep(oh, mpu_oh);
2222 2223
2223 if (oh->flags & HWMOD_BLOCK_WFI) 2224 if (oh->flags & HWMOD_BLOCK_WFI)
2224 enable_hlt(); 2225 cpu_idle_poll_ctrl(false);
2225 if (soc_ops.disable_module) 2226 if (soc_ops.disable_module)
2226 soc_ops.disable_module(oh); 2227 soc_ops.disable_module(oh);
2227 2228
@@ -2331,7 +2332,7 @@ static int _shutdown(struct omap_hwmod *oh)
2331 _del_initiator_dep(oh, mpu_oh); 2332 _del_initiator_dep(oh, mpu_oh);
2332 /* XXX what about the other system initiators here? dma, dsp */ 2333 /* XXX what about the other system initiators here? dma, dsp */
2333 if (oh->flags & HWMOD_BLOCK_WFI) 2334 if (oh->flags & HWMOD_BLOCK_WFI)
2334 enable_hlt(); 2335 cpu_idle_poll_ctrl(false);
2335 if (soc_ops.disable_module) 2336 if (soc_ops.disable_module)
2336 soc_ops.disable_module(oh); 2337 soc_ops.disable_module(oh);
2337 _disable_clocks(oh); 2338 _disable_clocks(oh);
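
Note: the HWMOD_BLOCK_WFI handling above (and the OMAP PM and orion5x hunks that follow) replaces the ARM-specific disable_hlt()/enable_hlt() pair with the generic cpu_idle_poll_ctrl() helper from <linux/cpu.h>, hence the added include. A tiny hedged sketch of the new pattern; example_block_wfi() is illustrative only.

#include <linux/cpu.h>

/* Force the idle loop to poll (no WFI / low-power idle) while a critical
 * section is active. Calls nest, so each enable needs a matching disable. */
static void example_block_wfi(bool block)
{
	if (block)
		cpu_idle_poll_ctrl(true);	/* was disable_hlt() */
	else
		cpu_idle_poll_ctrl(false);	/* was enable_hlt() */
}
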
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 673a4c1d1d76..e742118fcfd2 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -218,7 +218,7 @@ static int omap_pm_enter(suspend_state_t suspend_state)
218 218
219static int omap_pm_begin(suspend_state_t state) 219static int omap_pm_begin(suspend_state_t state)
220{ 220{
221 disable_hlt(); 221 cpu_idle_poll_ctrl(true);
222 if (cpu_is_omap34xx()) 222 if (cpu_is_omap34xx())
223 omap_prcm_irq_prepare(); 223 omap_prcm_irq_prepare();
224 return 0; 224 return 0;
@@ -226,8 +226,7 @@ static int omap_pm_begin(suspend_state_t state)
226 226
227static void omap_pm_end(void) 227static void omap_pm_end(void)
228{ 228{
229 enable_hlt(); 229 cpu_idle_poll_ctrl(false);
230 return;
231} 230}
232 231
233static void omap_pm_finish(void) 232static void omap_pm_finish(void)
@@ -265,6 +264,12 @@ static void __init omap4_init_voltages(void)
265 omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva"); 264 omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
266} 265}
267 266
267static inline void omap_init_cpufreq(void)
268{
269 struct platform_device_info devinfo = { .name = "omap-cpufreq", };
270 platform_device_register_full(&devinfo);
271}
272
268static int __init omap2_common_pm_init(void) 273static int __init omap2_common_pm_init(void)
269{ 274{
270 if (!of_have_populated_dt()) 275 if (!of_have_populated_dt())
@@ -294,6 +299,9 @@ int __init omap2_common_pm_late_init(void)
294 299
295 /* Smartreflex device init */ 300 /* Smartreflex device init */
296 omap_devinit_smartreflex(); 301 omap_devinit_smartreflex();
302
303 /* cpufreq dummy device instantiation */
304 omap_init_cpufreq();
297 } 305 }
298 306
299#ifdef CONFIG_SUSPEND 307#ifdef CONFIG_SUSPEND
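
Note: omap_init_cpufreq() above registers a bare "omap-cpufreq" platform device so that a cpufreq driver built outside arch/arm (presumably under drivers/cpufreq) can bind to it by name. A hedged illustration of the consumer side follows; the example_* names and the probe body are assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Illustrative consumer: binds to the dummy device purely by name match. */
static int example_cpufreq_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound to dummy cpufreq device\n");
	/* a real driver would call cpufreq_register_driver() here */
	return 0;
}

static struct platform_driver example_cpufreq_driver = {
	.driver = {
		.name = "omap-cpufreq",
		.owner = THIS_MODULE,
	},
	.probe = example_cpufreq_probe,
};
module_platform_driver(example_cpufreq_driver);
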
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index ea62e75ef21d..152a10cf4f1d 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -126,8 +126,8 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
126 * omap_default_idle - OMAP4 default ilde routine.' 126 * omap_default_idle - OMAP4 default ilde routine.'
127 * 127 *
128 * Implements OMAP4 memory, IO ordering requirements which can't be addressed 128 * Implements OMAP4 memory, IO ordering requirements which can't be addressed
129 * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and 129 * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPU_IDLE and
130 * by secondary CPU with CONFIG_CPUIDLE. 130 * by secondary CPU with CONFIG_CPU_IDLE.
131 */ 131 */
132static void omap_default_idle(void) 132static void omap_default_idle(void)
133{ 133{
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 35a8014529ca..94fbb815680c 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/cpu.h>
17#include <asm/system_misc.h> 18#include <asm/system_misc.h>
18#include <asm/mach/arch.h> 19#include <asm/mach/arch.h>
19#include <mach/orion5x.h> 20#include <mach/orion5x.h>
@@ -52,7 +53,7 @@ static void __init orion5x_dt_init(void)
52 */ 53 */
53 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { 54 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
54 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); 55 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
55 disable_hlt(); 56 cpu_idle_poll_ctrl(true);
56 } 57 }
57 58
58 if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2")) 59 if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2"))
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index d068f1431c40..ad71c8a03ffd 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -293,7 +293,7 @@ void __init orion5x_init(void)
293 */ 293 */
294 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { 294 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
295 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); 295 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
296 disable_hlt(); 296 cpu_idle_poll_ctrl(true);
297 } 297 }
298 298
299 /* 299 /*
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 12c500558387..648867a8caa8 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -7,12 +7,6 @@ obj-y += clock.o devices.o generic.o irq.o \
7 time.o reset.o 7 time.o reset.o
8obj-$(CONFIG_PM) += pm.o sleep.o standby.o 8obj-$(CONFIG_PM) += pm.o sleep.o standby.o
9 9
10ifeq ($(CONFIG_CPU_FREQ),y)
11obj-$(CONFIG_PXA25x) += cpufreq-pxa2xx.o
12obj-$(CONFIG_PXA27x) += cpufreq-pxa2xx.o
13obj-$(CONFIG_PXA3xx) += cpufreq-pxa3xx.o
14endif
15
16# Generic drivers that other drivers may depend upon 10# Generic drivers that other drivers may depend upon
17 11
18# SoC-specific code 12# SoC-specific code
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
deleted file mode 100644
index 6a7aeab42f6c..000000000000
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ /dev/null
@@ -1,494 +0,0 @@
1/*
2 * linux/arch/arm/mach-pxa/cpufreq-pxa2xx.c
3 *
4 * Copyright (C) 2002,2003 Intrinsyc Software
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * History:
21 * 31-Jul-2002 : Initial version [FB]
22 * 29-Jan-2003 : added PXA255 support [FB]
23 * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
24 *
25 * Note:
26 * This driver may change the memory bus clock rate, but will not do any
27 * platform specific access timing changes... for example if you have flash
28 * memory connected to CS0, you will need to register a platform specific
29 * notifier which will adjust the memory access strobes to maintain a
30 * minimum strobe width.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/sched.h>
37#include <linux/init.h>
38#include <linux/cpufreq.h>
39#include <linux/err.h>
40#include <linux/regulator/consumer.h>
41#include <linux/io.h>
42
43#include <mach/pxa2xx-regs.h>
44#include <mach/smemc.h>
45
46#ifdef DEBUG
47static unsigned int freq_debug;
48module_param(freq_debug, uint, 0);
49MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
50#else
51#define freq_debug 0
52#endif
53
54static struct regulator *vcc_core;
55
56static unsigned int pxa27x_maxfreq;
57module_param(pxa27x_maxfreq, uint, 0);
58MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
59 "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
60
61typedef struct {
62 unsigned int khz;
63 unsigned int membus;
64 unsigned int cccr;
65 unsigned int div2;
66 unsigned int cclkcfg;
67 int vmin;
68 int vmax;
69} pxa_freqs_t;
70
71/* Define the refresh period in mSec for the SDRAM and the number of rows */
72#define SDRAM_TREF 64 /* standard 64ms SDRAM */
73static unsigned int sdram_rows;
74
75#define CCLKCFG_TURBO 0x1
76#define CCLKCFG_FCS 0x2
77#define CCLKCFG_HALFTURBO 0x4
78#define CCLKCFG_FASTBUS 0x8
79#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
80#define MDREFR_DRI_MASK 0xFFF
81
82#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
83#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
84
85/*
86 * PXA255 definitions
87 */
88/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
89#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
90
91static pxa_freqs_t pxa255_run_freqs[] =
92{
93 /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
94 { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
95 {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
96 {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
97 {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
98 {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
99 {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
100};
101
102/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
103static pxa_freqs_t pxa255_turbo_freqs[] =
104{
105 /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
106 { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
107 {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
108 {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
109 {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
110 {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
111};
112
113#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
114#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
115
116static struct cpufreq_frequency_table
117 pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
118static struct cpufreq_frequency_table
119 pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
120
121static unsigned int pxa255_turbo_table;
122module_param(pxa255_turbo_table, uint, 0);
123MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
124
125/*
126 * PXA270 definitions
127 *
128 * For the PXA27x:
129 * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
130 *
131 * A = 0 => memory controller clock from table 3-7,
132 * A = 1 => memory controller clock = system bus clock
133 * Run mode frequency = 13 MHz * L
134 * Turbo mode frequency = 13 MHz * L * N
135 * System bus frequency = 13 MHz * L / (B + 1)
136 *
137 * In CCCR:
138 * A = 1
139 * L = 16 oscillator to run mode ratio
140 * 2N = 6 2 * (turbo mode to run mode ratio)
141 *
142 * In CCLKCFG:
143 * B = 1 Fast bus mode
144 * HT = 0 Half-Turbo mode
145 * T = 1 Turbo mode
146 *
147 * For now, just support some of the combinations in table 3-7 of
148 * PXA27x Processor Family Developer's Manual to simplify frequency
149 * change sequences.
150 */
151#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
152#define CCLKCFG2(B, HT, T) \
153 (CCLKCFG_FCS | \
154 ((B) ? CCLKCFG_FASTBUS : 0) | \
155 ((HT) ? CCLKCFG_HALFTURBO : 0) | \
156 ((T) ? CCLKCFG_TURBO : 0))
157
158static pxa_freqs_t pxa27x_freqs[] = {
159 {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
160 {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
161 {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
162 {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
163 {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
164 {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
165 {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
166};
167
168#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
169static struct cpufreq_frequency_table
170 pxa27x_freq_table[NUM_PXA27x_FREQS+1];
171
172extern unsigned get_clk_frequency_khz(int info);
173
174#ifdef CONFIG_REGULATOR
175
176static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
177{
178 int ret = 0;
179 int vmin, vmax;
180
181 if (!cpu_is_pxa27x())
182 return 0;
183
184 vmin = pxa_freq->vmin;
185 vmax = pxa_freq->vmax;
186 if ((vmin == -1) || (vmax == -1))
187 return 0;
188
189 ret = regulator_set_voltage(vcc_core, vmin, vmax);
190 if (ret)
191 pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
192 vmin, vmax);
193 return ret;
194}
195
196static __init void pxa_cpufreq_init_voltages(void)
197{
198 vcc_core = regulator_get(NULL, "vcc_core");
199 if (IS_ERR(vcc_core)) {
200 pr_info("cpufreq: Didn't find vcc_core regulator\n");
201 vcc_core = NULL;
202 } else {
203 pr_info("cpufreq: Found vcc_core regulator\n");
204 }
205}
206#else
207static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
208{
209 return 0;
210}
211
212static __init void pxa_cpufreq_init_voltages(void) { }
213#endif
214
215static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
216 pxa_freqs_t **pxa_freqs)
217{
218 if (cpu_is_pxa25x()) {
219 if (!pxa255_turbo_table) {
220 *pxa_freqs = pxa255_run_freqs;
221 *freq_table = pxa255_run_freq_table;
222 } else {
223 *pxa_freqs = pxa255_turbo_freqs;
224 *freq_table = pxa255_turbo_freq_table;
225 }
226 }
227 if (cpu_is_pxa27x()) {
228 *pxa_freqs = pxa27x_freqs;
229 *freq_table = pxa27x_freq_table;
230 }
231}
232
233static void pxa27x_guess_max_freq(void)
234{
235 if (!pxa27x_maxfreq) {
236 pxa27x_maxfreq = 416000;
237 printk(KERN_INFO "PXA CPU 27x max frequency not defined "
238 "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
239 pxa27x_maxfreq);
240 } else {
241 pxa27x_maxfreq *= 1000;
242 }
243}
244
245static void init_sdram_rows(void)
246{
247 uint32_t mdcnfg = __raw_readl(MDCNFG);
248 unsigned int drac2 = 0, drac0 = 0;
249
250 if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
251 drac2 = MDCNFG_DRAC2(mdcnfg);
252
253 if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
254 drac0 = MDCNFG_DRAC0(mdcnfg);
255
256 sdram_rows = 1 << (11 + max(drac0, drac2));
257}
258
259static u32 mdrefr_dri(unsigned int freq)
260{
261 u32 interval = freq * SDRAM_TREF / sdram_rows;
262
263 return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
264}
265
266/* find a valid frequency point */
267static int pxa_verify_policy(struct cpufreq_policy *policy)
268{
269 struct cpufreq_frequency_table *pxa_freqs_table;
270 pxa_freqs_t *pxa_freqs;
271 int ret;
272
273 find_freq_tables(&pxa_freqs_table, &pxa_freqs);
274 ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
275
276 if (freq_debug)
277 pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
278 policy->min, policy->max);
279
280 return ret;
281}
282
283static unsigned int pxa_cpufreq_get(unsigned int cpu)
284{
285 return get_clk_frequency_khz(0);
286}
287
288static int pxa_set_target(struct cpufreq_policy *policy,
289 unsigned int target_freq,
290 unsigned int relation)
291{
292 struct cpufreq_frequency_table *pxa_freqs_table;
293 pxa_freqs_t *pxa_freq_settings;
294 struct cpufreq_freqs freqs;
295 unsigned int idx;
296 unsigned long flags;
297 unsigned int new_freq_cpu, new_freq_mem;
298 unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
299 int ret = 0;
300
301 /* Get the current policy */
302 find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
303
304 /* Lookup the next frequency */
305 if (cpufreq_frequency_table_target(policy, pxa_freqs_table,
306 target_freq, relation, &idx)) {
307 return -EINVAL;
308 }
309
310 new_freq_cpu = pxa_freq_settings[idx].khz;
311 new_freq_mem = pxa_freq_settings[idx].membus;
312 freqs.old = policy->cur;
313 freqs.new = new_freq_cpu;
314 freqs.cpu = policy->cpu;
315
316 if (freq_debug)
317 pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
318 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
319 (new_freq_mem / 2000) : (new_freq_mem / 1000));
320
321 if (vcc_core && freqs.new > freqs.old)
322 ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
323 if (ret)
324 return ret;
325 /*
326 * Tell everyone what we're about to do...
327 * you should add a notify client with any platform specific
328 * Vcc changing capability
329 */
330 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
331
332 /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
333 * we need to preset the smaller DRI before the change. If we're
334 * speeding up we need to set the larger DRI value after the change.
335 */
336 preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
337 if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
338 preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
339 preset_mdrefr |= mdrefr_dri(new_freq_mem);
340 }
341 postset_mdrefr =
342 (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
343
344 /* If we're dividing the memory clock by two for the SDRAM clock, this
345 * must be set prior to the change. Clearing the divide must be done
346 * after the change.
347 */
348 if (pxa_freq_settings[idx].div2) {
349 preset_mdrefr |= MDREFR_DB2_MASK;
350 postset_mdrefr |= MDREFR_DB2_MASK;
351 } else {
352 postset_mdrefr &= ~MDREFR_DB2_MASK;
353 }
354
355 local_irq_save(flags);
356
357 /* Set new the CCCR and prepare CCLKCFG */
358 CCCR = pxa_freq_settings[idx].cccr;
359 cclkcfg = pxa_freq_settings[idx].cclkcfg;
360
361 asm volatile(" \n\
362 ldr r4, [%1] /* load MDREFR */ \n\
363 b 2f \n\
364 .align 5 \n\
3651: \n\
366 str %3, [%1] /* preset the MDREFR */ \n\
367 mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
368 str %4, [%1] /* postset the MDREFR */ \n\
369 \n\
370 b 3f \n\
3712: b 1b \n\
3723: nop \n\
373 "
374 : "=&r" (unused)
375 : "r" (MDREFR), "r" (cclkcfg),
376 "r" (preset_mdrefr), "r" (postset_mdrefr)
377 : "r4", "r5");
378 local_irq_restore(flags);
379
380 /*
381 * Tell everyone what we've just done...
382 * you should add a notify client with any platform specific
383 * SDRAM refresh timer adjustments
384 */
385 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
386
387 /*
388 * Even if voltage setting fails, we don't report it, as the frequency
389 * change succeeded. The voltage reduction is not a critical failure,
390 * only power savings will suffer from this.
391 *
392 * Note: if the voltage change fails, and a return value is returned, a
393 * bug is triggered (seems a deadlock). Should anybody find out where,
394 * the "return 0" should become a "return ret".
395 */
396 if (vcc_core && freqs.new < freqs.old)
397 ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
398
399 return 0;
400}
401
402static int pxa_cpufreq_init(struct cpufreq_policy *policy)
403{
404 int i;
405 unsigned int freq;
406 struct cpufreq_frequency_table *pxa255_freq_table;
407 pxa_freqs_t *pxa255_freqs;
408
409 /* try to guess pxa27x cpu */
410 if (cpu_is_pxa27x())
411 pxa27x_guess_max_freq();
412
413 pxa_cpufreq_init_voltages();
414
415 init_sdram_rows();
416
417 /* set default policy and cpuinfo */
418 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
419 policy->cur = get_clk_frequency_khz(0); /* current freq */
420 policy->min = policy->max = policy->cur;
421
422 /* Generate pxa25x the run cpufreq_frequency_table struct */
423 for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
424 pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
425 pxa255_run_freq_table[i].index = i;
426 }
427 pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
428
429 /* Generate pxa25x the turbo cpufreq_frequency_table struct */
430 for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
431 pxa255_turbo_freq_table[i].frequency =
432 pxa255_turbo_freqs[i].khz;
433 pxa255_turbo_freq_table[i].index = i;
434 }
435 pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
436
437 pxa255_turbo_table = !!pxa255_turbo_table;
438
439 /* Generate the pxa27x cpufreq_frequency_table struct */
440 for (i = 0; i < NUM_PXA27x_FREQS; i++) {
441 freq = pxa27x_freqs[i].khz;
442 if (freq > pxa27x_maxfreq)
443 break;
444 pxa27x_freq_table[i].frequency = freq;
445 pxa27x_freq_table[i].index = i;
446 }
447 pxa27x_freq_table[i].index = i;
448 pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
449
450 /*
451 * Set the policy's minimum and maximum frequencies from the tables
452 * just constructed. This sets cpuinfo.mxx_freq, min and max.
453 */
454 if (cpu_is_pxa25x()) {
455 find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
456 pr_info("PXA255 cpufreq using %s frequency table\n",
457 pxa255_turbo_table ? "turbo" : "run");
458 cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
459 }
460 else if (cpu_is_pxa27x())
461 cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
462
463 printk(KERN_INFO "PXA CPU frequency change support initialized\n");
464
465 return 0;
466}
467
468static struct cpufreq_driver pxa_cpufreq_driver = {
469 .verify = pxa_verify_policy,
470 .target = pxa_set_target,
471 .init = pxa_cpufreq_init,
472 .get = pxa_cpufreq_get,
473 .name = "PXA2xx",
474};
475
476static int __init pxa_cpu_init(void)
477{
478 int ret = -ENODEV;
479 if (cpu_is_pxa25x() || cpu_is_pxa27x())
480 ret = cpufreq_register_driver(&pxa_cpufreq_driver);
481 return ret;
482}
483
484static void __exit pxa_cpu_exit(void)
485{
486 cpufreq_unregister_driver(&pxa_cpufreq_driver);
487}
488
489
490MODULE_AUTHOR("Intrinsyc Software Inc.");
491MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
492MODULE_LICENSE("GPL");
493module_init(pxa_cpu_init);
494module_exit(pxa_cpu_exit);
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
deleted file mode 100644
index b85b4ab7aac6..000000000000
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ /dev/null
@@ -1,258 +0,0 @@
1/*
2 * linux/arch/arm/mach-pxa/cpufreq-pxa3xx.c
3 *
4 * Copyright (C) 2008 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/sched.h>
15#include <linux/init.h>
16#include <linux/cpufreq.h>
17#include <linux/slab.h>
18#include <linux/io.h>
19
20#include <mach/pxa3xx-regs.h>
21
22#include "generic.h"
23
24#define HSS_104M (0)
25#define HSS_156M (1)
26#define HSS_208M (2)
27#define HSS_312M (3)
28
29#define SMCFS_78M (0)
30#define SMCFS_104M (2)
31#define SMCFS_208M (5)
32
33#define SFLFS_104M (0)
34#define SFLFS_156M (1)
35#define SFLFS_208M (2)
36#define SFLFS_312M (3)
37
38#define XSPCLK_156M (0)
39#define XSPCLK_NONE (3)
40
41#define DMCFS_26M (0)
42#define DMCFS_260M (3)
43
44struct pxa3xx_freq_info {
45 unsigned int cpufreq_mhz;
46 unsigned int core_xl : 5;
47 unsigned int core_xn : 3;
48 unsigned int hss : 2;
49 unsigned int dmcfs : 2;
50 unsigned int smcfs : 3;
51 unsigned int sflfs : 2;
52 unsigned int df_clkdiv : 3;
53
54 int vcc_core; /* in mV */
55 int vcc_sram; /* in mV */
56};
57
58#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
59{ \
60 .cpufreq_mhz = cpufreq, \
61 .core_xl = _xl, \
62 .core_xn = _xn, \
63 .hss = HSS_##_hss##M, \
64 .dmcfs = DMCFS_##_dmc##M, \
65 .smcfs = SMCFS_##_smc##M, \
66 .sflfs = SFLFS_##_sfl##M, \
67 .df_clkdiv = _dfi, \
68 .vcc_core = vcore, \
69 .vcc_sram = vsram, \
70}
71
72static struct pxa3xx_freq_info pxa300_freqs[] = {
73 /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
74 OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
75 OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
76 OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
77 OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
78};
79
80static struct pxa3xx_freq_info pxa320_freqs[] = {
81 /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
82 OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
83 OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
84 OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
85 OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
86 OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
87};
88
89static unsigned int pxa3xx_freqs_num;
90static struct pxa3xx_freq_info *pxa3xx_freqs;
91static struct cpufreq_frequency_table *pxa3xx_freqs_table;
92
93static int setup_freqs_table(struct cpufreq_policy *policy,
94 struct pxa3xx_freq_info *freqs, int num)
95{
96 struct cpufreq_frequency_table *table;
97 int i;
98
99 table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
100 if (table == NULL)
101 return -ENOMEM;
102
103 for (i = 0; i < num; i++) {
104 table[i].index = i;
105 table[i].frequency = freqs[i].cpufreq_mhz * 1000;
106 }
107 table[num].index = i;
108 table[num].frequency = CPUFREQ_TABLE_END;
109
110 pxa3xx_freqs = freqs;
111 pxa3xx_freqs_num = num;
112 pxa3xx_freqs_table = table;
113
114 return cpufreq_frequency_table_cpuinfo(policy, table);
115}
116
117static void __update_core_freq(struct pxa3xx_freq_info *info)
118{
119 uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
120 uint32_t accr = ACCR;
121 uint32_t xclkcfg;
122
123 accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
124 accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
125
126 /* No clock until core PLL is re-locked */
127 accr |= ACCR_XSPCLK(XSPCLK_NONE);
128
129 xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
130
131 ACCR = accr;
132 __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
133
134 while ((ACSR & mask) != (accr & mask))
135 cpu_relax();
136}
137
138static void __update_bus_freq(struct pxa3xx_freq_info *info)
139{
140 uint32_t mask;
141 uint32_t accr = ACCR;
142
143 mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
144 ACCR_DMCFS_MASK;
145
146 accr &= ~mask;
147 accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
148 ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
149
150 ACCR = accr;
151
152 while ((ACSR & mask) != (accr & mask))
153 cpu_relax();
154}
155
156static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
157{
158 return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
159}
160
161static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
162{
163 return pxa3xx_get_clk_frequency_khz(0);
164}
165
166static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
167 unsigned int target_freq,
168 unsigned int relation)
169{
170 struct pxa3xx_freq_info *next;
171 struct cpufreq_freqs freqs;
172 unsigned long flags;
173 int idx;
174
175 if (policy->cpu != 0)
176 return -EINVAL;
177
178 /* Lookup the next frequency */
179 if (cpufreq_frequency_table_target(policy, pxa3xx_freqs_table,
180 target_freq, relation, &idx))
181 return -EINVAL;
182
183 next = &pxa3xx_freqs[idx];
184
185 freqs.old = policy->cur;
186 freqs.new = next->cpufreq_mhz * 1000;
187 freqs.cpu = policy->cpu;
188
189 pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
190 freqs.old / 1000, freqs.new / 1000,
191 (freqs.old == freqs.new) ? " (skipped)" : "");
192
193 if (freqs.old == target_freq)
194 return 0;
195
196 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
197
198 local_irq_save(flags);
199 __update_core_freq(next);
200 __update_bus_freq(next);
201 local_irq_restore(flags);
202
203 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
204
205 return 0;
206}
207
208static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
209{
210 int ret = -EINVAL;
211
212 /* set default policy and cpuinfo */
213 policy->cpuinfo.min_freq = 104000;
214 policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
215 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
216 policy->max = pxa3xx_get_clk_frequency_khz(0);
217 policy->cur = policy->min = policy->max;
218
219 if (cpu_is_pxa300() || cpu_is_pxa310())
220 ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
221
222 if (cpu_is_pxa320())
223 ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
224
225 if (ret) {
226 pr_err("failed to setup frequency table\n");
227 return ret;
228 }
229
230 pr_info("CPUFREQ support for PXA3xx initialized\n");
231 return 0;
232}
233
234static struct cpufreq_driver pxa3xx_cpufreq_driver = {
235 .verify = pxa3xx_cpufreq_verify,
236 .target = pxa3xx_cpufreq_set,
237 .init = pxa3xx_cpufreq_init,
238 .get = pxa3xx_cpufreq_get,
239 .name = "pxa3xx-cpufreq",
240};
241
242static int __init cpufreq_init(void)
243{
244 if (cpu_is_pxa3xx())
245 return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
246
247 return 0;
248}
249module_init(cpufreq_init);
250
251static void __exit cpufreq_exit(void)
252{
253 cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
254}
255module_exit(cpufreq_exit);
256
257MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
258MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-pxa/include/mach/generic.h b/arch/arm/mach-pxa/include/mach/generic.h
new file mode 100644
index 000000000000..665542e0c9e2
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/generic.h
@@ -0,0 +1 @@
#include "../../generic.h"
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 04b87ec92537..1069b5680826 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -123,6 +123,11 @@ static struct clk s3c2440_clk_ac97 = {
123 .ctrlbit = S3C2440_CLKCON_AC97, 123 .ctrlbit = S3C2440_CLKCON_AC97,
124}; 124};
125 125
126#define S3C24XX_VA_UART0 (S3C_VA_UART)
127#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 )
128#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 )
129#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 )
130
126static unsigned long s3c2440_fclk_n_getrate(struct clk *clk) 131static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
127{ 132{
128 unsigned long ucon0, ucon1, ucon2, divisor; 133 unsigned long ucon0, ucon1, ucon2, divisor;
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index 6bcf87f65f9e..92e609440c57 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -239,6 +239,11 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
239 239
240/* Serial port registrations */ 240/* Serial port registrations */
241 241
242#define S3C2410_PA_UART0 (S3C24XX_PA_UART)
243#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 )
244#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 )
245#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 )
246
242static struct resource s3c2410_uart0_resource[] = { 247static struct resource s3c2410_uart0_resource[] = {
243 [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K), 248 [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
244 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \ 249 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
diff --git a/arch/arm/mach-s3c24xx/cpufreq.c b/arch/arm/mach-s3c24xx/cpufreq.c
index 5f181e733eee..3c0e78ede0da 100644
--- a/arch/arm/mach-s3c24xx/cpufreq.c
+++ b/arch/arm/mach-s3c24xx/cpufreq.c
@@ -204,7 +204,6 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
204 freqs.old = cpu_cur.freq; 204 freqs.old = cpu_cur.freq;
205 freqs.new = cpu_new.freq; 205 freqs.new = cpu_new.freq;
206 206
207 freqs.freqs.cpu = 0;
208 freqs.freqs.old = cpu_cur.freq.armclk / 1000; 207 freqs.freqs.old = cpu_cur.freq.armclk / 1000;
209 freqs.freqs.new = cpu_new.freq.armclk / 1000; 208 freqs.freqs.new = cpu_new.freq.armclk / 1000;
210 209
@@ -218,9 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
218 s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk); 217 s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
219 218
220 /* start the frequency change */ 219 /* start the frequency change */
221 220 cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
222 if (policy)
223 cpufreq_notify_transition(&freqs.freqs, CPUFREQ_PRECHANGE);
224 221
225 /* If hclk is staying the same, then we do not need to 222 /* If hclk is staying the same, then we do not need to
226 * re-write the IO or the refresh timings whilst we are changing 223 * re-write the IO or the refresh timings whilst we are changing
@@ -264,8 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
264 local_irq_restore(flags); 261 local_irq_restore(flags);
265 262
266 /* notify everyone we've done this */ 263 /* notify everyone we've done this */
267 if (policy) 264 cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
268 cpufreq_notify_transition(&freqs.freqs, CPUFREQ_POSTCHANGE);
269 265
270 s3c_freq_dbg("%s: finished\n", __func__); 266 s3c_freq_dbg("%s: finished\n", __func__);
271 return 0; 267 return 0;
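
Note: the notifier changes above appear to track the cpufreq core change where cpufreq_notify_transition() takes the policy as its first argument and drivers no longer fill in freqs.cpu (the core walks the policy's CPUs itself). A hedged sketch of the new calling convention; the example_* names are illustrative.

#include <linux/cpufreq.h>

static void example_do_switch(unsigned int new_khz)
{
	/* platform-specific clock reprogramming would go here */
}

/* Wrap a frequency switch in the PRE/POST transition notifications using
 * the new signature; freqs.cpu is intentionally left untouched. */
static void example_switch_with_notify(struct cpufreq_policy *policy,
				       unsigned int old_khz,
				       unsigned int new_khz)
{
	struct cpufreq_freqs freqs = {
		.old = old_khz,
		.new = new_khz,
	};

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	example_do_switch(new_khz);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
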
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index ead5fab0dbb5..3c8ab07c2012 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -40,12 +40,9 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
40 return index; 40 return index;
41} 41}
42 42
43static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
44
45static struct cpuidle_driver s3c64xx_cpuidle_driver = { 43static struct cpuidle_driver s3c64xx_cpuidle_driver = {
46 .name = "s3c64xx_cpuidle", 44 .name = "s3c64xx_cpuidle",
47 .owner = THIS_MODULE, 45 .owner = THIS_MODULE,
48 .en_core_tk_irqen = 1,
49 .states = { 46 .states = {
50 { 47 {
51 .enter = s3c64xx_enter_idle, 48 .enter = s3c64xx_enter_idle,
@@ -61,16 +58,6 @@ static struct cpuidle_driver s3c64xx_cpuidle_driver = {
61 58
62static int __init s3c64xx_init_cpuidle(void) 59static int __init s3c64xx_init_cpuidle(void)
63{ 60{
64 int ret; 61 return cpuidle_register(&s3c64xx_cpuidle_driver, NULL);
65
66 cpuidle_register_driver(&s3c64xx_cpuidle_driver);
67
68 ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
69 if (ret) {
70 pr_err("Failed to register cpuidle device: %d\n", ret);
71 return ret;
72 }
73
74 return 0;
75} 62}
76device_initcall(s3c64xx_init_cpuidle); 63device_initcall(s3c64xx_init_cpuidle);
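
Note: the one-line s3c64xx init above relies on cpuidle_register() doing the per-CPU work that used to be open-coded here and in the OMAP4 driver earlier in this diff. A simplified, hedged sketch of that equivalence, modelled on the boilerplate removed above (this is not the helper's actual body):

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct cpuidle_device, example_idle_dev);

/* Roughly what the removed boilerplate did for each CPU; cpuidle_register()
 * now performs the equivalent registration internally. */
static int example_register_all(struct cpuidle_driver *drv,
				const struct cpumask *cpus)
{
	struct cpuidle_device *dev;
	int cpu, ret;

	ret = cpuidle_register_driver(drv);
	if (ret)
		return ret;

	for_each_cpu(cpu, cpus) {
		dev = &per_cpu(example_idle_dev, cpu);
		dev->cpu = cpu;
		ret = cpuidle_register_device(dev);
		if (ret)
			return ret;
	}
	return 0;
}
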
diff --git a/arch/arm/mach-s3c64xx/setup-usb-phy.c b/arch/arm/mach-s3c64xx/setup-usb-phy.c
index c8174d95339b..ca960bda02fd 100644
--- a/arch/arm/mach-s3c64xx/setup-usb-phy.c
+++ b/arch/arm/mach-s3c64xx/setup-usb-phy.c
@@ -76,7 +76,7 @@ static int s3c_usb_otgphy_exit(struct platform_device *pdev)
76 76
77int s5p_usb_phy_init(struct platform_device *pdev, int type) 77int s5p_usb_phy_init(struct platform_device *pdev, int type)
78{ 78{
79 if (type == S5P_USB_PHY_DEVICE) 79 if (type == USB_PHY_TYPE_DEVICE)
80 return s3c_usb_otgphy_init(pdev); 80 return s3c_usb_otgphy_init(pdev);
81 81
82 return -EINVAL; 82 return -EINVAL;
@@ -84,7 +84,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
84 84
85int s5p_usb_phy_exit(struct platform_device *pdev, int type) 85int s5p_usb_phy_exit(struct platform_device *pdev, int type)
86{ 86{
87 if (type == S5P_USB_PHY_DEVICE) 87 if (type == USB_PHY_TYPE_DEVICE)
88 return s3c_usb_otgphy_exit(pdev); 88 return s3c_usb_otgphy_exit(pdev);
89 89
90 return -EINVAL; 90 return -EINVAL;
diff --git a/arch/arm/mach-s5pv210/setup-usb-phy.c b/arch/arm/mach-s5pv210/setup-usb-phy.c
index 356a0900af03..b2ee5333f89c 100644
--- a/arch/arm/mach-s5pv210/setup-usb-phy.c
+++ b/arch/arm/mach-s5pv210/setup-usb-phy.c
@@ -80,7 +80,7 @@ static int s5pv210_usb_otgphy_exit(struct platform_device *pdev)
80 80
81int s5p_usb_phy_init(struct platform_device *pdev, int type) 81int s5p_usb_phy_init(struct platform_device *pdev, int type)
82{ 82{
83 if (type == S5P_USB_PHY_DEVICE) 83 if (type == USB_PHY_TYPE_DEVICE)
84 return s5pv210_usb_otgphy_init(pdev); 84 return s5pv210_usb_otgphy_init(pdev);
85 85
86 return -EINVAL; 86 return -EINVAL;
@@ -88,7 +88,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
88 88
89int s5p_usb_phy_exit(struct platform_device *pdev, int type) 89int s5p_usb_phy_exit(struct platform_device *pdev, int type)
90{ 90{
91 if (type == S5P_USB_PHY_DEVICE) 91 if (type == USB_PHY_TYPE_DEVICE)
92 return s5pv210_usb_otgphy_exit(pdev); 92 return s5pv210_usb_otgphy_exit(pdev);
93 93
94 return -EINVAL; 94 return -EINVAL;
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index ca14dbdcfb22..04f9784ff0ed 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -4,7 +4,7 @@ menu "SA11x0 Implementations"
4 4
5config SA1100_ASSABET 5config SA1100_ASSABET
6 bool "Assabet" 6 bool "Assabet"
7 select CPU_FREQ_SA1110 7 select ARM_SA1110_CPUFREQ
8 help 8 help
9 Say Y here if you are using the Intel(R) StrongARM(R) SA-1110 9 Say Y here if you are using the Intel(R) StrongARM(R) SA-1110
10 Microprocessor Development Board (also known as the Assabet). 10 Microprocessor Development Board (also known as the Assabet).
@@ -20,7 +20,7 @@ config ASSABET_NEPONSET
20 20
21config SA1100_CERF 21config SA1100_CERF
22 bool "CerfBoard" 22 bool "CerfBoard"
23 select CPU_FREQ_SA1110 23 select ARM_SA1110_CPUFREQ
24 help 24 help
25 The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued). 25 The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued).
26 More information is available at: 26 More information is available at:
@@ -47,7 +47,7 @@ endchoice
47 47
48config SA1100_COLLIE 48config SA1100_COLLIE
49 bool "Sharp Zaurus SL5500" 49 bool "Sharp Zaurus SL5500"
50 # FIXME: select CPU_FREQ_SA11x0 50 # FIXME: select ARM_SA11x0_CPUFREQ
51 select SHARP_LOCOMO 51 select SHARP_LOCOMO
52 select SHARP_PARAM 52 select SHARP_PARAM
53 select SHARP_SCOOP 53 select SHARP_SCOOP
@@ -56,7 +56,7 @@ config SA1100_COLLIE
56 56
57config SA1100_H3100 57config SA1100_H3100
58 bool "Compaq iPAQ H3100" 58 bool "Compaq iPAQ H3100"
59 select CPU_FREQ_SA1110 59 select ARM_SA1110_CPUFREQ
60 select HTC_EGPIO 60 select HTC_EGPIO
61 help 61 help
62 Say Y here if you intend to run this kernel on the Compaq iPAQ 62 Say Y here if you intend to run this kernel on the Compaq iPAQ
@@ -67,7 +67,7 @@ config SA1100_H3100
67 67
68config SA1100_H3600 68config SA1100_H3600
69 bool "Compaq iPAQ H3600/H3700" 69 bool "Compaq iPAQ H3600/H3700"
70 select CPU_FREQ_SA1110 70 select ARM_SA1110_CPUFREQ
71 select HTC_EGPIO 71 select HTC_EGPIO
72 help 72 help
73 Say Y here if you intend to run this kernel on the Compaq iPAQ 73 Say Y here if you intend to run this kernel on the Compaq iPAQ
@@ -78,7 +78,7 @@ config SA1100_H3600
78 78
79config SA1100_BADGE4 79config SA1100_BADGE4
80 bool "HP Labs BadgePAD 4" 80 bool "HP Labs BadgePAD 4"
81 select CPU_FREQ_SA1100 81 select ARM_SA1100_CPUFREQ
82 select SA1111 82 select SA1111
83 help 83 help
84 Say Y here if you want to build a kernel for the HP Laboratories 84 Say Y here if you want to build a kernel for the HP Laboratories
@@ -86,7 +86,7 @@ config SA1100_BADGE4
86 86
87config SA1100_JORNADA720 87config SA1100_JORNADA720
88 bool "HP Jornada 720" 88 bool "HP Jornada 720"
89 # FIXME: select CPU_FREQ_SA11x0 89 # FIXME: select ARM_SA11x0_CPUFREQ
90 select SA1111 90 select SA1111
91 help 91 help
92 Say Y here if you want to build a kernel for the HP Jornada 720 92 Say Y here if you want to build a kernel for the HP Jornada 720
@@ -105,14 +105,14 @@ config SA1100_JORNADA720_SSP
105 105
106config SA1100_HACKKIT 106config SA1100_HACKKIT
107 bool "HackKit Core CPU Board" 107 bool "HackKit Core CPU Board"
108 select CPU_FREQ_SA1100 108 select ARM_SA1100_CPUFREQ
109 help 109 help
110 Say Y here to support the HackKit Core CPU Board 110 Say Y here to support the HackKit Core CPU Board
111 <http://hackkit.eletztrick.de>; 111 <http://hackkit.eletztrick.de>;
112 112
113config SA1100_LART 113config SA1100_LART
114 bool "LART" 114 bool "LART"
115 select CPU_FREQ_SA1100 115 select ARM_SA1100_CPUFREQ
116 help 116 help
117 Say Y here if you are using the Linux Advanced Radio Terminal 117 Say Y here if you are using the Linux Advanced Radio Terminal
118 (also known as the LART). See <http://www.lartmaker.nl/> for 118 (also known as the LART). See <http://www.lartmaker.nl/> for
@@ -120,7 +120,7 @@ config SA1100_LART
120 120
121config SA1100_NANOENGINE 121config SA1100_NANOENGINE
122 bool "nanoEngine" 122 bool "nanoEngine"
123 select CPU_FREQ_SA1110 123 select ARM_SA1110_CPUFREQ
124 select PCI 124 select PCI
125 select PCI_NANOENGINE 125 select PCI_NANOENGINE
126 help 126 help
@@ -130,7 +130,7 @@ config SA1100_NANOENGINE
130 130
131config SA1100_PLEB 131config SA1100_PLEB
132 bool "PLEB" 132 bool "PLEB"
133 select CPU_FREQ_SA1100 133 select ARM_SA1100_CPUFREQ
134 help 134 help
135 Say Y here if you are using version 1 of the Portable Linux 135 Say Y here if you are using version 1 of the Portable Linux
136 Embedded Board (also known as PLEB). 136 Embedded Board (also known as PLEB).
@@ -139,7 +139,7 @@ config SA1100_PLEB
139 139
140config SA1100_SHANNON 140config SA1100_SHANNON
141 bool "Shannon" 141 bool "Shannon"
142 select CPU_FREQ_SA1100 142 select ARM_SA1100_CPUFREQ
143 help 143 help
144 The Shannon (also known as a Tuxscreen, and also as a IS2630) was a 144 The Shannon (also known as a Tuxscreen, and also as a IS2630) was a
145 limited edition webphone produced by Philips. The Shannon is a SA1100 145 limited edition webphone produced by Philips. The Shannon is a SA1100
@@ -148,7 +148,7 @@ config SA1100_SHANNON
148 148
149config SA1100_SIMPAD 149config SA1100_SIMPAD
150 bool "Simpad" 150 bool "Simpad"
151 select CPU_FREQ_SA1110 151 select ARM_SA1110_CPUFREQ
152 help 152 help
153 The SIEMENS webpad SIMpad is based on the StrongARM 1110. There 153 The SIEMENS webpad SIMpad is based on the StrongARM 1110. There
154 are two different versions CL4 and SL4. CL4 has 32MB RAM and 16MB 154 are two different versions CL4 and SL4. CL4 has 32MB RAM and 16MB
diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile
index 1aed9e70465d..2732eef48966 100644
--- a/arch/arm/mach-sa1100/Makefile
+++ b/arch/arm/mach-sa1100/Makefile
@@ -8,9 +8,6 @@ obj-m :=
8obj-n := 8obj-n :=
9obj- := 9obj- :=
10 10
11obj-$(CONFIG_CPU_FREQ_SA1100) += cpu-sa1100.o
12obj-$(CONFIG_CPU_FREQ_SA1110) += cpu-sa1110.o
13
14# Specific board support 11# Specific board support
15obj-$(CONFIG_SA1100_ASSABET) += assabet.o 12obj-$(CONFIG_SA1100_ASSABET) += assabet.o
16obj-$(CONFIG_ASSABET_NEPONSET) += neponset.o 13obj-$(CONFIG_ASSABET_NEPONSET) += neponset.o
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c
deleted file mode 100644
index e8f4d1e19233..000000000000
--- a/arch/arm/mach-sa1100/cpu-sa1100.c
+++ /dev/null
@@ -1,249 +0,0 @@
1/*
2 * cpu-sa1100.c: clock scaling for the SA1100
3 *
4 * Copyright (C) 2000 2001, The Delft University of Technology
5 *
6 * Authors:
7 * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
8 * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
9 * - major rewrite for linux-2.3.99
10 * - rewritten for the more generic power management scheme in
11 * linux-2.4.5-rmk1
12 *
13 * This software has been developed while working on the LART
14 * computing board (http://www.lartmaker.nl/), which is
15 * sponsored by the Mobile Multi-media Communications
16 * (http://www.mobimedia.org/) and Ubiquitous Communications
17 * (http://www.ubicom.tudelft.nl/) projects.
18 *
19 * The authors can be reached at:
20 *
21 * Erik Mouw
22 * Information and Communication Theory Group
23 * Faculty of Information Technology and Systems
24 * Delft University of Technology
25 * P.O. Box 5031
26 * 2600 GA Delft
27 * The Netherlands
28 *
29 *
30 * This program is free software; you can redistribute it and/or modify
31 * it under the terms of the GNU General Public License as published by
32 * the Free Software Foundation; either version 2 of the License, or
33 * (at your option) any later version.
34 *
35 * This program is distributed in the hope that it will be useful,
36 * but WITHOUT ANY WARRANTY; without even the implied warranty of
37 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
38 * GNU General Public License for more details.
39 *
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 *
44 *
45 * Theory of operations
46 * ====================
47 *
48 * Clock scaling can be used to lower the power consumption of the CPU
49 * core. This will give you a somewhat longer running time.
50 *
51 * The SA-1100 has a single register to change the core clock speed:
52 *
53 * PPCR 0x90020014 PLL config
54 *
55 * However, the DRAM timings are closely related to the core clock
56 * speed, so we need to change these, too. The used registers are:
57 *
58 * MDCNFG 0xA0000000 DRAM config
59 * MDCAS0 0xA0000004 Access waveform
60 * MDCAS1 0xA0000008 Access waveform
61 * MDCAS2 0xA000000C Access waveform
62 *
63 * Care must be taken to change the DRAM parameters the correct way,
64 * because otherwise the DRAM becomes unusable and the kernel will
65 * crash.
66 *
67 * The simple solution to avoid a kernel crash is to put the actual
68 * clock change in ROM and jump to that code from the kernel. The main
69 * disadvantage is that the ROM has to be modified, which is not
70 * possible on all SA-1100 platforms. Another disadvantage is that
71 * jumping to ROM makes clock switching unnecessary complicated.
72 *
73 * The idea behind this driver is that the memory configuration can be
74 * changed while running from DRAM (even with interrupts turned on!)
75 * as long as all re-configuration steps yield a valid DRAM
76 * configuration. The advantages are clear: it will run on all SA-1100
77 * platforms, and the code is very simple.
78 *
79 * If you really want to understand what is going on in
80 * sa1100_update_dram_timings(), you'll have to read sections 8.2,
81 * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
82 * Developers Manual" (available for free from Intel).
83 *
84 */
85
86#include <linux/kernel.h>
87#include <linux/types.h>
88#include <linux/init.h>
89#include <linux/cpufreq.h>
90#include <linux/io.h>
91
92#include <asm/cputype.h>
93
94#include <mach/hardware.h>
95
96#include "generic.h"
97
98struct sa1100_dram_regs {
99 int speed;
100 u32 mdcnfg;
101 u32 mdcas0;
102 u32 mdcas1;
103 u32 mdcas2;
104};
105
106
107static struct cpufreq_driver sa1100_driver;
108
109static struct sa1100_dram_regs sa1100_dram_settings[] = {
110 /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
111 { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
112 { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
113 { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
114 {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
115 {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
116 {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
117 {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
118 {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
119 {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
120 {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
121 {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
122 {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
123 {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
124 {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
125 {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
126 {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
127 { 0, 0, 0, 0, 0 } /* last entry */
128};
129
130static void sa1100_update_dram_timings(int current_speed, int new_speed)
131{
132 struct sa1100_dram_regs *settings = sa1100_dram_settings;
133
134 /* find speed */
135 while (settings->speed != 0) {
136 if (new_speed == settings->speed)
137 break;
138
139 settings++;
140 }
141
142 if (settings->speed == 0) {
143 panic("%s: couldn't find dram setting for speed %d\n",
144 __func__, new_speed);
145 }
146
147 /* No risk, no fun: run with interrupts on! */
148 if (new_speed > current_speed) {
149 /* We're going FASTER, so first relax the memory
150 * timings before changing the core frequency
151 */
152
153 /* Half the memory access clock */
154 MDCNFG |= MDCNFG_CDB2;
155
156 /* The order of these statements IS important, keep 8
157 * pulses!!
158 */
159 MDCAS2 = settings->mdcas2;
160 MDCAS1 = settings->mdcas1;
161 MDCAS0 = settings->mdcas0;
162 MDCNFG = settings->mdcnfg;
163 } else {
164 /* We're going SLOWER: first decrease the core
165 * frequency and then tighten the memory settings.
166 */
167
168 /* Half the memory access clock */
169 MDCNFG |= MDCNFG_CDB2;
170
171 /* The order of these statements IS important, keep 8
172 * pulses!!
173 */
174 MDCAS0 = settings->mdcas0;
175 MDCAS1 = settings->mdcas1;
176 MDCAS2 = settings->mdcas2;
177 MDCNFG = settings->mdcnfg;
178 }
179}
180
181static int sa1100_target(struct cpufreq_policy *policy,
182 unsigned int target_freq,
183 unsigned int relation)
184{
185 unsigned int cur = sa11x0_getspeed(0);
186 unsigned int new_ppcr;
187 struct cpufreq_freqs freqs;
188
189 new_ppcr = sa11x0_freq_to_ppcr(target_freq);
190 switch (relation) {
191 case CPUFREQ_RELATION_L:
192 if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
193 new_ppcr--;
194 break;
195 case CPUFREQ_RELATION_H:
196 if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
197 (sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
198 new_ppcr--;
199 break;
200 }
201
202 freqs.old = cur;
203 freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
204 freqs.cpu = 0;
205
206 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
207
208 if (freqs.new > cur)
209 sa1100_update_dram_timings(cur, freqs.new);
210
211 PPCR = new_ppcr;
212
213 if (freqs.new < cur)
214 sa1100_update_dram_timings(cur, freqs.new);
215
216 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
217
218 return 0;
219}
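/*
 * Note (editorial, not in the original source): sa1100_target() reprograms
 * the DRAM timings before raising PPCR and only after lowering it, so the
 * SDRAM is never clocked faster than its currently programmed timings
 * allow at any point during the transition.
 */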
220
221static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
222{
223 if (policy->cpu != 0)
224 return -EINVAL;
225 policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
226 policy->cpuinfo.min_freq = 59000;
227 policy->cpuinfo.max_freq = 287000;
228 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
229 return 0;
230}
231
232static struct cpufreq_driver sa1100_driver __refdata = {
233 .flags = CPUFREQ_STICKY,
234 .verify = sa11x0_verify_speed,
235 .target = sa1100_target,
236 .get = sa11x0_getspeed,
237 .init = sa1100_cpu_init,
238 .name = "sa1100",
239};
240
241static int __init sa1100_dram_init(void)
242{
243 if (cpu_is_sa1100())
244 return cpufreq_register_driver(&sa1100_driver);
245 else
246 return -ENODEV;
247}
248
249arch_initcall(sa1100_dram_init);
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c
deleted file mode 100644
index 48c45b0c92bb..000000000000
--- a/arch/arm/mach-sa1100/cpu-sa1110.c
+++ /dev/null
@@ -1,408 +0,0 @@
1/*
2 * linux/arch/arm/mach-sa1100/cpu-sa1110.c
3 *
4 * Copyright (C) 2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
 10 * Note: there are two errata that apply to the SA1110 here:
11 * 7 - SDRAM auto-power-up failure (rev A0)
12 * 13 - Corruption of internal register reads/writes following
13 * SDRAM reads (rev A0, B0, B1)
14 *
15 * We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
16 *
17 * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
18 */
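/*
 * Illustrative example (not part of the original file): the SDRAM type is
 * selected via the module parameter mentioned above, e.g. on the kernel
 * command line:
 *
 *     cpu_sa1110.sdram=TC59SM716-CL3
 *
 * The name must match one of the entries in sdram_tbl[] below; an unknown
 * name leaves the driver unregistered.
 */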
19#include <linux/cpufreq.h>
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/io.h>
23#include <linux/kernel.h>
24#include <linux/moduleparam.h>
25#include <linux/types.h>
26
27#include <asm/cputype.h>
28#include <asm/mach-types.h>
29
30#include <mach/hardware.h>
31
32#include "generic.h"
33
34#undef DEBUG
35
36struct sdram_params {
37 const char name[20];
38 u_char rows; /* bits */
39 u_char cas_latency; /* cycles */
40 u_char tck; /* clock cycle time (ns) */
41 u_char trcd; /* activate to r/w (ns) */
42 u_char trp; /* precharge to activate (ns) */
43 u_char twr; /* write recovery time (ns) */
44 u_short refresh; /* refresh time for array (us) */
45};
46
47struct sdram_info {
48 u_int mdcnfg;
49 u_int mdrefr;
50 u_int mdcas[3];
51};
52
53static struct sdram_params sdram_tbl[] __initdata = {
54 { /* Toshiba TC59SM716 CL2 */
55 .name = "TC59SM716-CL2",
56 .rows = 12,
57 .tck = 10,
58 .trcd = 20,
59 .trp = 20,
60 .twr = 10,
61 .refresh = 64000,
62 .cas_latency = 2,
63 }, { /* Toshiba TC59SM716 CL3 */
64 .name = "TC59SM716-CL3",
65 .rows = 12,
66 .tck = 8,
67 .trcd = 20,
68 .trp = 20,
69 .twr = 8,
70 .refresh = 64000,
71 .cas_latency = 3,
72 }, { /* Samsung K4S641632D TC75 */
73 .name = "K4S641632D",
74 .rows = 14,
75 .tck = 9,
76 .trcd = 27,
77 .trp = 20,
78 .twr = 9,
79 .refresh = 64000,
80 .cas_latency = 3,
81 }, { /* Samsung K4S281632B-1H */
82 .name = "K4S281632B-1H",
83 .rows = 12,
84 .tck = 10,
85 .trp = 20,
86 .twr = 10,
87 .refresh = 64000,
88 .cas_latency = 3,
89 }, { /* Samsung KM416S4030CT */
90 .name = "KM416S4030CT",
91 .rows = 13,
92 .tck = 8,
93 .trcd = 24, /* 3 CLKs */
94 .trp = 24, /* 3 CLKs */
95 .twr = 16, /* Trdl: 2 CLKs */
96 .refresh = 64000,
97 .cas_latency = 3,
98 }, { /* Winbond W982516AH75L CL3 */
99 .name = "W982516AH75L",
100 .rows = 16,
101 .tck = 8,
102 .trcd = 20,
103 .trp = 20,
104 .twr = 8,
105 .refresh = 64000,
106 .cas_latency = 3,
107 }, { /* Micron MT48LC8M16A2TG-75 */
108 .name = "MT48LC8M16A2TG-75",
109 .rows = 12,
110 .tck = 8,
111 .trcd = 20,
112 .trp = 20,
113 .twr = 8,
114 .refresh = 64000,
115 .cas_latency = 3,
116 },
117};
118
119static struct sdram_params sdram_params;
120
121/*
122 * Given a period in ns and frequency in khz, calculate the number of
123 * cycles of frequency in period. Note that we round up to the next
124 * cycle, even if we are only slightly over.
125 */
126static inline u_int ns_to_cycles(u_int ns, u_int khz)
127{
128 return (ns * khz + 999999) / 1000000;
129}
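A minimal user-space sketch of the rounding behaviour (illustrative only, not part of the driver):

#include <stdio.h>

static unsigned int ns_to_cycles(unsigned int ns, unsigned int khz)
{
	return (ns * khz + 999999) / 1000000;
}

int main(void)
{
	/* 20 ns at 103200 kHz is 2.064 cycles and rounds up to 3 */
	printf("%u\n", ns_to_cycles(20, 103200));
	/* 10 ns at 103200 kHz is 1.032 cycles and rounds up to 2 */
	printf("%u\n", ns_to_cycles(10, 103200));
	return 0;
}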
130
131/*
132 * Create the MDCAS register bit pattern.
133 */
134static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
135{
136 u_int shift;
137
138 rcd = 2 * rcd - 1;
139 shift = delayed + 1 + rcd;
140
141 mdcas[0] = (1 << rcd) - 1;
142 mdcas[0] |= 0x55555555 << shift;
143 mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
144}
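/*
 * Worked example (illustrative, not in the original): with trcd rounding
 * to 3 cycles and delayed read latching off (delayed = 0), set_mdcas()
 * computes rcd = 2 * 3 - 1 = 5 and shift = 0 + 1 + 5 = 6, so
 *
 *     mdcas[0] = ((1 << 5) - 1) | (0x55555555 << 6) = 0x5555555f
 *     mdcas[1] = mdcas[2] = 0x55555555 << (6 & 1)   = 0x55555555
 *
 * i.e. a run of '1' bits covering the RAS-to-CAS delay followed by the
 * alternating CAS waveform.
 */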
145
146static void
147sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
148 struct sdram_params *sdram)
149{
150 u_int mem_khz, sd_khz, trp, twr;
151
152 mem_khz = cpu_khz / 2;
153 sd_khz = mem_khz;
154
155 /*
156 * If SDCLK would invalidate the SDRAM timings,
157 * run SDCLK at half speed.
158 *
159 * CPU steppings prior to B2 must either run the memory at
160 * half speed or use delayed read latching (errata 13).
161 */
162 if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
163 (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
164 sd_khz /= 2;
165
166 sd->mdcnfg = MDCNFG & 0x007f007f;
167
168 twr = ns_to_cycles(sdram->twr, mem_khz);
169
170 /* trp should always be >1 */
171 trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
172 if (trp < 1)
173 trp = 1;
174
175 sd->mdcnfg |= trp << 8;
176 sd->mdcnfg |= trp << 24;
177 sd->mdcnfg |= sdram->cas_latency << 12;
178 sd->mdcnfg |= sdram->cas_latency << 28;
179 sd->mdcnfg |= twr << 14;
180 sd->mdcnfg |= twr << 30;
181
182 sd->mdrefr = MDREFR & 0xffbffff0;
183 sd->mdrefr |= 7;
184
185 if (sd_khz != mem_khz)
186 sd->mdrefr |= MDREFR_K1DB2;
187
188 /* initial number of '1's in MDCAS + 1 */
189 set_mdcas(sd->mdcas, sd_khz >= 62000,
190 ns_to_cycles(sdram->trcd, mem_khz));
191
192#ifdef DEBUG
193 printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
194 sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
195 sd->mdcas[2]);
196#endif
197}
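/*
 * Worked example (illustrative, not in the original): for the
 * TC59SM716-CL2 part (tck = 10 ns) at a 206.4 MHz core clock the memory
 * clock is 103.2 MHz, i.e. ~9.7 ns per cycle, so ns_to_cycles(10, 103200)
 * evaluates to 2 (> 1) and SDCLK is run at half speed (51.6 MHz).
 */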
198
199/*
200 * Set the SDRAM refresh rate.
201 */
202static inline void sdram_set_refresh(u_int dri)
203{
204 MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
205 (void) MDREFR;
206}
207
208/*
209 * Update the refresh period. We do this such that we always refresh
210 * the SDRAMs within their permissible period. The refresh period is
211 * always a multiple of the memory clock (fixed at cpu_clock / 2).
212 *
213 * FIXME: we don't currently take account of burst accesses here,
 214 * but neither does Intel's DM nor Angel.
215 */
216static void
217sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
218{
219 u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
220 u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
221
222#ifdef DEBUG
223 mdelay(250);
224 printk(KERN_DEBUG "new dri value = %d\n", dri);
225#endif
226
227 sdram_set_refresh(dri);
228}
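/*
 * Worked example (illustrative, not in the original): with
 * refresh = 64000 us and rows = 12, ns_row = 64000000 >> 12 = 15625 ns.
 * At a 206.4 MHz core clock the memory clock is 103200 kHz, so
 * ns_to_cycles(15625, 103200) = 1613 and dri = 1613 / 32 = 50, i.e. one
 * row is refreshed roughly every 1600 memory cycles (~15.5 us), just
 * inside the 15.625 us per-row budget.
 */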
229
230/*
231 * Ok, set the CPU frequency.
232 */
233static int sa1110_target(struct cpufreq_policy *policy,
234 unsigned int target_freq,
235 unsigned int relation)
236{
237 struct sdram_params *sdram = &sdram_params;
238 struct cpufreq_freqs freqs;
239 struct sdram_info sd;
240 unsigned long flags;
241 unsigned int ppcr, unused;
242
243 switch (relation) {
244 case CPUFREQ_RELATION_L:
245 ppcr = sa11x0_freq_to_ppcr(target_freq);
246 if (sa11x0_ppcr_to_freq(ppcr) > policy->max)
247 ppcr--;
248 break;
249 case CPUFREQ_RELATION_H:
250 ppcr = sa11x0_freq_to_ppcr(target_freq);
251 if (ppcr && (sa11x0_ppcr_to_freq(ppcr) > target_freq) &&
252 (sa11x0_ppcr_to_freq(ppcr-1) >= policy->min))
253 ppcr--;
254 break;
255 default:
256 return -EINVAL;
257 }
258
259 freqs.old = sa11x0_getspeed(0);
260 freqs.new = sa11x0_ppcr_to_freq(ppcr);
261 freqs.cpu = 0;
262
263 sdram_calculate_timing(&sd, freqs.new, sdram);
264
265#if 0
266 /*
267 * These values are wrong according to the SA1110 documentation
268 * and errata, but they seem to work. Need to get a storage
269 * scope on to the SDRAM signals to work out why.
270 */
271 if (policy->max < 147500) {
272 sd.mdrefr |= MDREFR_K1DB2;
273 sd.mdcas[0] = 0xaaaaaa7f;
274 } else {
275 sd.mdrefr &= ~MDREFR_K1DB2;
276 sd.mdcas[0] = 0xaaaaaa9f;
277 }
278 sd.mdcas[1] = 0xaaaaaaaa;
279 sd.mdcas[2] = 0xaaaaaaaa;
280#endif
281
282 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
283
284 /*
285 * The clock could be going away for some time. Set the SDRAMs
286 * to refresh rapidly (every 64 memory clock cycles). To get
287 * through the whole array, we need to wait 262144 mclk cycles.
288 * We wait 20ms to be safe.
289 */
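	/*
	 * Illustrative arithmetic (not in the original): 262144 cycles is
	 * 4096 refreshes at one refresh per 64 memory cycles, i.e. a full
	 * 2^12-row array. Even at the slowest memory clock (29.5 MHz, half
	 * of the 59 MHz core minimum) that takes only about 8.9 ms, so the
	 * 20 ms wait leaves a comfortable margin.
	 */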
290 sdram_set_refresh(2);
291 if (!irqs_disabled())
292 msleep(20);
293 else
294 mdelay(20);
295
296 /*
297 * Reprogram the DRAM timings with interrupts disabled, and
298 * ensure that we are doing this within a complete cache line.
299 * This means that we won't access SDRAM for the duration of
300 * the programming.
301 */
302 local_irq_save(flags);
303 asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
304 udelay(10);
305 __asm__ __volatile__("\n\
306 b 2f \n\
307 .align 5 \n\
3081: str %3, [%1, #0] @ MDCNFG \n\
309 str %4, [%1, #28] @ MDREFR \n\
310 str %5, [%1, #4] @ MDCAS0 \n\
311 str %6, [%1, #8] @ MDCAS1 \n\
312 str %7, [%1, #12] @ MDCAS2 \n\
313 str %8, [%2, #0] @ PPCR \n\
314 ldr %0, [%1, #0] \n\
315 b 3f \n\
3162: b 1b \n\
3173: nop \n\
318 nop"
319 : "=&r" (unused)
320 : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
321 "r" (sd.mdrefr), "r" (sd.mdcas[0]),
322 "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
323 local_irq_restore(flags);
324
325 /*
326 * Now, return the SDRAM refresh back to normal.
327 */
328 sdram_update_refresh(freqs.new, sdram);
329
330 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
331
332 return 0;
333}
334
335static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
336{
337 if (policy->cpu != 0)
338 return -EINVAL;
339 policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
340 policy->cpuinfo.min_freq = 59000;
341 policy->cpuinfo.max_freq = 287000;
342 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
343 return 0;
344}
345
346/* sa1110_driver needs __refdata because it must outlive the __init code
347 * that registers it with cpufreq_register_driver(). */
348static struct cpufreq_driver sa1110_driver __refdata = {
349 .flags = CPUFREQ_STICKY,
350 .verify = sa11x0_verify_speed,
351 .target = sa1110_target,
352 .get = sa11x0_getspeed,
353 .init = sa1110_cpu_init,
354 .name = "sa1110",
355};
356
357static struct sdram_params *sa1110_find_sdram(const char *name)
358{
359 struct sdram_params *sdram;
360
361 for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
362 sdram++)
363 if (strcmp(name, sdram->name) == 0)
364 return sdram;
365
366 return NULL;
367}
368
369static char sdram_name[16];
370
371static int __init sa1110_clk_init(void)
372{
373 struct sdram_params *sdram;
374 const char *name = sdram_name;
375
376 if (!cpu_is_sa1110())
377 return -ENODEV;
378
379 if (!name[0]) {
380 if (machine_is_assabet())
381 name = "TC59SM716-CL3";
382 if (machine_is_pt_system3())
383 name = "K4S641632D";
384 if (machine_is_h3100())
385 name = "KM416S4030CT";
386 if (machine_is_jornada720())
387 name = "K4S281632B-1H";
388 if (machine_is_nanoengine())
389 name = "MT48LC8M16A2TG-75";
390 }
391
392 sdram = sa1110_find_sdram(name);
393 if (sdram) {
394 printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
395 " twr: %d refresh: %d cas_latency: %d\n",
396 sdram->tck, sdram->trcd, sdram->trp,
397 sdram->twr, sdram->refresh, sdram->cas_latency);
398
399 memcpy(&sdram_params, sdram, sizeof(sdram_params));
400
401 return cpufreq_register_driver(&sa1110_driver);
402 }
403
404 return 0;
405}
406
407module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
408arch_initcall(sa1110_clk_init);
diff --git a/arch/arm/mach-sa1100/include/mach/generic.h b/arch/arm/mach-sa1100/include/mach/generic.h
new file mode 100644
index 000000000000..665542e0c9e2
--- /dev/null
+++ b/arch/arm/mach-sa1100/include/mach/generic.h
@@ -0,0 +1 @@
#include "../../generic.h"
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index b63dec848195..153555724988 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -10,6 +10,7 @@
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/serial_8250.h> 11#include <linux/serial_8250.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/cpu.h>
13 14
14#include <asm/setup.h> 15#include <asm/setup.h>
15#include <asm/mach-types.h> 16#include <asm/mach-types.h>
@@ -130,7 +131,7 @@ static void __init shark_timer_init(void)
130 131
131static void shark_init_early(void) 132static void shark_init_early(void)
132{ 133{
133 disable_hlt(); 134 cpu_idle_poll_ctrl(true);
134} 135}
135 136
136MACHINE_START(SHARK, "Shark") 137MACHINE_START(SHARK, "Shark")
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index f2ec0777cfbe..ff8b7ba9b93c 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -169,7 +169,7 @@ static int usbhsf_get_id(struct platform_device *pdev)
169 return USBHS_GADGET; 169 return USBHS_GADGET;
170} 170}
171 171
172static void usbhsf_power_ctrl(struct platform_device *pdev, 172static int usbhsf_power_ctrl(struct platform_device *pdev,
173 void __iomem *base, int enable) 173 void __iomem *base, int enable)
174{ 174{
175 struct usbhsf_private *priv = usbhsf_get_priv(pdev); 175 struct usbhsf_private *priv = usbhsf_get_priv(pdev);
@@ -223,6 +223,8 @@ static void usbhsf_power_ctrl(struct platform_device *pdev,
223 clk_disable(priv->pci); /* usb work around */ 223 clk_disable(priv->pci); /* usb work around */
224 clk_disable(priv->usb24); /* usb work around */ 224 clk_disable(priv->usb24); /* usb work around */
225 } 225 }
226
227 return 0;
226} 228}
227 229
228static int usbhsf_get_vbus(struct platform_device *pdev) 230static int usbhsf_get_vbus(struct platform_device *pdev)
@@ -239,7 +241,7 @@ static irqreturn_t usbhsf_interrupt(int irq, void *data)
239 return IRQ_HANDLED; 241 return IRQ_HANDLED;
240} 242}
241 243
242static void usbhsf_hardware_exit(struct platform_device *pdev) 244static int usbhsf_hardware_exit(struct platform_device *pdev)
243{ 245{
244 struct usbhsf_private *priv = usbhsf_get_priv(pdev); 246 struct usbhsf_private *priv = usbhsf_get_priv(pdev);
245 247
@@ -264,6 +266,8 @@ static void usbhsf_hardware_exit(struct platform_device *pdev)
264 priv->usbh_base = NULL; 266 priv->usbh_base = NULL;
265 267
266 free_irq(IRQ7, pdev); 268 free_irq(IRQ7, pdev);
269
270 return 0;
267} 271}
268 272
269static int usbhsf_hardware_init(struct platform_device *pdev) 273static int usbhsf_hardware_init(struct platform_device *pdev)
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 7f3a6b7e7b7c..a385f570bbfc 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -155,12 +155,14 @@ static int usbhs_get_vbus(struct platform_device *pdev)
155 return !((1 << 7) & __raw_readw(priv->cr2)); 155 return !((1 << 7) & __raw_readw(priv->cr2));
156} 156}
157 157
158static void usbhs_phy_reset(struct platform_device *pdev) 158static int usbhs_phy_reset(struct platform_device *pdev)
159{ 159{
160 struct usbhs_private *priv = usbhs_get_priv(pdev); 160 struct usbhs_private *priv = usbhs_get_priv(pdev);
161 161
162 /* init phy */ 162 /* init phy */
163 __raw_writew(0x8a0a, priv->cr2); 163 __raw_writew(0x8a0a, priv->cr2);
164
165 return 0;
164} 166}
165 167
166static int usbhs_get_id(struct platform_device *pdev) 168static int usbhs_get_id(struct platform_device *pdev)
@@ -202,7 +204,7 @@ static int usbhs_hardware_init(struct platform_device *pdev)
202 return 0; 204 return 0;
203} 205}
204 206
205static void usbhs_hardware_exit(struct platform_device *pdev) 207static int usbhs_hardware_exit(struct platform_device *pdev)
206{ 208{
207 struct usbhs_private *priv = usbhs_get_priv(pdev); 209 struct usbhs_private *priv = usbhs_get_priv(pdev);
208 210
@@ -210,6 +212,8 @@ static void usbhs_hardware_exit(struct platform_device *pdev)
210 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); 212 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
211 213
212 free_irq(IRQ15, pdev); 214 free_irq(IRQ15, pdev);
215
216 return 0;
213} 217}
214 218
215static u32 usbhs_pipe_cfg[] = { 219static u32 usbhs_pipe_cfg[] = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index db968a585ff0..979237c18dad 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -596,12 +596,14 @@ static int usbhs_get_vbus(struct platform_device *pdev)
596 return usbhs_is_connected(usbhs_get_priv(pdev)); 596 return usbhs_is_connected(usbhs_get_priv(pdev));
597} 597}
598 598
599static void usbhs_phy_reset(struct platform_device *pdev) 599static int usbhs_phy_reset(struct platform_device *pdev)
600{ 600{
601 struct usbhs_private *priv = usbhs_get_priv(pdev); 601 struct usbhs_private *priv = usbhs_get_priv(pdev);
602 602
603 /* init phy */ 603 /* init phy */
604 __raw_writew(0x8a0a, priv->usbcrcaddr); 604 __raw_writew(0x8a0a, priv->usbcrcaddr);
605
606 return 0;
605} 607}
606 608
607static int usbhs0_get_id(struct platform_device *pdev) 609static int usbhs0_get_id(struct platform_device *pdev)
@@ -628,11 +630,13 @@ static int usbhs0_hardware_init(struct platform_device *pdev)
628 return 0; 630 return 0;
629} 631}
630 632
631static void usbhs0_hardware_exit(struct platform_device *pdev) 633static int usbhs0_hardware_exit(struct platform_device *pdev)
632{ 634{
633 struct usbhs_private *priv = usbhs_get_priv(pdev); 635 struct usbhs_private *priv = usbhs_get_priv(pdev);
634 636
635 cancel_delayed_work_sync(&priv->work); 637 cancel_delayed_work_sync(&priv->work);
638
639 return 0;
636} 640}
637 641
638static struct usbhs_private usbhs0_private = { 642static struct usbhs_private usbhs0_private = {
@@ -735,7 +739,7 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
735 return 0; 739 return 0;
736} 740}
737 741
738static void usbhs1_hardware_exit(struct platform_device *pdev) 742static int usbhs1_hardware_exit(struct platform_device *pdev)
739{ 743{
740 struct usbhs_private *priv = usbhs_get_priv(pdev); 744 struct usbhs_private *priv = usbhs_get_priv(pdev);
741 745
@@ -743,6 +747,8 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
743 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); 747 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
744 748
745 free_irq(IRQ8, pdev); 749 free_irq(IRQ8, pdev);
750
751 return 0;
746} 752}
747 753
748static int usbhs1_get_id(struct platform_device *pdev) 754static int usbhs1_get_id(struct platform_device *pdev)
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 9e050268cde4..0afeb5c7061c 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -16,39 +16,22 @@
16#include <asm/cpuidle.h> 16#include <asm/cpuidle.h>
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
20 int index)
21{
22 cpu_do_idle();
23 return 0;
24}
25
26static struct cpuidle_device shmobile_cpuidle_dev;
27static struct cpuidle_driver shmobile_cpuidle_default_driver = { 19static struct cpuidle_driver shmobile_cpuidle_default_driver = {
28 .name = "shmobile_cpuidle", 20 .name = "shmobile_cpuidle",
29 .owner = THIS_MODULE, 21 .owner = THIS_MODULE,
30 .en_core_tk_irqen = 1,
31 .states[0] = ARM_CPUIDLE_WFI_STATE, 22 .states[0] = ARM_CPUIDLE_WFI_STATE,
32 .states[0].enter = shmobile_enter_wfi,
33 .safe_state_index = 0, /* C1 */ 23 .safe_state_index = 0, /* C1 */
34 .state_count = 1, 24 .state_count = 1,
35}; 25};
36 26
37static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver; 27static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
38 28
39void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv) 29void __init shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
40{ 30{
41 cpuidle_drv = drv; 31 cpuidle_drv = drv;
42} 32}
43 33
44int shmobile_cpuidle_init(void) 34int __init shmobile_cpuidle_init(void)
45{ 35{
46 struct cpuidle_device *dev = &shmobile_cpuidle_dev; 36 return cpuidle_register(cpuidle_drv, NULL);
47
48 cpuidle_register_driver(cpuidle_drv);
49
50 dev->state_count = cpuidle_drv->state_count;
51 cpuidle_register_device(dev);
52
53 return 0;
54} 37}
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index e48606d8a2be..362f9b2d2c02 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -13,9 +13,6 @@ extern int shmobile_clk_init(void);
13extern void shmobile_handle_irq_intc(struct pt_regs *); 13extern void shmobile_handle_irq_intc(struct pt_regs *);
14extern struct platform_suspend_ops shmobile_suspend_ops; 14extern struct platform_suspend_ops shmobile_suspend_ops;
15struct cpuidle_driver; 15struct cpuidle_driver;
16struct cpuidle_device;
17extern int shmobile_enter_wfi(struct cpuidle_device *dev,
18 struct cpuidle_driver *drv, int index);
19extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv); 16extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
20 17
21extern void sh7372_init_irq(void); 18extern void sh7372_init_irq(void);
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index a0826a48dd08..dec9293bb90d 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -410,11 +410,9 @@ static int sh7372_enter_a4s(struct cpuidle_device *dev,
410static struct cpuidle_driver sh7372_cpuidle_driver = { 410static struct cpuidle_driver sh7372_cpuidle_driver = {
411 .name = "sh7372_cpuidle", 411 .name = "sh7372_cpuidle",
412 .owner = THIS_MODULE, 412 .owner = THIS_MODULE,
413 .en_core_tk_irqen = 1,
414 .state_count = 5, 413 .state_count = 5,
415 .safe_state_index = 0, /* C1 */ 414 .safe_state_index = 0, /* C1 */
416 .states[0] = ARM_CPUIDLE_WFI_STATE, 415 .states[0] = ARM_CPUIDLE_WFI_STATE,
417 .states[0].enter = shmobile_enter_wfi,
418 .states[1] = { 416 .states[1] = {
419 .name = "C2", 417 .name = "C2",
420 .desc = "Core Standby Mode", 418 .desc = "Core Standby Mode",
@@ -450,12 +448,12 @@ static struct cpuidle_driver sh7372_cpuidle_driver = {
450 }, 448 },
451}; 449};
452 450
453static void sh7372_cpuidle_init(void) 451static void __init sh7372_cpuidle_init(void)
454{ 452{
455 shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver); 453 shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
456} 454}
457#else 455#else
458static void sh7372_cpuidle_init(void) {} 456static void __init sh7372_cpuidle_init(void) {}
459#endif 457#endif
460 458
461#ifdef CONFIG_SUSPEND 459#ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 47d83f7a70b6..5d92b5dd486b 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -12,6 +12,8 @@
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/cpu.h>
16
15#include <asm/io.h> 17#include <asm/io.h>
16#include <asm/system_misc.h> 18#include <asm/system_misc.h>
17 19
@@ -23,13 +25,13 @@ static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
23 25
24static int shmobile_suspend_begin(suspend_state_t state) 26static int shmobile_suspend_begin(suspend_state_t state)
25{ 27{
26 disable_hlt(); 28 cpu_idle_poll_ctrl(true);
27 return 0; 29 return 0;
28} 30}
29 31
30static void shmobile_suspend_end(void) 32static void shmobile_suspend_end(void)
31{ 33{
32 enable_hlt(); 34 cpu_idle_poll_ctrl(false);
33} 35}
34 36
35struct platform_suspend_ops shmobile_suspend_ops = { 37struct platform_suspend_ops shmobile_suspend_ops = {
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d1c4893894ce..dbc653ea851c 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -18,8 +18,8 @@ config ARCH_TEGRA_2x_SOC
18 select PL310_ERRATA_727915 if CACHE_L2X0 18 select PL310_ERRATA_727915 if CACHE_L2X0
19 select PL310_ERRATA_769419 if CACHE_L2X0 19 select PL310_ERRATA_769419 if CACHE_L2X0
20 select USB_ARCH_HAS_EHCI if USB_SUPPORT 20 select USB_ARCH_HAS_EHCI if USB_SUPPORT
21 select USB_ULPI if USB 21 select USB_ULPI if USB_PHY
22 select USB_ULPI_VIEWPORT if USB_SUPPORT 22 select USB_ULPI_VIEWPORT if USB_PHY
23 help 23 help
24 Support for NVIDIA Tegra AP20 and T20 processors, based on the 24 Support for NVIDIA Tegra AP20 and T20 processors, based on the
25 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller 25 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
@@ -37,8 +37,8 @@ config ARCH_TEGRA_3x_SOC
37 select PINCTRL_TEGRA30 37 select PINCTRL_TEGRA30
38 select PL310_ERRATA_769419 if CACHE_L2X0 38 select PL310_ERRATA_769419 if CACHE_L2X0
39 select USB_ARCH_HAS_EHCI if USB_SUPPORT 39 select USB_ARCH_HAS_EHCI if USB_SUPPORT
40 select USB_ULPI if USB 40 select USB_ULPI if USB_PHY
41 select USB_ULPI_VIEWPORT if USB_SUPPORT 41 select USB_ULPI_VIEWPORT if USB_PHY
42 help 42 help
43 Support for NVIDIA Tegra T30 processor family, based on the 43 Support for NVIDIA Tegra T30 processor family, based on the
44 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller 44 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index f6b46ae2b7f8..09b578f9eb84 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += cpuidle-tegra30.o
24endif 24endif
25obj-$(CONFIG_SMP) += platsmp.o headsmp.o 25obj-$(CONFIG_SMP) += platsmp.o headsmp.o
26obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 26obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
27obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
28obj-$(CONFIG_TEGRA_PCI) += pcie.o 27obj-$(CONFIG_TEGRA_PCI) += pcie.o
29 28
30obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-dt-tegra20.o 29obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-dt-tegra20.o
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
deleted file mode 100644
index e3d6e15ff188..000000000000
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ /dev/null
@@ -1,293 +0,0 @@
1/*
2 * arch/arm/mach-tegra/cpu-tegra.c
3 *
4 * Copyright (C) 2010 Google, Inc.
5 *
6 * Author:
7 * Colin Cross <ccross@google.com>
8 * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/sched.h>
25#include <linux/cpufreq.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/err.h>
29#include <linux/clk.h>
30#include <linux/io.h>
31#include <linux/suspend.h>
32
33/* Frequency table index must be sequential starting at 0 */
34static struct cpufreq_frequency_table freq_table[] = {
35 { 0, 216000 },
36 { 1, 312000 },
37 { 2, 456000 },
38 { 3, 608000 },
39 { 4, 760000 },
40 { 5, 816000 },
41 { 6, 912000 },
42 { 7, 1000000 },
43 { 8, CPUFREQ_TABLE_END },
44};
45
46#define NUM_CPUS 2
47
48static struct clk *cpu_clk;
49static struct clk *pll_x_clk;
50static struct clk *pll_p_clk;
51static struct clk *emc_clk;
52
53static unsigned long target_cpu_speed[NUM_CPUS];
54static DEFINE_MUTEX(tegra_cpu_lock);
55static bool is_suspended;
56
57static int tegra_verify_speed(struct cpufreq_policy *policy)
58{
59 return cpufreq_frequency_table_verify(policy, freq_table);
60}
61
62static unsigned int tegra_getspeed(unsigned int cpu)
63{
64 unsigned long rate;
65
66 if (cpu >= NUM_CPUS)
67 return 0;
68
69 rate = clk_get_rate(cpu_clk) / 1000;
70 return rate;
71}
72
73static int tegra_cpu_clk_set_rate(unsigned long rate)
74{
75 int ret;
76
77 /*
78 * Take an extra reference to the main pll so it doesn't turn
79 * off when we move the cpu off of it
80 */
81 clk_prepare_enable(pll_x_clk);
82
83 ret = clk_set_parent(cpu_clk, pll_p_clk);
84 if (ret) {
85 pr_err("Failed to switch cpu to clock pll_p\n");
86 goto out;
87 }
88
89 if (rate == clk_get_rate(pll_p_clk))
90 goto out;
91
92 ret = clk_set_rate(pll_x_clk, rate);
93 if (ret) {
94 pr_err("Failed to change pll_x to %lu\n", rate);
95 goto out;
96 }
97
98 ret = clk_set_parent(cpu_clk, pll_x_clk);
99 if (ret) {
100 pr_err("Failed to switch cpu to clock pll_x\n");
101 goto out;
102 }
103
104out:
105 clk_disable_unprepare(pll_x_clk);
106 return ret;
107}
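/*
 * Note (editorial, not in the original source): the sequence above parks
 * the CPU on pll_p so it has a stable clock while pll_x is re-programmed
 * and relocks, then switches back; the extra prepare/enable reference
 * keeps the common clock framework from gating pll_x during the window in
 * which the CPU is not using it.
 */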
108
109static int tegra_update_cpu_speed(unsigned long rate)
110{
111 int ret = 0;
112 struct cpufreq_freqs freqs;
113
114 freqs.old = tegra_getspeed(0);
115 freqs.new = rate;
116
117 if (freqs.old == freqs.new)
118 return ret;
119
120 /*
121 * Vote on memory bus frequency based on cpu frequency
122 * This sets the minimum frequency, display or avp may request higher
123 */
124 if (rate >= 816000)
125 clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
126 else if (rate >= 456000)
 127		clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150 MHz */
128 else
 129		clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */
130
131 for_each_online_cpu(freqs.cpu)
132 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
133
134#ifdef CONFIG_CPU_FREQ_DEBUG
135 printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
136 freqs.old, freqs.new);
137#endif
138
139 ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
140 if (ret) {
141 pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
142 freqs.new);
143 return ret;
144 }
145
146 for_each_online_cpu(freqs.cpu)
147 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
148
149 return 0;
150}
151
152static unsigned long tegra_cpu_highest_speed(void)
153{
154 unsigned long rate = 0;
155 int i;
156
157 for_each_online_cpu(i)
158 rate = max(rate, target_cpu_speed[i]);
159 return rate;
160}
161
162static int tegra_target(struct cpufreq_policy *policy,
163 unsigned int target_freq,
164 unsigned int relation)
165{
166 unsigned int idx;
167 unsigned int freq;
168 int ret = 0;
169
170 mutex_lock(&tegra_cpu_lock);
171
172 if (is_suspended) {
173 ret = -EBUSY;
174 goto out;
175 }
176
177 cpufreq_frequency_table_target(policy, freq_table, target_freq,
178 relation, &idx);
179
180 freq = freq_table[idx].frequency;
181
182 target_cpu_speed[policy->cpu] = freq;
183
184 ret = tegra_update_cpu_speed(tegra_cpu_highest_speed());
185
186out:
187 mutex_unlock(&tegra_cpu_lock);
188 return ret;
189}
190
191static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
192 void *dummy)
193{
194 mutex_lock(&tegra_cpu_lock);
195 if (event == PM_SUSPEND_PREPARE) {
196 is_suspended = true;
197 pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
198 freq_table[0].frequency);
199 tegra_update_cpu_speed(freq_table[0].frequency);
200 } else if (event == PM_POST_SUSPEND) {
201 is_suspended = false;
202 }
203 mutex_unlock(&tegra_cpu_lock);
204
205 return NOTIFY_OK;
206}
207
208static struct notifier_block tegra_cpu_pm_notifier = {
209 .notifier_call = tegra_pm_notify,
210};
211
212static int tegra_cpu_init(struct cpufreq_policy *policy)
213{
214 if (policy->cpu >= NUM_CPUS)
215 return -EINVAL;
216
217 clk_prepare_enable(emc_clk);
218 clk_prepare_enable(cpu_clk);
219
220 cpufreq_frequency_table_cpuinfo(policy, freq_table);
221 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
222 policy->cur = tegra_getspeed(policy->cpu);
223 target_cpu_speed[policy->cpu] = policy->cur;
224
225 /* FIXME: what's the actual transition time? */
226 policy->cpuinfo.transition_latency = 300 * 1000;
227
228 cpumask_copy(policy->cpus, cpu_possible_mask);
229
230 if (policy->cpu == 0)
231 register_pm_notifier(&tegra_cpu_pm_notifier);
232
233 return 0;
234}
235
236static int tegra_cpu_exit(struct cpufreq_policy *policy)
237{
238 cpufreq_frequency_table_cpuinfo(policy, freq_table);
239 clk_disable_unprepare(emc_clk);
240 return 0;
241}
242
243static struct freq_attr *tegra_cpufreq_attr[] = {
244 &cpufreq_freq_attr_scaling_available_freqs,
245 NULL,
246};
247
248static struct cpufreq_driver tegra_cpufreq_driver = {
249 .verify = tegra_verify_speed,
250 .target = tegra_target,
251 .get = tegra_getspeed,
252 .init = tegra_cpu_init,
253 .exit = tegra_cpu_exit,
254 .name = "tegra",
255 .attr = tegra_cpufreq_attr,
256};
257
258static int __init tegra_cpufreq_init(void)
259{
260 cpu_clk = clk_get_sys(NULL, "cpu");
261 if (IS_ERR(cpu_clk))
262 return PTR_ERR(cpu_clk);
263
264 pll_x_clk = clk_get_sys(NULL, "pll_x");
265 if (IS_ERR(pll_x_clk))
266 return PTR_ERR(pll_x_clk);
267
268 pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
269 if (IS_ERR(pll_p_clk))
270 return PTR_ERR(pll_p_clk);
271
272 emc_clk = clk_get_sys("cpu", "emc");
273 if (IS_ERR(emc_clk)) {
274 clk_put(cpu_clk);
275 return PTR_ERR(emc_clk);
276 }
277
278 return cpufreq_register_driver(&tegra_cpufreq_driver);
279}
280
281static void __exit tegra_cpufreq_exit(void)
282{
283 cpufreq_unregister_driver(&tegra_cpufreq_driver);
284 clk_put(emc_clk);
285 clk_put(cpu_clk);
286}
287
288
289MODULE_AUTHOR("Colin Cross <ccross@android.com>");
290MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
291MODULE_LICENSE("GPL");
292module_init(tegra_cpufreq_init);
293module_exit(tegra_cpufreq_exit);
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index 0f4e8c483b34..1d1c6023f4a2 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -23,39 +23,13 @@
23static struct cpuidle_driver tegra_idle_driver = { 23static struct cpuidle_driver tegra_idle_driver = {
24 .name = "tegra_idle", 24 .name = "tegra_idle",
25 .owner = THIS_MODULE, 25 .owner = THIS_MODULE,
26 .en_core_tk_irqen = 1,
27 .state_count = 1, 26 .state_count = 1,
28 .states = { 27 .states = {
29 [0] = ARM_CPUIDLE_WFI_STATE_PWR(600), 28 [0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
30 }, 29 },
31}; 30};
32 31
33static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
34
35int __init tegra114_cpuidle_init(void) 32int __init tegra114_cpuidle_init(void)
36{ 33{
37 int ret; 34 return cpuidle_register(&tegra_idle_driver, NULL);
38 unsigned int cpu;
39 struct cpuidle_device *dev;
40 struct cpuidle_driver *drv = &tegra_idle_driver;
41
42 ret = cpuidle_register_driver(&tegra_idle_driver);
43 if (ret) {
44 pr_err("CPUidle driver registration failed\n");
45 return ret;
46 }
47
48 for_each_possible_cpu(cpu) {
49 dev = &per_cpu(tegra_idle_device, cpu);
50 dev->cpu = cpu;
51
52 dev->state_count = drv->state_count;
53 ret = cpuidle_register_device(dev);
54 if (ret) {
55 pr_err("CPU%u: CPUidle device registration failed\n",
56 cpu);
57 return ret;
58 }
59 }
60 return 0;
61} 35}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 825ced4f7a40..590ec25855dd 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -43,32 +43,33 @@ static atomic_t abort_barrier;
43static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, 43static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
44 struct cpuidle_driver *drv, 44 struct cpuidle_driver *drv,
45 int index); 45 int index);
46#define TEGRA20_MAX_STATES 2
47#else
48#define TEGRA20_MAX_STATES 1
46#endif 49#endif
47 50
48static struct cpuidle_state tegra_idle_states[] = {
49 [0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
50#ifdef CONFIG_PM_SLEEP
51 [1] = {
52 .enter = tegra20_idle_lp2_coupled,
53 .exit_latency = 5000,
54 .target_residency = 10000,
55 .power_usage = 0,
56 .flags = CPUIDLE_FLAG_TIME_VALID |
57 CPUIDLE_FLAG_COUPLED,
58 .name = "powered-down",
59 .desc = "CPU power gated",
60 },
61#endif
62};
63
64static struct cpuidle_driver tegra_idle_driver = { 51static struct cpuidle_driver tegra_idle_driver = {
65 .name = "tegra_idle", 52 .name = "tegra_idle",
66 .owner = THIS_MODULE, 53 .owner = THIS_MODULE,
67 .en_core_tk_irqen = 1, 54 .states = {
55 ARM_CPUIDLE_WFI_STATE_PWR(600),
56#ifdef CONFIG_PM_SLEEP
57 {
58 .enter = tegra20_idle_lp2_coupled,
59 .exit_latency = 5000,
60 .target_residency = 10000,
61 .power_usage = 0,
62 .flags = CPUIDLE_FLAG_TIME_VALID |
63 CPUIDLE_FLAG_COUPLED,
64 .name = "powered-down",
65 .desc = "CPU power gated",
66 },
67#endif
68 },
69 .state_count = TEGRA20_MAX_STATES,
70 .safe_state_index = 0,
68}; 71};
69 72
70static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
71
72#ifdef CONFIG_PM_SLEEP 73#ifdef CONFIG_PM_SLEEP
73#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
74static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); 75static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
@@ -217,39 +218,8 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
217 218
218int __init tegra20_cpuidle_init(void) 219int __init tegra20_cpuidle_init(void)
219{ 220{
220 int ret;
221 unsigned int cpu;
222 struct cpuidle_device *dev;
223 struct cpuidle_driver *drv = &tegra_idle_driver;
224
225#ifdef CONFIG_PM_SLEEP 221#ifdef CONFIG_PM_SLEEP
226 tegra_tear_down_cpu = tegra20_tear_down_cpu; 222 tegra_tear_down_cpu = tegra20_tear_down_cpu;
227#endif 223#endif
228 224 return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
229 drv->state_count = ARRAY_SIZE(tegra_idle_states);
230 memcpy(drv->states, tegra_idle_states,
231 drv->state_count * sizeof(drv->states[0]));
232
233 ret = cpuidle_register_driver(&tegra_idle_driver);
234 if (ret) {
235 pr_err("CPUidle driver registration failed\n");
236 return ret;
237 }
238
239 for_each_possible_cpu(cpu) {
240 dev = &per_cpu(tegra_idle_device, cpu);
241 dev->cpu = cpu;
242#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
243 dev->coupled_cpus = *cpu_possible_mask;
244#endif
245
246 dev->state_count = drv->state_count;
247 ret = cpuidle_register_device(dev);
248 if (ret) {
249 pr_err("CPU%u: CPUidle device registration failed\n",
250 cpu);
251 return ret;
252 }
253 }
254 return 0;
255} 225}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index 8b50cf4ddd6f..36dc2befa9d8 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -43,7 +43,6 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev,
43static struct cpuidle_driver tegra_idle_driver = { 43static struct cpuidle_driver tegra_idle_driver = {
44 .name = "tegra_idle", 44 .name = "tegra_idle",
45 .owner = THIS_MODULE, 45 .owner = THIS_MODULE,
46 .en_core_tk_irqen = 1,
47#ifdef CONFIG_PM_SLEEP 46#ifdef CONFIG_PM_SLEEP
48 .state_count = 2, 47 .state_count = 2,
49#else 48#else
@@ -65,8 +64,6 @@ static struct cpuidle_driver tegra_idle_driver = {
65 }, 64 },
66}; 65};
67 66
68static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
69
70#ifdef CONFIG_PM_SLEEP 67#ifdef CONFIG_PM_SLEEP
71static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev, 68static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
72 struct cpuidle_driver *drv, 69 struct cpuidle_driver *drv,
@@ -157,32 +154,8 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev,
157 154
158int __init tegra30_cpuidle_init(void) 155int __init tegra30_cpuidle_init(void)
159{ 156{
160 int ret;
161 unsigned int cpu;
162 struct cpuidle_device *dev;
163 struct cpuidle_driver *drv = &tegra_idle_driver;
164
165#ifdef CONFIG_PM_SLEEP 157#ifdef CONFIG_PM_SLEEP
166 tegra_tear_down_cpu = tegra30_tear_down_cpu; 158 tegra_tear_down_cpu = tegra30_tear_down_cpu;
167#endif 159#endif
168 160 return cpuidle_register(&tegra_idle_driver, NULL);
169 ret = cpuidle_register_driver(&tegra_idle_driver);
170 if (ret) {
171 pr_err("CPUidle driver registration failed\n");
172 return ret;
173 }
174
175 for_each_possible_cpu(cpu) {
176 dev = &per_cpu(tegra_idle_device, cpu);
177 dev->cpu = cpu;
178
179 dev->state_count = drv->state_count;
180 ret = cpuidle_register_device(dev);
181 if (ret) {
182 pr_err("CPU%u: CPUidle device registration failed\n",
183 cpu);
184 return ret;
185 }
186 }
187 return 0;
188} 161}
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 2a17bc506cff..ff3c9f016591 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * Authors: Sundar Iyer <sundar.iyer@stericsson.com> 6 * Authors: Sundar Iyer <sundar.iyer@stericsson.com>
7 * Bengt Jonsson <bengt.g.jonsson@stericsson.com> 7 * Bengt Jonsson <bengt.g.jonsson@stericsson.com>
8 * Daniel Willerud <daniel.willerud@stericsson.com>
8 * 9 *
9 * MOP500 board specific initialization for regulators 10 * MOP500 board specific initialization for regulators
10 */ 11 */
@@ -12,6 +13,7 @@
12#include <linux/regulator/machine.h> 13#include <linux/regulator/machine.h>
13#include <linux/regulator/ab8500.h> 14#include <linux/regulator/ab8500.h>
14#include "board-mop500-regulators.h" 15#include "board-mop500-regulators.h"
16#include "id.h"
15 17
16static struct regulator_consumer_supply gpio_en_3v3_consumers[] = { 18static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
17 REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), 19 REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
@@ -53,21 +55,37 @@ struct regulator_init_data tps61052_regulator = {
53}; 55};
54 56
55static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { 57static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
56 /* External displays, connector on board 2v5 power supply */ 58 /* Main display, u8500 R3 uib */
57 REGULATOR_SUPPLY("vaux12v5", "mcde.0"), 59 REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
60 /* Main display, u8500 uib and ST uib */
61 REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
62 /* Secondary display, ST uib */
63 REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
58 /* SFH7741 proximity sensor */ 64 /* SFH7741 proximity sensor */
59 REGULATOR_SUPPLY("vcc", "gpio-keys.0"), 65 REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
60 /* BH1780GLS ambient light sensor */ 66 /* BH1780GLS ambient light sensor */
61 REGULATOR_SUPPLY("vcc", "2-0029"), 67 REGULATOR_SUPPLY("vcc", "2-0029"),
62 /* lsm303dlh accelerometer */ 68 /* lsm303dlh accelerometer */
63 REGULATOR_SUPPLY("vdd", "3-0018"), 69 REGULATOR_SUPPLY("vdd", "2-0018"),
70 /* lsm303dlhc accelerometer */
71 REGULATOR_SUPPLY("vdd", "2-0019"),
64 /* lsm303dlh magnetometer */ 72 /* lsm303dlh magnetometer */
65 REGULATOR_SUPPLY("vdd", "3-001e"), 73 REGULATOR_SUPPLY("vdd", "2-001e"),
66 /* Rohm BU21013 Touchscreen devices */ 74 /* Rohm BU21013 Touchscreen devices */
67 REGULATOR_SUPPLY("avdd", "3-005c"), 75 REGULATOR_SUPPLY("avdd", "3-005c"),
68 REGULATOR_SUPPLY("avdd", "3-005d"), 76 REGULATOR_SUPPLY("avdd", "3-005d"),
69 /* Synaptics RMI4 Touchscreen device */ 77 /* Synaptics RMI4 Touchscreen device */
70 REGULATOR_SUPPLY("vdd", "3-004b"), 78 REGULATOR_SUPPLY("vdd", "3-004b"),
79 /* L3G4200D Gyroscope device */
80 REGULATOR_SUPPLY("vdd", "2-0068"),
81 /* Ambient light sensor device */
82 REGULATOR_SUPPLY("vdd", "3-0029"),
83 /* Pressure sensor device */
84 REGULATOR_SUPPLY("vdd", "2-005c"),
85 /* Cypress TrueTouch Touchscreen device */
86 REGULATOR_SUPPLY("vcpin", "spi8.0"),
87 /* Camera device */
88 REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
71}; 89};
72 90
73static struct regulator_consumer_supply ab8500_vaux2_consumers[] = { 91static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
@@ -75,18 +93,50 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
75 REGULATOR_SUPPLY("vmmc", "sdi4"), 93 REGULATOR_SUPPLY("vmmc", "sdi4"),
76 /* AB8500 audio codec */ 94 /* AB8500 audio codec */
77 REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"), 95 REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
96 /* AB8500 accessory detect 1 */
97 REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
98 /* AB8500 Tv-out device */
99 REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
100 /* AV8100 HDMI device */
101 REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
78}; 102};
79 103
80static struct regulator_consumer_supply ab8500_vaux3_consumers[] = { 104static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
105 REGULATOR_SUPPLY("v-SD-STM", "stm"),
81 /* External MMC slot power */ 106 /* External MMC slot power */
82 REGULATOR_SUPPLY("vmmc", "sdi0"), 107 REGULATOR_SUPPLY("vmmc", "sdi0"),
83}; 108};
84 109
110static struct regulator_consumer_supply ab8505_vaux4_consumers[] = {
111};
112
113static struct regulator_consumer_supply ab8505_vaux5_consumers[] = {
114};
115
116static struct regulator_consumer_supply ab8505_vaux6_consumers[] = {
117};
118
119static struct regulator_consumer_supply ab8505_vaux8_consumers[] = {
120 /* AB8500 audio codec device */
121 REGULATOR_SUPPLY("v-aux8", NULL),
122};
123
124static struct regulator_consumer_supply ab8505_vadc_consumers[] = {
125 /* Internal general-purpose ADC */
126 REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
127 /* ADC for charger */
128 REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
129};
130
85static struct regulator_consumer_supply ab8500_vtvout_consumers[] = { 131static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
86 /* TV-out DENC supply */ 132 /* TV-out DENC supply */
87 REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"), 133 REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
88 /* Internal general-purpose ADC */ 134 /* Internal general-purpose ADC */
89 REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"), 135 REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
136 /* ADC for charger */
137 REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
138 /* AB8500 Tv-out device */
139 REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
90}; 140};
91 141
92static struct regulator_consumer_supply ab8500_vaud_consumers[] = { 142static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
@@ -114,77 +164,90 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
114 REGULATOR_SUPPLY("v-intcore", NULL), 164 REGULATOR_SUPPLY("v-intcore", NULL),
115 /* USB Transceiver */ 165 /* USB Transceiver */
116 REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"), 166 REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
167 /* Handled by abx500 clk driver */
168 REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
169};
170
171static struct regulator_consumer_supply ab8505_usb_consumers[] = {
172 /* HS USB OTG physical interface */
173 REGULATOR_SUPPLY("v-ape", NULL),
117}; 174};
118 175
119static struct regulator_consumer_supply ab8500_vana_consumers[] = { 176static struct regulator_consumer_supply ab8500_vana_consumers[] = {
120 /* External displays, connector on board, 1v8 power supply */ 177 /* DB8500 DSI */
121 REGULATOR_SUPPLY("vsmps2", "mcde.0"), 178 REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
179 REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
180 REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
181 REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
182 REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
183 REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
184 /* DB8500 CSI */
185 REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
122}; 186};
123 187
124/* ab8500 regulator register initialization */ 188/* ab8500 regulator register initialization */
125struct ab8500_regulator_reg_init 189static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
126ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
127 /* 190 /*
128 * VanaRequestCtrl = HP/LP depending on VxRequest 191 * VanaRequestCtrl = HP/LP depending on VxRequest
129 * VextSupply1RequestCtrl = HP/LP depending on VxRequest 192 * VextSupply1RequestCtrl = HP/LP depending on VxRequest
130 */ 193 */
131 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00), 194 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
132 /* 195 /*
133 * VextSupply2RequestCtrl = HP/LP depending on VxRequest 196 * VextSupply2RequestCtrl = HP/LP depending on VxRequest
134 * VextSupply3RequestCtrl = HP/LP depending on VxRequest 197 * VextSupply3RequestCtrl = HP/LP depending on VxRequest
135 * Vaux1RequestCtrl = HP/LP depending on VxRequest 198 * Vaux1RequestCtrl = HP/LP depending on VxRequest
136 * Vaux2RequestCtrl = HP/LP depending on VxRequest 199 * Vaux2RequestCtrl = HP/LP depending on VxRequest
137 */ 200 */
138 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00), 201 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
139 /* 202 /*
140 * Vaux3RequestCtrl = HP/LP depending on VxRequest 203 * Vaux3RequestCtrl = HP/LP depending on VxRequest
141 * SwHPReq = Control through SWValid disabled 204 * SwHPReq = Control through SWValid disabled
142 */ 205 */
143 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00), 206 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
144 /* 207 /*
145 * VanaSysClkReq1HPValid = disabled 208 * VanaSysClkReq1HPValid = disabled
146 * Vaux1SysClkReq1HPValid = disabled 209 * Vaux1SysClkReq1HPValid = disabled
147 * Vaux2SysClkReq1HPValid = disabled 210 * Vaux2SysClkReq1HPValid = disabled
148 * Vaux3SysClkReq1HPValid = disabled 211 * Vaux3SysClkReq1HPValid = disabled
149 */ 212 */
150 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00), 213 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
151 /* 214 /*
152 * VextSupply1SysClkReq1HPValid = disabled 215 * VextSupply1SysClkReq1HPValid = disabled
153 * VextSupply2SysClkReq1HPValid = disabled 216 * VextSupply2SysClkReq1HPValid = disabled
154 * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled 217 * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
155 */ 218 */
156 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40), 219 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
157 /* 220 /*
158 * VanaHwHPReq1Valid = disabled 221 * VanaHwHPReq1Valid = disabled
159 * Vaux1HwHPreq1Valid = disabled 222 * Vaux1HwHPreq1Valid = disabled
160 * Vaux2HwHPReq1Valid = disabled 223 * Vaux2HwHPReq1Valid = disabled
161 * Vaux3HwHPReqValid = disabled 224 * Vaux3HwHPReqValid = disabled
162 */ 225 */
163 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00), 226 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
164 /* 227 /*
165 * VextSupply1HwHPReq1Valid = disabled 228 * VextSupply1HwHPReq1Valid = disabled
166 * VextSupply2HwHPReq1Valid = disabled 229 * VextSupply2HwHPReq1Valid = disabled
167 * VextSupply3HwHPReq1Valid = disabled 230 * VextSupply3HwHPReq1Valid = disabled
168 */ 231 */
169 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00), 232 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
170 /* 233 /*
171 * VanaHwHPReq2Valid = disabled 234 * VanaHwHPReq2Valid = disabled
172 * Vaux1HwHPReq2Valid = disabled 235 * Vaux1HwHPReq2Valid = disabled
173 * Vaux2HwHPReq2Valid = disabled 236 * Vaux2HwHPReq2Valid = disabled
174 * Vaux3HwHPReq2Valid = disabled 237 * Vaux3HwHPReq2Valid = disabled
175 */ 238 */
176 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00), 239 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
177 /* 240 /*
178 * VextSupply1HwHPReq2Valid = disabled 241 * VextSupply1HwHPReq2Valid = disabled
179 * VextSupply2HwHPReq2Valid = disabled 242 * VextSupply2HwHPReq2Valid = disabled
180 * VextSupply3HwHPReq2Valid = HWReq2 controlled 243 * VextSupply3HwHPReq2Valid = HWReq2 controlled
181 */ 244 */
182 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04), 245 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
183 /* 246 /*
184 * VanaSwHPReqValid = disabled 247 * VanaSwHPReqValid = disabled
185 * Vaux1SwHPReqValid = disabled 248 * Vaux1SwHPReqValid = disabled
186 */ 249 */
187 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00), 250 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
188 /* 251 /*
189 * Vaux2SwHPReqValid = disabled 252 * Vaux2SwHPReqValid = disabled
190 * Vaux3SwHPReqValid = disabled 253 * Vaux3SwHPReqValid = disabled
@@ -192,7 +255,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
192 * VextSupply2SwHPReqValid = disabled 255 * VextSupply2SwHPReqValid = disabled
193 * VextSupply3SwHPReqValid = disabled 256 * VextSupply3SwHPReqValid = disabled
194 */ 257 */
195 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00), 258 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
196 /* 259 /*
197 * SysClkReq2Valid1 = SysClkReq2 controlled 260 * SysClkReq2Valid1 = SysClkReq2 controlled
198 * SysClkReq3Valid1 = disabled 261 * SysClkReq3Valid1 = disabled
@@ -202,7 +265,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
202 * SysClkReq7Valid1 = disabled 265 * SysClkReq7Valid1 = disabled
203 * SysClkReq8Valid1 = disabled 266 * SysClkReq8Valid1 = disabled
204 */ 267 */
205 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a), 268 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
206 /* 269 /*
207 * SysClkReq2Valid2 = disabled 270 * SysClkReq2Valid2 = disabled
208 * SysClkReq3Valid2 = disabled 271 * SysClkReq3Valid2 = disabled
@@ -212,7 +275,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
212 * SysClkReq7Valid2 = disabled 275 * SysClkReq7Valid2 = disabled
213 * SysClkReq8Valid2 = disabled 276 * SysClkReq8Valid2 = disabled
214 */ 277 */
215 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20), 278 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
216 /* 279 /*
217 * VTVoutEna = disabled 280 * VTVoutEna = disabled
218 * Vintcore12Ena = disabled 281 * Vintcore12Ena = disabled
@@ -220,66 +283,62 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
220 * Vintcore12LP = inactive (HP) 283 * Vintcore12LP = inactive (HP)
221 * VTVoutLP = inactive (HP) 284 * VTVoutLP = inactive (HP)
222 */ 285 */
223 INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10), 286 INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
224 /* 287 /*
225 * VaudioEna = disabled 288 * VaudioEna = disabled
226 * VdmicEna = disabled 289 * VdmicEna = disabled
227 * Vamic1Ena = disabled 290 * Vamic1Ena = disabled
228 * Vamic2Ena = disabled 291 * Vamic2Ena = disabled
229 */ 292 */
230 INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00), 293 INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
231 /* 294 /*
232 * Vamic1_dzout = high-Z when Vamic1 is disabled 295 * Vamic1_dzout = high-Z when Vamic1 is disabled
233 * Vamic2_dzout = high-Z when Vamic2 is disabled 296 * Vamic2_dzout = high-Z when Vamic2 is disabled
234 */ 297 */
235 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00), 298 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
236 /* 299 /*
237 * VPll = Hw controlled 300 * VPll = Hw controlled (NOTE! PRCMU bits)
238 * VanaRegu = force off 301 * VanaRegu = force off
239 */ 302 */
240 INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02), 303 INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
241 /* 304 /*
242 * VrefDDREna = disabled 305 * VrefDDREna = disabled
243 * VrefDDRSleepMode = inactive (no pulldown) 306 * VrefDDRSleepMode = inactive (no pulldown)
244 */ 307 */
245 INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00), 308 INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
246 /* 309 /*
247 * VextSupply1Regu = HW control 310 * VextSupply1Regu = force LP
248 * VextSupply2Regu = HW control 311 * VextSupply2Regu = force OFF
249 * VextSupply3Regu = HW control 312 * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
250 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0 313 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
251 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0 314 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
252 */ 315 */
253 INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a), 316 INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
254 /* 317 /*
255 * Vaux1Regu = force HP 318 * Vaux1Regu = force HP
256 * Vaux2Regu = force off 319 * Vaux2Regu = force off
257 */ 320 */
258 INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01), 321 INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
259 /* 322 /*
260 * Vaux3regu = force off 323 * Vaux3Regu = force off
261 */ 324 */
262 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00), 325 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
263 /* 326 /*
264 * Vsmps1 = 1.15V 327 * Vaux1Sel = 2.8 V
265 */ 328 */
266 INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24), 329 INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
267 /*
268 * Vaux1Sel = 2.5 V
269 */
270 INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
271 /* 330 /*
272 * Vaux2Sel = 2.9 V 331 * Vaux2Sel = 2.9 V
273 */ 332 */
274 INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d), 333 INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
275 /* 334 /*
276 * Vaux3Sel = 2.91 V 335 * Vaux3Sel = 2.91 V
277 */ 336 */
278 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07), 337 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
279 /* 338 /*
280 * VextSupply12LP = disabled (no LP) 339 * VextSupply12LP = disabled (no LP)
281 */ 340 */
282 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00), 341 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
283 /* 342 /*
284 * Vaux1Disch = short discharge time 343 * Vaux1Disch = short discharge time
285 * Vaux2Disch = short discharge time 344 * Vaux2Disch = short discharge time
@@ -288,33 +347,26 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
288 * VTVoutDisch = short discharge time 347 * VTVoutDisch = short discharge time
289 * VaudioDisch = short discharge time 348 * VaudioDisch = short discharge time
290 */ 349 */
291 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00), 350 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
292 /* 351 /*
293 * VanaDisch = short discharge time 352 * VanaDisch = short discharge time
294 * VdmicPullDownEna = pulldown disabled when Vdmic is disabled 353 * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
295 * VdmicDisch = short discharge time 354 * VdmicDisch = short discharge time
296 */ 355 */
297 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00), 356 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
298}; 357};
299 358
300/* AB8500 regulators */ 359/* AB8500 regulators */
301struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { 360static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
302 /* supplies to the display/camera */ 361 /* supplies to the display/camera */
303 [AB8500_LDO_AUX1] = { 362 [AB8500_LDO_AUX1] = {
304 .constraints = { 363 .constraints = {
305 .name = "V-DISPLAY", 364 .name = "V-DISPLAY",
306 .min_uV = 2500000, 365 .min_uV = 2800000,
307 .max_uV = 2900000, 366 .max_uV = 3300000,
308 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 367 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
309 REGULATOR_CHANGE_STATUS, 368 REGULATOR_CHANGE_STATUS,
310 .boot_on = 1, /* display is on at boot */ 369 .boot_on = 1, /* display is on at boot */
311 /*
312 * This voltage cannot be disabled right now because
313 * it is somehow affecting the external MMC
314 * functionality, though that typically will use
315 * AUX3.
316 */
317 .always_on = 1,
318 }, 370 },
319 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), 371 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
320 .consumer_supplies = ab8500_vaux1_consumers, 372 .consumer_supplies = ab8500_vaux1_consumers,
@@ -326,7 +378,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
326 .min_uV = 1100000, 378 .min_uV = 1100000,
327 .max_uV = 3300000, 379 .max_uV = 3300000,
328 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 380 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
329 REGULATOR_CHANGE_STATUS, 381 REGULATOR_CHANGE_STATUS |
382 REGULATOR_CHANGE_MODE,
383 .valid_modes_mask = REGULATOR_MODE_NORMAL |
384 REGULATOR_MODE_IDLE,
330 }, 385 },
331 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers), 386 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
332 .consumer_supplies = ab8500_vaux2_consumers, 387 .consumer_supplies = ab8500_vaux2_consumers,
@@ -338,7 +393,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
338 .min_uV = 1100000, 393 .min_uV = 1100000,
339 .max_uV = 3300000, 394 .max_uV = 3300000,
340 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 395 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
341 REGULATOR_CHANGE_STATUS, 396 REGULATOR_CHANGE_STATUS |
397 REGULATOR_CHANGE_MODE,
398 .valid_modes_mask = REGULATOR_MODE_NORMAL |
399 REGULATOR_MODE_IDLE,
342 }, 400 },
343 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers), 401 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
344 .consumer_supplies = ab8500_vaux3_consumers, 402 .consumer_supplies = ab8500_vaux3_consumers,
@@ -392,18 +450,614 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
392 [AB8500_LDO_INTCORE] = { 450 [AB8500_LDO_INTCORE] = {
393 .constraints = { 451 .constraints = {
394 .name = "V-INTCORE", 452 .name = "V-INTCORE",
395 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 453 .min_uV = 1250000,
454 .max_uV = 1350000,
455 .input_uV = 1800000,
456 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
457 REGULATOR_CHANGE_STATUS |
458 REGULATOR_CHANGE_MODE |
459 REGULATOR_CHANGE_DRMS,
460 .valid_modes_mask = REGULATOR_MODE_NORMAL |
461 REGULATOR_MODE_IDLE,
396 }, 462 },
397 .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers), 463 .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
398 .consumer_supplies = ab8500_vintcore_consumers, 464 .consumer_supplies = ab8500_vintcore_consumers,
399 }, 465 },
400 /* supply for U8500 CSI/DSI, VANA LDO */ 466 /* supply for U8500 CSI-DSI, VANA LDO */
401 [AB8500_LDO_ANA] = { 467 [AB8500_LDO_ANA] = {
402 .constraints = { 468 .constraints = {
403 .name = "V-CSI/DSI", 469 .name = "V-CSI-DSI",
404 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 470 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
405 }, 471 },
406 .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers), 472 .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
407 .consumer_supplies = ab8500_vana_consumers, 473 .consumer_supplies = ab8500_vana_consumers,
408 }, 474 },
409}; 475};
476
477/* supply for VextSupply3 */
478static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
479 /* SIM supply for 3 V SIM cards */
480 REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
481};
482
483/* extended configuration for VextSupply2, only used for HREFP_V20 boards */
484static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = {
485 .hwreq = true,
486};
487
488/*
489 * AB8500 external regulators
490 */
491static struct regulator_init_data ab8500_ext_regulators[] = {
492 /* fixed Vbat supplies VSMPS1_EXT_1V8 */
493 [AB8500_EXT_SUPPLY1] = {
494 .constraints = {
495 .name = "ab8500-ext-supply1",
496 .min_uV = 1800000,
497 .max_uV = 1800000,
498 .initial_mode = REGULATOR_MODE_IDLE,
499 .boot_on = 1,
500 .always_on = 1,
501 },
502 },
503 /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
504 [AB8500_EXT_SUPPLY2] = {
505 .constraints = {
506 .name = "ab8500-ext-supply2",
507 .min_uV = 1360000,
508 .max_uV = 1360000,
509 },
510 },
511 /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
512 [AB8500_EXT_SUPPLY3] = {
513 .constraints = {
514 .name = "ab8500-ext-supply3",
515 .min_uV = 3400000,
516 .max_uV = 3400000,
517 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
518 .boot_on = 1,
519 },
520 .num_consumer_supplies =
521 ARRAY_SIZE(ab8500_ext_supply3_consumers),
522 .consumer_supplies = ab8500_ext_supply3_consumers,
523 },
524};
525
526/* ab8505 regulator register initialization */
527static struct ab8500_regulator_reg_init ab8505_reg_init[] = {
528 /*
529 * VarmRequestCtrl
530 * VsmpsCRequestCtrl
531 * VsmpsARequestCtrl
532 * VsmpsBRequestCtrl
533 */
534 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL1, 0x00, 0x00),
535 /*
536 * VsafeRequestCtrl
537 * VpllRequestCtrl
538 * VanaRequestCtrl = HP/LP depending on VxRequest
539 */
540 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL2, 0x30, 0x00),
541 /*
542 * Vaux1RequestCtrl = HP/LP depending on VxRequest
543 * Vaux2RequestCtrl = HP/LP depending on VxRequest
544 */
545 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL3, 0xf0, 0x00),
546 /*
547 * Vaux3RequestCtrl = HP/LP depending on VxRequest
548 * SwHPReq = Control through SWValid disabled
549 */
550 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL4, 0x07, 0x00),
551 /*
552 * VsmpsASysClkReq1HPValid
553 * VsmpsBSysClkReq1HPValid
554 * VsafeSysClkReq1HPValid
555 * VanaSysClkReq1HPValid = disabled
556 * VpllSysClkReq1HPValid
557 * Vaux1SysClkReq1HPValid = disabled
558 * Vaux2SysClkReq1HPValid = disabled
559 * Vaux3SysClkReq1HPValid = disabled
560 */
561 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
562 /*
563 * VsmpsCSysClkReq1HPValid
564 * VarmSysClkReq1HPValid
565 * VbbSysClkReq1HPValid
566 * VsmpsMSysClkReq1HPValid
567 */
568 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID2, 0x00, 0x00),
569 /*
570 * VsmpsAHwHPReq1Valid
571 * VsmpsBHwHPReq1Valid
572 * VsafeHwHPReq1Valid
573 * VanaHwHPReq1Valid = disabled
574 * VpllHwHPReq1Valid
575 * Vaux1HwHPreq1Valid = disabled
576 * Vaux2HwHPReq1Valid = disabled
577 * Vaux3HwHPReqValid = disabled
578 */
579 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID1, 0xe8, 0x00),
580 /*
581 * VsmpsMHwHPReq1Valid
582 */
583 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID2, 0x00, 0x00),
584 /*
585 * VsmpsAHwHPReq2Valid
586 * VsmpsBHwHPReq2Valid
587 * VsafeHwHPReq2Valid
588 * VanaHwHPReq2Valid = disabled
589 * VpllHwHPReq2Valid
590 * Vaux1HwHPReq2Valid = disabled
591 * Vaux2HwHPReq2Valid = disabled
592 * Vaux3HwHPReq2Valid = disabled
593 */
594 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID1, 0xe8, 0x00),
595 /*
596 * VsmpsMHwHPReq2Valid
597 */
598 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID2, 0x00, 0x00),
599 /**
600 * VsmpsCSwHPReqValid
601 * VarmSwHPReqValid
602 * VsmpsASwHPReqValid
603 * VsmpsBSwHPReqValid
604 * VsafeSwHPReqValid
605 * VanaSwHPReqValid
606 * VanaSwHPReqValid = disabled
607 * VpllSwHPReqValid
608 * Vaux1SwHPReqValid = disabled
609 */
610 INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID1, 0xa0, 0x00),
611 /*
612 * Vaux2SwHPReqValid = disabled
613 * Vaux3SwHPReqValid = disabled
614 * VsmpsMSwHPReqValid
615 */
616 INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID2, 0x03, 0x00),
617 /*
618 * SysClkReq2Valid1 = SysClkReq2 controlled
619 * SysClkReq3Valid1 = disabled
620 * SysClkReq4Valid1 = SysClkReq4 controlled
621 */
622 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID1, 0x0e, 0x0a),
623 /*
624 * SysClkReq2Valid2 = disabled
625 * SysClkReq3Valid2 = disabled
626 * SysClkReq4Valid2 = disabled
627 */
628 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID2, 0x0e, 0x00),
629 /*
630 * Vaux4SwHPReqValid
631 * Vaux4HwHPReq2Valid
632 * Vaux4HwHPReq1Valid
633 * Vaux4SysClkReq1HPValid
634 */
635 INIT_REGULATOR_REGISTER(AB8505_REGUVAUX4REQVALID, 0x00, 0x00),
636 /*
637 * VadcEna = disabled
638 * VintCore12Ena = disabled
639 * VintCore12Sel = 1.25 V
640 * VintCore12LP = inactive (HP)
641 * VadcLP = inactive (HP)
642 */
643 INIT_REGULATOR_REGISTER(AB8505_REGUMISC1, 0xfe, 0x10),
644 /*
645 * VaudioEna = disabled
646 * Vaux8Ena = disabled
647 * Vamic1Ena = disabled
648 * Vamic2Ena = disabled
649 */
650 INIT_REGULATOR_REGISTER(AB8505_VAUDIOSUPPLY, 0x1e, 0x00),
651 /*
652 * Vamic1_dzout = high-Z when Vamic1 is disabled
653 * Vamic2_dzout = high-Z when Vamic2 is disabled
654 */
655 INIT_REGULATOR_REGISTER(AB8505_REGUCTRL1VAMIC, 0x03, 0x00),
656 /*
657 * VsmpsARegu
658 * VsmpsASelCtrl
659 * VsmpsAAutoMode
660 * VsmpsAPWMMode
661 */
662 INIT_REGULATOR_REGISTER(AB8505_VSMPSAREGU, 0x00, 0x00),
663 /*
664 * VsmpsBRegu
665 * VsmpsBSelCtrl
666 * VsmpsBAutoMode
667 * VsmpsBPWMMode
668 */
669 INIT_REGULATOR_REGISTER(AB8505_VSMPSBREGU, 0x00, 0x00),
670 /*
671 * VsafeRegu
672 * VsafeSelCtrl
673 * VsafeAutoMode
674 * VsafePWMMode
675 */
676 INIT_REGULATOR_REGISTER(AB8505_VSAFEREGU, 0x00, 0x00),
677 /*
678 * VPll = Hw controlled (NOTE! PRCMU bits)
679 * VanaRegu = force off
680 */
681 INIT_REGULATOR_REGISTER(AB8505_VPLLVANAREGU, 0x0f, 0x02),
682 /*
683 * VextSupply1Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
684 * VextSupply2Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
685 * VextSupply3Regu = force OFF (OTP_ExtSupply3LPnPolarity 0)
686 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
687 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
688 */
689 INIT_REGULATOR_REGISTER(AB8505_EXTSUPPLYREGU, 0xff, 0x30),
690 /*
691 * Vaux1Regu = force HP
692 * Vaux2Regu = force off
693 */
694 INIT_REGULATOR_REGISTER(AB8505_VAUX12REGU, 0x0f, 0x01),
695 /*
696 * Vaux3Regu = force off
697 */
698 INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3REGU, 0x03, 0x00),
699 /*
700 * VsmpsASel1
701 */
702 INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL1, 0x00, 0x00),
703 /*
704 * VsmpsASel2
705 */
706 INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL2, 0x00, 0x00),
707 /*
708 * VsmpsASel3
709 */
710 INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL3, 0x00, 0x00),
711 /*
712 * VsmpsBSel1
713 */
714 INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL1, 0x00, 0x00),
715 /*
716 * VsmpsBSel2
717 */
718 INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL2, 0x00, 0x00),
719 /*
720 * VsmpsBSel3
721 */
722 INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL3, 0x00, 0x00),
723 /*
724 * VsafeSel1
725 */
726 INIT_REGULATOR_REGISTER(AB8505_VSAFESEL1, 0x00, 0x00),
727 /*
728 * VsafeSel2
729 */
730 INIT_REGULATOR_REGISTER(AB8505_VSAFESEL2, 0x00, 0x00),
731 /*
732 * VsafeSel3
733 */
734 INIT_REGULATOR_REGISTER(AB8505_VSAFESEL3, 0x00, 0x00),
735 /*
736 * Vaux1Sel = 2.8 V
737 */
738 INIT_REGULATOR_REGISTER(AB8505_VAUX1SEL, 0x0f, 0x0C),
739 /*
740 * Vaux2Sel = 2.9 V
741 */
742 INIT_REGULATOR_REGISTER(AB8505_VAUX2SEL, 0x0f, 0x0d),
743 /*
744 * Vaux3Sel = 2.91 V
745 */
746 INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3SEL, 0x07, 0x07),
747 /*
748 * Vaux4RequestCtrl
749 */
750 INIT_REGULATOR_REGISTER(AB8505_VAUX4REQCTRL, 0x00, 0x00),
751 /*
752 * Vaux4Regu
753 */
754 INIT_REGULATOR_REGISTER(AB8505_VAUX4REGU, 0x00, 0x00),
755 /*
756 * Vaux4Sel
757 */
758 INIT_REGULATOR_REGISTER(AB8505_VAUX4SEL, 0x00, 0x00),
759 /*
760 * Vaux1Disch = short discharge time
761 * Vaux2Disch = short discharge time
762 * Vaux3Disch = short discharge time
763 * Vintcore12Disch = short discharge time
764 * VTVoutDisch = short discharge time
765 * VaudioDisch = short discharge time
766 */
767 INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH, 0xfc, 0x00),
768 /*
769 * VanaDisch = short discharge time
770 * Vaux8PullDownEna = pulldown disabled when Vaux8 is disabled
771 * Vaux8Disch = short discharge time
772 */
773 INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH2, 0x16, 0x00),
774 /*
775 * Vaux4Disch = short discharge time
776 */
777 INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH3, 0x01, 0x00),
778 /*
779 * Vaux5Sel
780 * Vaux5LP
781 * Vaux5Ena
782 * Vaux5Disch
783 * Vaux5DisSfst
784 * Vaux5DisPulld
785 */
786 INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX5, 0x00, 0x00),
787 /*
788 * Vaux6Sel
789 * Vaux6LP
790 * Vaux6Ena
791 * Vaux6DisPulld
792 */
793 INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX6, 0x00, 0x00),
794};
795
796struct regulator_init_data ab8505_regulators[AB8505_NUM_REGULATORS] = {
797 /* supplies to the display/camera */
798 [AB8505_LDO_AUX1] = {
799 .constraints = {
800 .name = "V-DISPLAY",
801 .min_uV = 2800000,
802 .max_uV = 3300000,
803 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
804 REGULATOR_CHANGE_STATUS,
805 .boot_on = 1, /* display is on at boot */
806 },
807 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
808 .consumer_supplies = ab8500_vaux1_consumers,
809 },
810 /* supplies to the on-board eMMC */
811 [AB8505_LDO_AUX2] = {
812 .constraints = {
813 .name = "V-eMMC1",
814 .min_uV = 1100000,
815 .max_uV = 3300000,
816 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
817 REGULATOR_CHANGE_STATUS |
818 REGULATOR_CHANGE_MODE,
819 .valid_modes_mask = REGULATOR_MODE_NORMAL |
820 REGULATOR_MODE_IDLE,
821 },
822 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
823 .consumer_supplies = ab8500_vaux2_consumers,
824 },
825 /* supply for VAUX3, supplies to SDcard slots */
826 [AB8505_LDO_AUX3] = {
827 .constraints = {
828 .name = "V-MMC-SD",
829 .min_uV = 1100000,
830 .max_uV = 3300000,
831 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
832 REGULATOR_CHANGE_STATUS |
833 REGULATOR_CHANGE_MODE,
834 .valid_modes_mask = REGULATOR_MODE_NORMAL |
835 REGULATOR_MODE_IDLE,
836 },
837 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
838 .consumer_supplies = ab8500_vaux3_consumers,
839 },
840 /* supply for VAUX4, supplies to NFC and standalone secure element */
841 [AB8505_LDO_AUX4] = {
842 .constraints = {
843 .name = "V-NFC-SE",
844 .min_uV = 1100000,
845 .max_uV = 3300000,
846 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
847 REGULATOR_CHANGE_STATUS |
848 REGULATOR_CHANGE_MODE,
849 .valid_modes_mask = REGULATOR_MODE_NORMAL |
850 REGULATOR_MODE_IDLE,
851 },
852 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux4_consumers),
853 .consumer_supplies = ab8505_vaux4_consumers,
854 },
855 /* supply for VAUX5, supplies to TBD */
856 [AB8505_LDO_AUX5] = {
857 .constraints = {
858 .name = "V-AUX5",
859 .min_uV = 1050000,
860 .max_uV = 2790000,
861 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
862 REGULATOR_CHANGE_STATUS |
863 REGULATOR_CHANGE_MODE,
864 .valid_modes_mask = REGULATOR_MODE_NORMAL |
865 REGULATOR_MODE_IDLE,
866 },
867 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux5_consumers),
868 .consumer_supplies = ab8505_vaux5_consumers,
869 },
870 /* supply for VAUX6, supplies to TBD */
871 [AB8505_LDO_AUX6] = {
872 .constraints = {
873 .name = "V-AUX6",
874 .min_uV = 1050000,
875 .max_uV = 2790000,
876 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
877 REGULATOR_CHANGE_STATUS |
878 REGULATOR_CHANGE_MODE,
879 .valid_modes_mask = REGULATOR_MODE_NORMAL |
880 REGULATOR_MODE_IDLE,
881 },
882 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux6_consumers),
883 .consumer_supplies = ab8505_vaux6_consumers,
884 },
885 /* supply for gpadc, ADC LDO */
886 [AB8505_LDO_ADC] = {
887 .constraints = {
888 .name = "V-ADC",
889 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
890 },
891 .num_consumer_supplies = ARRAY_SIZE(ab8505_vadc_consumers),
892 .consumer_supplies = ab8505_vadc_consumers,
893 },
894 /* supply for ab8500-vaudio, VAUDIO LDO */
895 [AB8505_LDO_AUDIO] = {
896 .constraints = {
897 .name = "V-AUD",
898 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
899 },
900 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
901 .consumer_supplies = ab8500_vaud_consumers,
902 },
903 /* supply for v-anamic1 VAMic1-LDO */
904 [AB8505_LDO_ANAMIC1] = {
905 .constraints = {
906 .name = "V-AMIC1",
907 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
908 REGULATOR_CHANGE_MODE,
909 .valid_modes_mask = REGULATOR_MODE_NORMAL |
910 REGULATOR_MODE_IDLE,
911 },
912 .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
913 .consumer_supplies = ab8500_vamic1_consumers,
914 },
915 /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
916 [AB8505_LDO_ANAMIC2] = {
917 .constraints = {
918 .name = "V-AMIC2",
919 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
920 REGULATOR_CHANGE_MODE,
921 .valid_modes_mask = REGULATOR_MODE_NORMAL |
922 REGULATOR_MODE_IDLE,
923 },
924 .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
925 .consumer_supplies = ab8500_vamic2_consumers,
926 },
927 /* supply for v-aux8, VAUX8 LDO */
928 [AB8505_LDO_AUX8] = {
929 .constraints = {
930 .name = "V-AUX8",
931 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
932 },
933 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux8_consumers),
934 .consumer_supplies = ab8505_vaux8_consumers,
935 },
936 /* supply for v-intcore12, VINTCORE12 LDO */
937 [AB8505_LDO_INTCORE] = {
938 .constraints = {
939 .name = "V-INTCORE",
940 .min_uV = 1250000,
941 .max_uV = 1350000,
942 .input_uV = 1800000,
943 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
944 REGULATOR_CHANGE_STATUS |
945 REGULATOR_CHANGE_MODE |
946 REGULATOR_CHANGE_DRMS,
947 .valid_modes_mask = REGULATOR_MODE_NORMAL |
948 REGULATOR_MODE_IDLE,
949 },
950 .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
951 .consumer_supplies = ab8500_vintcore_consumers,
952 },
953 /* supply for LDO USB */
954 [AB8505_LDO_USB] = {
955 .constraints = {
956 .name = "V-USB",
957 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
958 REGULATOR_CHANGE_MODE,
959 .valid_modes_mask = REGULATOR_MODE_NORMAL |
960 REGULATOR_MODE_IDLE,
961 },
962 .num_consumer_supplies = ARRAY_SIZE(ab8505_usb_consumers),
963 .consumer_supplies = ab8505_usb_consumers,
964 },
965 /* supply for U8500 CSI-DSI, VANA LDO */
966 [AB8505_LDO_ANA] = {
967 .constraints = {
968 .name = "V-CSI-DSI",
969 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
970 },
971 .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
972 .consumer_supplies = ab8500_vana_consumers,
973 },
974};
975
976struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
977 .reg_init = ab8500_reg_init,
978 .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
979 .regulator = ab8500_regulators,
980 .num_regulator = ARRAY_SIZE(ab8500_regulators),
981 .ext_regulator = ab8500_ext_regulators,
982 .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
983};
984
985/* Use the AB8500 init settings for AB8505 as they are the same right now */
986struct ab8500_regulator_platform_data ab8505_regulator_plat_data = {
987 .reg_init = ab8505_reg_init,
988 .num_reg_init = ARRAY_SIZE(ab8505_reg_init),
989 .regulator = ab8505_regulators,
990 .num_regulator = ARRAY_SIZE(ab8505_regulators),
991};
992
993static void ab8500_modify_reg_init(int id, u8 mask, u8 value)
994{
995 int i;
996
997 if (cpu_is_u8520()) {
998 for (i = ARRAY_SIZE(ab8505_reg_init) - 1; i >= 0; i--) {
999 if (ab8505_reg_init[i].id == id) {
1000 u8 initval = ab8505_reg_init[i].value;
1001 initval = (initval & ~mask) | (value & mask);
1002 ab8505_reg_init[i].value = initval;
1003
1004 BUG_ON(mask & ~ab8505_reg_init[i].mask);
1005 return;
1006 }
1007 }
1008 } else {
1009 for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) {
1010 if (ab8500_reg_init[i].id == id) {
1011 u8 initval = ab8500_reg_init[i].value;
1012 initval = (initval & ~mask) | (value & mask);
1013 ab8500_reg_init[i].value = initval;
1014
1015 BUG_ON(mask & ~ab8500_reg_init[i].mask);
1016 return;
1017 }
1018 }
1019 }
1020
1021 BUG_ON(1);
1022}
1023
1024void mop500_regulator_init(void)
1025{
1026 struct regulator_init_data *regulator;
1027
1028 /*
1029 * Temporarily turn on Vaux2 on 8520 machine
1030 */
1031 if (cpu_is_u8520()) {
1032 /* Vaux2 initialized to be on */
1033 ab8500_modify_reg_init(AB8505_VAUX12REGU, 0x0f, 0x05);
1034 }
1035
1036 /*
1037 * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for
1038 * all HREFP_V20 boards)
1039 */
1040 if (cpu_is_u8500v20()) {
1041 /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */
1042 ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01);
1043
1044 /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */
1045 ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2,
1046 0x20, 0x20);
1047
1048 /* VextSupply2 = force HP at initialization */
1049 ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04);
1050
1051 /* enable VextSupply2 during platform active */
1052 regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
1053 regulator->constraints.always_on = 1;
1054
1055 /* disable VextSupply2 in suspend */
1056 regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
1057 regulator->constraints.state_mem.disabled = 1;
1058 regulator->constraints.state_standby.disabled = 1;
1059
1060 /* enable VextSupply2 HW control (used in suspend) */
1061 regulator->driver_data = (void *)&ab8500_ext_supply2;
1062 }
1063}
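
The ab8500_modify_reg_init() helper added above applies a board-specific override to one entry of the register-init table with a plain read-modify-write on the cached value: clear the bits named by the caller's mask, OR in the new bits, and trap (BUG_ON) if the override reaches outside the bits the table entry declared it owns. A minimal user-space sketch of the same bit arithmetic, with hypothetical names, in case a reader wants to check it in isolation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one register-init table entry. */
struct reg_init {
	uint8_t mask;   /* bits this entry is allowed to own */
	uint8_t value;  /* initial value for those bits      */
};

/* Same read-modify-write idiom as ab8500_modify_reg_init(). */
static void modify_reg_init(struct reg_init *r, uint8_t mask, uint8_t value)
{
	/* refuse overrides that touch bits outside the entry's own mask */
	assert((mask & ~r->mask) == 0);
	r->value = (r->value & ~mask) | (value & mask);
}

int main(void)
{
	/* e.g. VAUX12REGU: mask 0x0f, boot value 0x01 (Vaux1 forced HP) */
	struct reg_init vaux12 = { .mask = 0x0f, .value = 0x01 };

	/* force Vaux2 on as well, as the u8520 branch above does (0x05) */
	modify_reg_init(&vaux12, 0x0f, 0x05);
	printf("value = 0x%02x\n", (unsigned int)vaux12.value); /* 0x05 */
	return 0;
}
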
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index 78a0642a2206..9bece38fe933 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -14,10 +14,11 @@
14#include <linux/regulator/machine.h> 14#include <linux/regulator/machine.h>
15#include <linux/regulator/ab8500.h> 15#include <linux/regulator/ab8500.h>
16 16
17extern struct ab8500_regulator_reg_init 17extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
18ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS]; 18extern struct ab8500_regulator_platform_data ab8505_regulator_plat_data;
19extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
20extern struct regulator_init_data tps61052_regulator; 19extern struct regulator_init_data tps61052_regulator;
21extern struct regulator_init_data gpio_en_3v3_regulator; 20extern struct regulator_init_data gpio_en_3v3_regulator;
22 21
22void mop500_regulator_init(void);
23
23#endif 24#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 87d2d7b38ce9..ce672378a830 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -199,10 +199,7 @@ static struct platform_device snowball_sbnet_dev = {
199 199
200struct ab8500_platform_data ab8500_platdata = { 200struct ab8500_platform_data ab8500_platdata = {
201 .irq_base = MOP500_AB8500_IRQ_BASE, 201 .irq_base = MOP500_AB8500_IRQ_BASE,
202 .regulator_reg_init = ab8500_regulator_reg_init, 202 .regulator = &ab8500_regulator_plat_data,
203 .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
204 .regulator = ab8500_regulators,
205 .num_regulator = ARRAY_SIZE(ab8500_regulators),
206 .gpio = &ab8500_gpio_pdata, 203 .gpio = &ab8500_gpio_pdata,
207 .codec = &ab8500_codec_pdata, 204 .codec = &ab8500_codec_pdata,
208}; 205};
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c
index ce9149302cc3..488e07472d98 100644
--- a/arch/arm/mach-ux500/cpuidle.c
+++ b/arch/arm/mach-ux500/cpuidle.c
@@ -11,7 +11,6 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/cpuidle.h> 13#include <linux/cpuidle.h>
14#include <linux/clockchips.h>
15#include <linux/spinlock.h> 14#include <linux/spinlock.h>
16#include <linux/atomic.h> 15#include <linux/atomic.h>
17#include <linux/smp.h> 16#include <linux/smp.h>
@@ -22,7 +21,6 @@
22 21
23static atomic_t master = ATOMIC_INIT(0); 22static atomic_t master = ATOMIC_INIT(0);
24static DEFINE_SPINLOCK(master_lock); 23static DEFINE_SPINLOCK(master_lock);
25static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);
26 24
27static inline int ux500_enter_idle(struct cpuidle_device *dev, 25static inline int ux500_enter_idle(struct cpuidle_device *dev,
28 struct cpuidle_driver *drv, int index) 26 struct cpuidle_driver *drv, int index)
@@ -30,8 +28,6 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev,
30 int this_cpu = smp_processor_id(); 28 int this_cpu = smp_processor_id();
31 bool recouple = false; 29 bool recouple = false;
32 30
33 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);
34
35 if (atomic_inc_return(&master) == num_online_cpus()) { 31 if (atomic_inc_return(&master) == num_online_cpus()) {
36 32
37 /* With this lock, we prevent the other cpu to exit and enter 33 /* With this lock, we prevent the other cpu to exit and enter
@@ -91,22 +87,20 @@ out:
91 spin_unlock(&master_lock); 87 spin_unlock(&master_lock);
92 } 88 }
93 89
94 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);
95
96 return index; 90 return index;
97} 91}
98 92
99static struct cpuidle_driver ux500_idle_driver = { 93static struct cpuidle_driver ux500_idle_driver = {
100 .name = "ux500_idle", 94 .name = "ux500_idle",
101 .owner = THIS_MODULE, 95 .owner = THIS_MODULE,
102 .en_core_tk_irqen = 1,
103 .states = { 96 .states = {
104 ARM_CPUIDLE_WFI_STATE, 97 ARM_CPUIDLE_WFI_STATE,
105 { 98 {
106 .enter = ux500_enter_idle, 99 .enter = ux500_enter_idle,
107 .exit_latency = 70, 100 .exit_latency = 70,
108 .target_residency = 260, 101 .target_residency = 260,
109 .flags = CPUIDLE_FLAG_TIME_VALID, 102 .flags = CPUIDLE_FLAG_TIME_VALID |
103 CPUIDLE_FLAG_TIMER_STOP,
110 .name = "ApIdle", 104 .name = "ApIdle",
111 .desc = "ARM Retention", 105 .desc = "ARM Retention",
112 }, 106 },
@@ -115,59 +109,13 @@ static struct cpuidle_driver ux500_idle_driver = {
115 .state_count = 2, 109 .state_count = 2,
116}; 110};
117 111
118/*
119 * For each cpu, setup the broadcast timer because we will
120 * need to migrate the timers for the states >= ApIdle.
121 */
122static void ux500_setup_broadcast_timer(void *arg)
123{
124 int cpu = smp_processor_id();
125 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
126}
127
128int __init ux500_idle_init(void) 112int __init ux500_idle_init(void)
129{ 113{
130 int ret, cpu;
131 struct cpuidle_device *device;
132
133 /* Configure wake up reasons */ 114 /* Configure wake up reasons */
134 prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | 115 prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
135 PRCMU_WAKEUP(ABB)); 116 PRCMU_WAKEUP(ABB));
136 117
137 /* 118 return cpuidle_register(&ux500_idle_driver, NULL);
138 * Configure the timer broadcast for each cpu, that must
139 * be done from the cpu context, so we use a smp cross
140 * call with 'on_each_cpu'.
141 */
142 on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);
143
144 ret = cpuidle_register_driver(&ux500_idle_driver);
145 if (ret) {
146 printk(KERN_ERR "failed to register ux500 idle driver\n");
147 return ret;
148 }
149
150 for_each_online_cpu(cpu) {
151 device = &per_cpu(ux500_cpuidle_device, cpu);
152 device->cpu = cpu;
153 ret = cpuidle_register_device(device);
154 if (ret) {
155 printk(KERN_ERR "Failed to register cpuidle "
156 "device for cpu%d\n", cpu);
157 goto out_unregister;
158 }
159 }
160out:
161 return ret;
162
163out_unregister:
164 for_each_online_cpu(cpu) {
165 device = &per_cpu(ux500_cpuidle_device, cpu);
166 cpuidle_unregister_device(device);
167 }
168
169 cpuidle_unregister_driver(&ux500_idle_driver);
170 goto out;
171} 119}
172 120
173device_initcall(ux500_idle_init); 121device_initcall(ux500_idle_init);
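
The ux500 cpuidle hunk above drops the open-coded per-CPU device registration and the manual CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT calls: marking the deep state with CPUIDLE_FLAG_TIMER_STOP lets the cpuidle core migrate to the broadcast timer for that state, and a single cpuidle_register() call registers both the driver and one device per CPU. A condensed, kernel-style sketch of the resulting pattern; it is not buildable outside an ARM kernel tree and all names are illustrative, not the ux500 driver itself:

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

static int example_enter_deep(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* platform-specific retention entry would go here */
	cpu_do_idle();
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
	.states = {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter            = example_enter_deep,
			.exit_latency     = 70,
			.target_residency = 260,
			/* TIMER_STOP: the core handles broadcast enter/exit */
			.flags = CPUIDLE_FLAG_TIME_VALID |
				 CPUIDLE_FLAG_TIMER_STOP,
			.name  = "Retention",
			.desc  = "example retention state",
		},
	},
	.state_count = 2,
};

static int __init example_idle_init(void)
{
	/* registers the driver and a cpuidle device for each online CPU */
	return cpuidle_register(&example_idle_driver, NULL);
}
device_initcall(example_idle_init);
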
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 52d315b792c8..0f1c5e53fb27 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -17,6 +17,9 @@ config ARCH_VEXPRESS
17 select NO_IOPORT 17 select NO_IOPORT
18 select PLAT_VERSATILE 18 select PLAT_VERSATILE
19 select PLAT_VERSATILE_CLCD 19 select PLAT_VERSATILE_CLCD
20 select POWER_RESET
21 select POWER_RESET_VEXPRESS
22 select POWER_SUPPLY
20 select REGULATOR_FIXED_VOLTAGE if REGULATOR 23 select REGULATOR_FIXED_VOLTAGE if REGULATOR
21 select VEXPRESS_CONFIG 24 select VEXPRESS_CONFIG
22 help 25 help
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 80b64971fbdd..42703e8b4d3b 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -4,7 +4,7 @@
4ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 4ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
5 -I$(srctree)/arch/arm/plat-versatile/include 5 -I$(srctree)/arch/arm/plat-versatile/include
6 6
7obj-y := v2m.o reset.o 7obj-y := v2m.o
8obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o 8obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
9obj-$(CONFIG_SMP) += platsmp.o 9obj-$(CONFIG_SMP) += platsmp.o
10obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 10obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-vexpress/reset.c b/arch/arm/mach-vexpress/reset.c
deleted file mode 100644
index 465923aa3819..000000000000
--- a/arch/arm/mach-vexpress/reset.c
+++ /dev/null
@@ -1,141 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 */
13
14#include <linux/jiffies.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/platform_device.h>
18#include <linux/stat.h>
19#include <linux/vexpress.h>
20
21static void vexpress_reset_do(struct device *dev, const char *what)
22{
23 int err = -ENOENT;
24 struct vexpress_config_func *func =
25 vexpress_config_func_get_by_dev(dev);
26
27 if (func) {
28 unsigned long timeout;
29
30 err = vexpress_config_write(func, 0, 0);
31
32 timeout = jiffies + HZ;
33 while (time_before(jiffies, timeout))
34 cpu_relax();
35 }
36
37 dev_emerg(dev, "Unable to %s (%d)\n", what, err);
38}
39
40static struct device *vexpress_power_off_device;
41
42void vexpress_power_off(void)
43{
44 vexpress_reset_do(vexpress_power_off_device, "power off");
45}
46
47static struct device *vexpress_restart_device;
48
49void vexpress_restart(char str, const char *cmd)
50{
51 vexpress_reset_do(vexpress_restart_device, "restart");
52}
53
54static ssize_t vexpress_reset_active_show(struct device *dev,
55 struct device_attribute *attr, char *buf)
56{
57 return sprintf(buf, "%d\n", vexpress_restart_device == dev);
58}
59
60static ssize_t vexpress_reset_active_store(struct device *dev,
61 struct device_attribute *attr, const char *buf, size_t count)
62{
63 long value;
64 int err = kstrtol(buf, 0, &value);
65
66 if (!err && value)
67 vexpress_restart_device = dev;
68
69 return err ? err : count;
70}
71
72DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show,
73 vexpress_reset_active_store);
74
75
76enum vexpress_reset_func { FUNC_RESET, FUNC_SHUTDOWN, FUNC_REBOOT };
77
78static struct of_device_id vexpress_reset_of_match[] = {
79 {
80 .compatible = "arm,vexpress-reset",
81 .data = (void *)FUNC_RESET,
82 }, {
83 .compatible = "arm,vexpress-shutdown",
84 .data = (void *)FUNC_SHUTDOWN
85 }, {
86 .compatible = "arm,vexpress-reboot",
87 .data = (void *)FUNC_REBOOT
88 },
89 {}
90};
91
92static int vexpress_reset_probe(struct platform_device *pdev)
93{
94 enum vexpress_reset_func func;
95 const struct of_device_id *match =
96 of_match_device(vexpress_reset_of_match, &pdev->dev);
97
98 if (match)
99 func = (enum vexpress_reset_func)match->data;
100 else
101 func = pdev->id_entry->driver_data;
102
103 switch (func) {
104 case FUNC_SHUTDOWN:
105 vexpress_power_off_device = &pdev->dev;
106 break;
107 case FUNC_RESET:
108 if (!vexpress_restart_device)
109 vexpress_restart_device = &pdev->dev;
110 device_create_file(&pdev->dev, &dev_attr_active);
111 break;
112 case FUNC_REBOOT:
113 vexpress_restart_device = &pdev->dev;
114 device_create_file(&pdev->dev, &dev_attr_active);
115 break;
116 };
117
118 return 0;
119}
120
121static const struct platform_device_id vexpress_reset_id_table[] = {
122 { .name = "vexpress-reset", .driver_data = FUNC_RESET, },
123 { .name = "vexpress-shutdown", .driver_data = FUNC_SHUTDOWN, },
124 { .name = "vexpress-reboot", .driver_data = FUNC_REBOOT, },
125 {}
126};
127
128static struct platform_driver vexpress_reset_driver = {
129 .probe = vexpress_reset_probe,
130 .driver = {
131 .name = "vexpress-reset",
132 .of_match_table = vexpress_reset_of_match,
133 },
134 .id_table = vexpress_reset_id_table,
135};
136
137static int __init vexpress_reset_init(void)
138{
139 return platform_driver_register(&vexpress_reset_driver);
140}
141device_initcall(vexpress_reset_init);
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 915683cb67d6..eb2b3a627f03 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -21,6 +21,8 @@
21#include <linux/regulator/fixed.h> 21#include <linux/regulator/fixed.h>
22#include <linux/regulator/machine.h> 22#include <linux/regulator/machine.h>
23#include <linux/vexpress.h> 23#include <linux/vexpress.h>
24#include <linux/clk-provider.h>
25#include <linux/clkdev.h>
24 26
25#include <asm/arch_timer.h> 27#include <asm/arch_timer.h>
26#include <asm/mach-types.h> 28#include <asm/mach-types.h>
@@ -361,8 +363,6 @@ static void __init v2m_init(void)
361 for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++) 363 for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++)
362 amba_device_register(v2m_amba_devs[i], &iomem_resource); 364 amba_device_register(v2m_amba_devs[i], &iomem_resource);
363 365
364 pm_power_off = vexpress_power_off;
365
366 ct_desc->init_tile(); 366 ct_desc->init_tile();
367} 367}
368 368
@@ -374,7 +374,6 @@ MACHINE_START(VEXPRESS, "ARM-Versatile Express")
374 .init_irq = v2m_init_irq, 374 .init_irq = v2m_init_irq,
375 .init_time = v2m_timer_init, 375 .init_time = v2m_timer_init,
376 .init_machine = v2m_init, 376 .init_machine = v2m_init,
377 .restart = vexpress_restart,
378MACHINE_END 377MACHINE_END
379 378
380static struct map_desc v2m_rs1_io_desc __initdata = { 379static struct map_desc v2m_rs1_io_desc __initdata = {
@@ -433,7 +432,7 @@ static void __init v2m_dt_timer_init(void)
433{ 432{
434 struct device_node *node = NULL; 433 struct device_node *node = NULL;
435 434
436 vexpress_clk_of_init(); 435 of_clk_init(NULL);
437 436
438 do { 437 do {
439 node = of_find_compatible_node(node, NULL, "arm,sp804"); 438 node = of_find_compatible_node(node, NULL, "arm,sp804");
@@ -441,6 +440,10 @@ static void __init v2m_dt_timer_init(void)
441 if (node) { 440 if (node) {
442 pr_info("Using SP804 '%s' as a clock & events source\n", 441 pr_info("Using SP804 '%s' as a clock & events source\n",
443 node->full_name); 442 node->full_name);
443 WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
444 "timclken1"), "v2m-timer0", "sp804"));
445 WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
446 "timclken2"), "v2m-timer1", "sp804"));
444 v2m_sp804_init(of_iomap(node, 0), 447 v2m_sp804_init(of_iomap(node, 0),
445 irq_of_parse_and_map(node, 0)); 448 irq_of_parse_and_map(node, 0));
446 } 449 }
@@ -464,7 +467,6 @@ static void __init v2m_dt_init(void)
464{ 467{
465 l2x0_of_init(0x00400000, 0xfe0fffff); 468 l2x0_of_init(0x00400000, 0xfe0fffff);
466 of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL); 469 of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL);
467 pm_power_off = vexpress_power_off;
468} 470}
469 471
470static const char * const v2m_dt_match[] __initconst = { 472static const char * const v2m_dt_match[] __initconst = {
@@ -481,5 +483,4 @@ DT_MACHINE_START(VEXPRESS_DT, "ARM-Versatile Express")
481 .init_irq = irqchip_init, 483 .init_irq = irqchip_init,
482 .init_time = v2m_dt_timer_init, 484 .init_time = v2m_dt_timer_init,
483 .init_machine = v2m_dt_init, 485 .init_machine = v2m_dt_init,
484 .restart = vexpress_restart,
485MACHINE_END 486MACHINE_END
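
In the v2m.c hunks above, the platform-specific vexpress_clk_of_init() is replaced by the generic of_clk_init(NULL), and explicit clkdev aliases are registered for the SP804 clocks because the legacy timer code still looks its clocks up with clk_get() by con_id rather than through the device tree. A small kernel-style sketch of that bridging pattern; the con_ids and function name here are illustrative, only the API calls mirror the hunk:

#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
#include <linux/of.h>

/*
 * After of_clk_init(NULL) has registered the DT clock providers, give a
 * non-DT consumer that still calls clk_get(dev, "con_id") an explicit
 * clkdev alias for each clock it expects.
 */
static void __init example_register_timer_clocks(struct device_node *node)
{
	WARN_ON(clk_register_clkdev(of_clk_get_by_name(node, "timclken1"),
				    "example-timer0", "sp804"));
	WARN_ON(clk_register_clkdev(of_clk_get_by_name(node, "timclken2"),
				    "example-timer1", "sp804"));
}
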
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index 7abdb9645c5b..e65a80a1ac75 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/cpu.h>
22 23
23#include <linux/mtd/physmap.h> 24#include <linux/mtd/physmap.h>
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
@@ -531,7 +532,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = {
531 532
532void __init nuc900_board_init(struct platform_device **device, int size) 533void __init nuc900_board_init(struct platform_device **device, int size)
533{ 534{
534 disable_hlt(); 535 cpu_idle_poll_ctrl(true);
535 platform_add_devices(device, size); 536 platform_add_devices(device, size);
536 platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev)); 537 platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev));
537 spi_register_board_info(nuc900_spi_board_info, 538 spi_register_board_info(nuc900_spi_board_info,
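
The w90x900 hunk above swaps the removed disable_hlt() interface for cpu_idle_poll_ctrl(true), which keeps the idle loop polling instead of halting the CPU. Code that only needs this temporarily pairs it with a matching disable call; a minimal kernel-style sketch under that assumption, with hypothetical function names:

#include <linux/cpu.h>

static void example_enter_timing_critical(void)
{
	/* keep CPUs out of low-power idle while timing-critical I/O runs */
	cpu_idle_poll_ctrl(true);
}

static void example_leave_timing_critical(void)
{
	cpu_idle_poll_ctrl(false);
}
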
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad722f1208a5..9a5cdc01fcdf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -99,6 +99,9 @@ void show_mem(unsigned int filter)
99 printk("Mem-info:\n"); 99 printk("Mem-info:\n");
100 show_free_areas(filter); 100 show_free_areas(filter);
101 101
102 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
103 return;
104
102 for_each_bank (i, mi) { 105 for_each_bank (i, mi) {
103 struct membank *bank = &mi->bank[i]; 106 struct membank *bank = &mi->bank[i];
104 unsigned int pfn1, pfn2; 107 unsigned int pfn1, pfn2;
@@ -424,24 +427,6 @@ void __init bootmem_init(void)
424 max_pfn = max_high - PHYS_PFN_OFFSET; 427 max_pfn = max_high - PHYS_PFN_OFFSET;
425} 428}
426 429
427static inline int free_area(unsigned long pfn, unsigned long end, char *s)
428{
429 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
430
431 for (; pfn < end; pfn++) {
432 struct page *page = pfn_to_page(pfn);
433 ClearPageReserved(page);
434 init_page_count(page);
435 __free_page(page);
436 pages++;
437 }
438
439 if (size && s)
440 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
441
442 return pages;
443}
444
445/* 430/*
446 * Poison init memory with an undefined instruction (ARM) or a branch to an 431 * Poison init memory with an undefined instruction (ARM) or a branch to an
447 * undefined instruction (Thumb). 432 * undefined instruction (Thumb).
@@ -534,6 +519,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
534#endif 519#endif
535} 520}
536 521
522#ifdef CONFIG_HIGHMEM
523static inline void free_area_high(unsigned long pfn, unsigned long end)
524{
525 for (; pfn < end; pfn++)
526 free_highmem_page(pfn_to_page(pfn));
527}
528#endif
529
537static void __init free_highpages(void) 530static void __init free_highpages(void)
538{ 531{
539#ifdef CONFIG_HIGHMEM 532#ifdef CONFIG_HIGHMEM
@@ -569,8 +562,7 @@ static void __init free_highpages(void)
569 if (res_end > end) 562 if (res_end > end)
570 res_end = end; 563 res_end = end;
571 if (res_start != start) 564 if (res_start != start)
572 totalhigh_pages += free_area(start, res_start, 565 free_area_high(start, res_start);
573 NULL);
574 start = res_end; 566 start = res_end;
575 if (start == end) 567 if (start == end)
576 break; 568 break;
@@ -578,9 +570,8 @@ static void __init free_highpages(void)
578 570
579 /* And now free anything which remains */ 571 /* And now free anything which remains */
580 if (start < end) 572 if (start < end)
581 totalhigh_pages += free_area(start, end, NULL); 573 free_area_high(start, end);
582 } 574 }
583 totalram_pages += totalhigh_pages;
584#endif 575#endif
585} 576}
586 577
@@ -609,8 +600,7 @@ void __init mem_init(void)
609 600
610#ifdef CONFIG_SA1111 601#ifdef CONFIG_SA1111
611 /* now that our DMA memory is actually so designated, we can free it */ 602 /* now that our DMA memory is actually so designated, we can free it */
612 totalram_pages += free_area(PHYS_PFN_OFFSET, 603 free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
613 __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
614#endif 604#endif
615 605
616 free_highpages(); 606 free_highpages();
@@ -738,16 +728,12 @@ void free_initmem(void)
738 extern char __tcm_start, __tcm_end; 728 extern char __tcm_start, __tcm_end;
739 729
740 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); 730 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
741 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), 731 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
742 __phys_to_pfn(__pa(&__tcm_end)),
743 "TCM link");
744#endif 732#endif
745 733
746 poison_init_mem(__init_begin, __init_end - __init_begin); 734 poison_init_mem(__init_begin, __init_end - __init_begin);
747 if (!machine_is_integrator() && !machine_is_cintegrator()) 735 if (!machine_is_integrator() && !machine_is_cintegrator())
748 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 736 free_initmem_default(0);
749 __phys_to_pfn(__pa(__init_end)),
750 "init");
751} 737}
752 738
753#ifdef CONFIG_BLK_DEV_INITRD 739#ifdef CONFIG_BLK_DEV_INITRD
@@ -758,9 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
758{ 744{
759 if (!keep_initrd) { 745 if (!keep_initrd) {
760 poison_init_mem((void *)start, PAGE_ALIGN(end) - start); 746 poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
761 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 747 free_reserved_area(start, end, 0, "initrd");
762 __phys_to_pfn(__pa(end)),
763 "initrd");
764 } 748 }
765} 749}
766 750
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 51afedda9ab6..03db14d8ace9 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11*/ 11*/
12 12
13#include <linux/amba/pl330.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
@@ -1552,6 +1553,9 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
1552 pd.num_cs = num_cs; 1553 pd.num_cs = num_cs;
1553 pd.src_clk_nr = src_clk_nr; 1554 pd.src_clk_nr = src_clk_nr;
1554 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; 1555 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
1556#ifdef CONFIG_PL330_DMA
1557 pd.filter = pl330_filter;
1558#endif
1555 1559
1556 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); 1560 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
1557} 1561}
@@ -1590,6 +1594,9 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
1590 pd.num_cs = num_cs; 1594 pd.num_cs = num_cs;
1591 pd.src_clk_nr = src_clk_nr; 1595 pd.src_clk_nr = src_clk_nr;
1592 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; 1596 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
1597#ifdef CONFIG_PL330_DMA
1598 pd.filter = pl330_filter;
1599#endif
1593 1600
1594 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); 1601 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
1595} 1602}
@@ -1628,6 +1635,9 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
1628 pd.num_cs = num_cs; 1635 pd.num_cs = num_cs;
1629 pd.src_clk_nr = src_clk_nr; 1636 pd.src_clk_nr = src_clk_nr;
1630 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; 1637 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
1638#ifdef CONFIG_PL330_DMA
1639 pd.filter = pl330_filter;
1640#endif
1631 1641
1632 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); 1642 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
1633} 1643}
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index b885322717a1..9ae507270785 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -15,55 +15,7 @@
15#ifndef __PLAT_S3C_FB_H 15#ifndef __PLAT_S3C_FB_H
16#define __PLAT_S3C_FB_H __FILE__ 16#define __PLAT_S3C_FB_H __FILE__
17 17
18/* S3C_FB_MAX_WIN 18#include <linux/platform_data/video_s3c.h>
19 * Set to the maximum number of windows that any of the supported hardware
20 * can use. Since the platform data uses this for an array size, having it
21 * set to the maximum of any version of the hardware can do is safe.
22 */
23#define S3C_FB_MAX_WIN (5)
24
25/**
26 * struct s3c_fb_pd_win - per window setup data
27 * @xres : The window X size.
28 * @yres : The window Y size.
29 * @virtual_x: The virtual X size.
30 * @virtual_y: The virtual Y size.
31 */
32struct s3c_fb_pd_win {
33 unsigned short default_bpp;
34 unsigned short max_bpp;
35 unsigned short xres;
36 unsigned short yres;
37 unsigned short virtual_x;
38 unsigned short virtual_y;
39};
40
41/**
42 * struct s3c_fb_platdata - S3C driver platform specific information
43 * @setup_gpio: Setup the external GPIO pins to the right state to transfer
44 * the data from the display system to the connected display
45 * device.
46 * @vidcon0: The base vidcon0 values to control the panel data format.
47 * @vidcon1: The base vidcon1 values to control the panel data output.
48 * @vtiming: Video timing when connected to a RGB type panel.
49 * @win: The setup data for each hardware window, or NULL for unused.
50 * @display_mode: The LCD output display mode.
51 *
52 * The platform data supplies the video driver with all the information
53 * it requires to work with the display(s) attached to the machine. It
54 * controls the initial mode, the number of display windows (0 is always
55 * the base framebuffer) that are initialised etc.
56 *
57 */
58struct s3c_fb_platdata {
59 void (*setup_gpio)(void);
60
61 struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN];
62 struct fb_videomode *vtiming;
63
64 u32 vidcon0;
65 u32 vidcon1;
66};
67 19
68/** 20/**
69 * s3c_fb_set_platdata() - Setup the FB device with platform data. 21 * s3c_fb_set_platdata() - Setup the FB device with platform data.
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 29c26a818842..f05f2afa440d 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -1,281 +1 @@
1/* arch/arm/plat-samsung/include/plat/regs-serial.h 1#include <linux/serial_s3c.h>
2 *
3 * From linux/include/asm-arm/hardware/serial_s3c2410.h
4 *
5 * Internal header file for Samsung S3C2410 serial ports (UART0-2)
6 *
7 * Copyright (C) 2002 Shane Nay (shane@minirl.com)
8 *
9 * Additional defines, Copyright 2003 Simtec Electronics (linux@simtec.co.uk)
10 *
11 * Adapted from:
12 *
13 * Internal header file for MX1ADS serial ports (UART1 & 2)
14 *
15 * Copyright (C) 2002 Shane Nay (shane@minirl.com)
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30*/
31
32#ifndef __ASM_ARM_REGS_SERIAL_H
33#define __ASM_ARM_REGS_SERIAL_H
34
35#define S3C24XX_VA_UART0 (S3C_VA_UART)
36#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 )
37#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 )
38#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 )
39
40#define S3C2410_PA_UART0 (S3C24XX_PA_UART)
41#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 )
42#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 )
43#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 )
44
45#define S3C2410_URXH (0x24)
46#define S3C2410_UTXH (0x20)
47#define S3C2410_ULCON (0x00)
48#define S3C2410_UCON (0x04)
49#define S3C2410_UFCON (0x08)
50#define S3C2410_UMCON (0x0C)
51#define S3C2410_UBRDIV (0x28)
52#define S3C2410_UTRSTAT (0x10)
53#define S3C2410_UERSTAT (0x14)
54#define S3C2410_UFSTAT (0x18)
55#define S3C2410_UMSTAT (0x1C)
56
57#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3))
58
59#define S3C2410_LCON_CS5 (0x0)
60#define S3C2410_LCON_CS6 (0x1)
61#define S3C2410_LCON_CS7 (0x2)
62#define S3C2410_LCON_CS8 (0x3)
63#define S3C2410_LCON_CSMASK (0x3)
64
65#define S3C2410_LCON_PNONE (0x0)
66#define S3C2410_LCON_PEVEN (0x5 << 3)
67#define S3C2410_LCON_PODD (0x4 << 3)
68#define S3C2410_LCON_PMASK (0x7 << 3)
69
70#define S3C2410_LCON_STOPB (1<<2)
71#define S3C2410_LCON_IRM (1<<6)
72
73#define S3C2440_UCON_CLKMASK (3<<10)
74#define S3C2440_UCON_CLKSHIFT (10)
75#define S3C2440_UCON_PCLK (0<<10)
76#define S3C2440_UCON_UCLK (1<<10)
77#define S3C2440_UCON_PCLK2 (2<<10)
78#define S3C2440_UCON_FCLK (3<<10)
79#define S3C2443_UCON_EPLL (3<<10)
80
81#define S3C6400_UCON_CLKMASK (3<<10)
82#define S3C6400_UCON_CLKSHIFT (10)
83#define S3C6400_UCON_PCLK (0<<10)
84#define S3C6400_UCON_PCLK2 (2<<10)
85#define S3C6400_UCON_UCLK0 (1<<10)
86#define S3C6400_UCON_UCLK1 (3<<10)
87
88#define S3C2440_UCON2_FCLK_EN (1<<15)
89#define S3C2440_UCON0_DIVMASK (15 << 12)
90#define S3C2440_UCON1_DIVMASK (15 << 12)
91#define S3C2440_UCON2_DIVMASK (7 << 12)
92#define S3C2440_UCON_DIVSHIFT (12)
93
94#define S3C2412_UCON_CLKMASK (3<<10)
95#define S3C2412_UCON_CLKSHIFT (10)
96#define S3C2412_UCON_UCLK (1<<10)
97#define S3C2412_UCON_USYSCLK (3<<10)
98#define S3C2412_UCON_PCLK (0<<10)
99#define S3C2412_UCON_PCLK2 (2<<10)
100
101#define S3C2410_UCON_CLKMASK (1 << 10)
102#define S3C2410_UCON_CLKSHIFT (10)
103#define S3C2410_UCON_UCLK (1<<10)
104#define S3C2410_UCON_SBREAK (1<<4)
105
106#define S3C2410_UCON_TXILEVEL (1<<9)
107#define S3C2410_UCON_RXILEVEL (1<<8)
108#define S3C2410_UCON_TXIRQMODE (1<<2)
109#define S3C2410_UCON_RXIRQMODE (1<<0)
110#define S3C2410_UCON_RXFIFO_TOI (1<<7)
111#define S3C2443_UCON_RXERR_IRQEN (1<<6)
112#define S3C2443_UCON_LOOPBACK (1<<5)
113
114#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
115 S3C2410_UCON_RXILEVEL | \
116 S3C2410_UCON_TXIRQMODE | \
117 S3C2410_UCON_RXIRQMODE | \
118 S3C2410_UCON_RXFIFO_TOI)
119
120#define S3C2410_UFCON_FIFOMODE (1<<0)
121#define S3C2410_UFCON_TXTRIG0 (0<<6)
122#define S3C2410_UFCON_RXTRIG8 (1<<4)
123#define S3C2410_UFCON_RXTRIG12 (2<<4)
124
125/* S3C2440 FIFO trigger levels */
126#define S3C2440_UFCON_RXTRIG1 (0<<4)
127#define S3C2440_UFCON_RXTRIG8 (1<<4)
128#define S3C2440_UFCON_RXTRIG16 (2<<4)
129#define S3C2440_UFCON_RXTRIG32 (3<<4)
130
131#define S3C2440_UFCON_TXTRIG0 (0<<6)
132#define S3C2440_UFCON_TXTRIG16 (1<<6)
133#define S3C2440_UFCON_TXTRIG32 (2<<6)
134#define S3C2440_UFCON_TXTRIG48 (3<<6)
135
136#define S3C2410_UFCON_RESETBOTH (3<<1)
137#define S3C2410_UFCON_RESETTX (1<<2)
138#define S3C2410_UFCON_RESETRX (1<<1)
139
140#define S3C2410_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
141 S3C2410_UFCON_TXTRIG0 | \
142 S3C2410_UFCON_RXTRIG8 )
143
144#define S3C2410_UMCOM_AFC (1<<4)
145#define S3C2410_UMCOM_RTS_LOW (1<<0)
146
147#define S3C2412_UMCON_AFC_63 (0<<5) /* same as s3c2443 */
148#define S3C2412_UMCON_AFC_56 (1<<5)
149#define S3C2412_UMCON_AFC_48 (2<<5)
150#define S3C2412_UMCON_AFC_40 (3<<5)
151#define S3C2412_UMCON_AFC_32 (4<<5)
152#define S3C2412_UMCON_AFC_24 (5<<5)
153#define S3C2412_UMCON_AFC_16 (6<<5)
154#define S3C2412_UMCON_AFC_8 (7<<5)
155
156#define S3C2410_UFSTAT_TXFULL (1<<9)
157#define S3C2410_UFSTAT_RXFULL (1<<8)
158#define S3C2410_UFSTAT_TXMASK (15<<4)
159#define S3C2410_UFSTAT_TXSHIFT (4)
160#define S3C2410_UFSTAT_RXMASK (15<<0)
161#define S3C2410_UFSTAT_RXSHIFT (0)
162
163/* UFSTAT S3C2443 same as S3C2440 */
164#define S3C2440_UFSTAT_TXFULL (1<<14)
165#define S3C2440_UFSTAT_RXFULL (1<<6)
166#define S3C2440_UFSTAT_TXSHIFT (8)
167#define S3C2440_UFSTAT_RXSHIFT (0)
168#define S3C2440_UFSTAT_TXMASK (63<<8)
169#define S3C2440_UFSTAT_RXMASK (63)
170
171#define S3C2410_UTRSTAT_TXE (1<<2)
172#define S3C2410_UTRSTAT_TXFE (1<<1)
173#define S3C2410_UTRSTAT_RXDR (1<<0)
174
175#define S3C2410_UERSTAT_OVERRUN (1<<0)
176#define S3C2410_UERSTAT_FRAME (1<<2)
177#define S3C2410_UERSTAT_BREAK (1<<3)
178#define S3C2443_UERSTAT_PARITY (1<<1)
179
180#define S3C2410_UERSTAT_ANY (S3C2410_UERSTAT_OVERRUN | \
181 S3C2410_UERSTAT_FRAME | \
182 S3C2410_UERSTAT_BREAK)
183
184#define S3C2410_UMSTAT_CTS (1<<0)
185#define S3C2410_UMSTAT_DeltaCTS (1<<2)
186
187#define S3C2443_DIVSLOT (0x2C)
188
189/* S3C64XX interrupt registers. */
190#define S3C64XX_UINTP 0x30
191#define S3C64XX_UINTSP 0x34
192#define S3C64XX_UINTM 0x38
193
194#define S3C64XX_UINTM_RXD (0)
195#define S3C64XX_UINTM_TXD (2)
196#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
197#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
198
199/* Following are specific to S5PV210 */
200#define S5PV210_UCON_CLKMASK (1<<10)
201#define S5PV210_UCON_CLKSHIFT (10)
202#define S5PV210_UCON_PCLK (0<<10)
203#define S5PV210_UCON_UCLK (1<<10)
204
205#define S5PV210_UFCON_TXTRIG0 (0<<8)
206#define S5PV210_UFCON_TXTRIG4 (1<<8)
207#define S5PV210_UFCON_TXTRIG8 (2<<8)
208#define S5PV210_UFCON_TXTRIG16 (3<<8)
209#define S5PV210_UFCON_TXTRIG32 (4<<8)
210#define S5PV210_UFCON_TXTRIG64 (5<<8)
211#define S5PV210_UFCON_TXTRIG128 (6<<8)
212#define S5PV210_UFCON_TXTRIG256 (7<<8)
213
214#define S5PV210_UFCON_RXTRIG1 (0<<4)
215#define S5PV210_UFCON_RXTRIG4 (1<<4)
216#define S5PV210_UFCON_RXTRIG8 (2<<4)
217#define S5PV210_UFCON_RXTRIG16 (3<<4)
218#define S5PV210_UFCON_RXTRIG32 (4<<4)
219#define S5PV210_UFCON_RXTRIG64 (5<<4)
220#define S5PV210_UFCON_RXTRIG128 (6<<4)
221#define S5PV210_UFCON_RXTRIG256 (7<<4)
222
223#define S5PV210_UFSTAT_TXFULL (1<<24)
224#define S5PV210_UFSTAT_RXFULL (1<<8)
225#define S5PV210_UFSTAT_TXMASK (255<<16)
226#define S5PV210_UFSTAT_TXSHIFT (16)
227#define S5PV210_UFSTAT_RXMASK (255<<0)
228#define S5PV210_UFSTAT_RXSHIFT (0)
229
230#define S3C2410_UCON_CLKSEL0 (1 << 0)
231#define S3C2410_UCON_CLKSEL1 (1 << 1)
232#define S3C2410_UCON_CLKSEL2 (1 << 2)
233#define S3C2410_UCON_CLKSEL3 (1 << 3)
234
235/* Default values for s5pv210 UCON and UFCON uart registers */
236#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
237 S3C2410_UCON_RXILEVEL | \
238 S3C2410_UCON_TXIRQMODE | \
239 S3C2410_UCON_RXIRQMODE | \
240 S3C2410_UCON_RXFIFO_TOI | \
241 S3C2443_UCON_RXERR_IRQEN)
242
243#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
244 S5PV210_UFCON_TXTRIG4 | \
245 S5PV210_UFCON_RXTRIG4)
246
247#ifndef __ASSEMBLY__
248
249/* configuration structure for per-machine configurations for the
250 * serial port
251 *
252 * the pointer is setup by the machine specific initialisation from the
253 * arch/arm/mach-s3c2410/ directory.
254*/
255
256struct s3c2410_uartcfg {
257 unsigned char hwport; /* hardware port number */
258 unsigned char unused;
259 unsigned short flags;
260 upf_t uart_flags; /* default uart flags */
261 unsigned int clk_sel;
262
263 unsigned int has_fracval;
264
265 unsigned long ucon; /* value of ucon for port */
266 unsigned long ulcon; /* value of ulcon for port */
267 unsigned long ufcon; /* value of ufcon for port */
268};
269
270/* s3c24xx_uart_devs
271 *
272 * this is exported from the core as we cannot use driver_register(),
273 * or platform_add_device() before the console_initcall()
274*/
275
276extern struct platform_device *s3c24xx_uart_devs[4];
277
278#endif /* __ASSEMBLY__ */
279
280#endif /* __ASM_ARM_REGS_SERIAL_H */
281
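For illustration only, a minimal sketch of how a machine file might consume the header above; the board array name, the include path, and the 8N1 line-control value are assumptions for the sketch, not part of this patch:

#include <plat/regs-serial.h>		/* path assumed */

/* Hypothetical machine setup; values are illustrative only. */
static struct s3c2410_uartcfg example_uartcfgs[] __initdata = {
	[0] = {
		.hwport	= 0,				/* UART0 */
		.flags	= 0,
		.ucon	= S5PV210_UCON_DEFAULT,		/* IRQ-driven RX/TX */
		.ulcon	= 0x03,				/* assumed 8N1 */
		.ufcon	= S5PV210_UFCON_DEFAULT,	/* FIFOs on, trigger 4 */
	},
};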
diff --git a/arch/arm/plat-samsung/include/plat/usb-phy.h b/arch/arm/plat-samsung/include/plat/usb-phy.h
index 959bcdb03a25..ab34dfadb7f9 100644
--- a/arch/arm/plat-samsung/include/plat/usb-phy.h
+++ b/arch/arm/plat-samsung/include/plat/usb-phy.h
@@ -11,10 +11,7 @@
 #ifndef __PLAT_SAMSUNG_USB_PHY_H
 #define __PLAT_SAMSUNG_USB_PHY_H __FILE__
 
-enum s5p_usb_phy_type {
-	S5P_USB_PHY_DEVICE,
-	S5P_USB_PHY_HOST,
-};
+#include <linux/usb/samsung_usb_phy.h>
 
 extern int s5p_usb_phy_init(struct platform_device *pdev, int type);
 extern int s5p_usb_phy_exit(struct platform_device *pdev, int type);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9b6d19f74078..73b6e764034c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -99,7 +99,16 @@ source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
-menu "System Type"
+menu "Platform selection"
+
+config ARCH_VEXPRESS
+	bool "ARMv8 software model (Versatile Express)"
+	select ARCH_REQUIRE_GPIOLIB
+	select COMMON_CLK_VERSATILE
+	select VEXPRESS_CONFIG
+	help
+	  This enables support for the ARMv8 software model (Versatile
+	  Express).
 
 endmenu
 
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 32ac0aef0068..68457e9e0975 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,3 +1,5 @@
+dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
+
 targets += dtbs
 targets += $(dtb-y)
 
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
new file mode 100644
index 000000000000..198682b6de31
--- /dev/null
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -0,0 +1,230 @@
1/*
2 * ARM Ltd.
3 *
4 * ARMv8 Foundation model DTS
5 */
6
7/dts-v1/;
8
9/ {
10 model = "Foundation-v8A";
11 compatible = "arm,foundation-aarch64", "arm,vexpress";
12 interrupt-parent = <&gic>;
13 #address-cells = <2>;
14 #size-cells = <2>;
15
16 chosen { };
17
18 aliases {
19 serial0 = &v2m_serial0;
20 serial1 = &v2m_serial1;
21 serial2 = &v2m_serial2;
22 serial3 = &v2m_serial3;
23 };
24
25 cpus {
26 #address-cells = <1>;
27 #size-cells = <0>;
28
29 cpu@0 {
30 device_type = "cpu";
31 compatible = "arm,armv8";
32 reg = <0x0 0x0>;
33 enable-method = "spin-table";
34 cpu-release-addr = <0x0 0x8000fff8>;
35 };
36 cpu@1 {
37 device_type = "cpu";
38 compatible = "arm,armv8";
39 reg = <0x0 0x1>;
40 enable-method = "spin-table";
41 cpu-release-addr = <0x0 0x8000fff8>;
42 };
43 cpu@2 {
44 device_type = "cpu";
45 compatible = "arm,armv8";
46 reg = <0x0 0x2>;
47 enable-method = "spin-table";
48 cpu-release-addr = <0x0 0x8000fff8>;
49 };
50 cpu@3 {
51 device_type = "cpu";
52 compatible = "arm,armv8";
53 reg = <0x0 0x3>;
54 enable-method = "spin-table";
55 cpu-release-addr = <0x0 0x8000fff8>;
56 };
57 };
58
59 memory@80000000 {
60 device_type = "memory";
61 reg = <0x00000000 0x80000000 0 0x80000000>,
62 <0x00000008 0x80000000 0 0x80000000>;
63 };
64
65 gic: interrupt-controller@2c001000 {
66 compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
67 #interrupt-cells = <3>;
68 #address-cells = <0>;
69 interrupt-controller;
70 reg = <0x0 0x2c001000 0 0x1000>,
71 <0x0 0x2c002000 0 0x1000>,
72 <0x0 0x2c004000 0 0x2000>,
73 <0x0 0x2c006000 0 0x2000>;
74 interrupts = <1 9 0xf04>;
75 };
76
77 timer {
78 compatible = "arm,armv8-timer";
79 interrupts = <1 13 0xff01>,
80 <1 14 0xff01>,
81 <1 11 0xff01>,
82 <1 10 0xff01>;
83 clock-frequency = <100000000>;
84 };
85
86 pmu {
87 compatible = "arm,armv8-pmuv3";
88 interrupts = <0 60 4>,
89 <0 61 4>,
90 <0 62 4>,
91 <0 63 4>;
92 };
93
94 smb {
95 compatible = "arm,vexpress,v2m-p1", "simple-bus";
96 arm,v2m-memory-map = "rs1";
97 #address-cells = <2>; /* SMB chipselect number and offset */
98 #size-cells = <1>;
99
100 ranges = <0 0 0 0x08000000 0x04000000>,
101 <1 0 0 0x14000000 0x04000000>,
102 <2 0 0 0x18000000 0x04000000>,
103 <3 0 0 0x1c000000 0x04000000>,
104 <4 0 0 0x0c000000 0x04000000>,
105 <5 0 0 0x10000000 0x04000000>;
106
107 #interrupt-cells = <1>;
108 interrupt-map-mask = <0 0 63>;
109 interrupt-map = <0 0 0 &gic 0 0 4>,
110 <0 0 1 &gic 0 1 4>,
111 <0 0 2 &gic 0 2 4>,
112 <0 0 3 &gic 0 3 4>,
113 <0 0 4 &gic 0 4 4>,
114 <0 0 5 &gic 0 5 4>,
115 <0 0 6 &gic 0 6 4>,
116 <0 0 7 &gic 0 7 4>,
117 <0 0 8 &gic 0 8 4>,
118 <0 0 9 &gic 0 9 4>,
119 <0 0 10 &gic 0 10 4>,
120 <0 0 11 &gic 0 11 4>,
121 <0 0 12 &gic 0 12 4>,
122 <0 0 13 &gic 0 13 4>,
123 <0 0 14 &gic 0 14 4>,
124 <0 0 15 &gic 0 15 4>,
125 <0 0 16 &gic 0 16 4>,
126 <0 0 17 &gic 0 17 4>,
127 <0 0 18 &gic 0 18 4>,
128 <0 0 19 &gic 0 19 4>,
129 <0 0 20 &gic 0 20 4>,
130 <0 0 21 &gic 0 21 4>,
131 <0 0 22 &gic 0 22 4>,
132 <0 0 23 &gic 0 23 4>,
133 <0 0 24 &gic 0 24 4>,
134 <0 0 25 &gic 0 25 4>,
135 <0 0 26 &gic 0 26 4>,
136 <0 0 27 &gic 0 27 4>,
137 <0 0 28 &gic 0 28 4>,
138 <0 0 29 &gic 0 29 4>,
139 <0 0 30 &gic 0 30 4>,
140 <0 0 31 &gic 0 31 4>,
141 <0 0 32 &gic 0 32 4>,
142 <0 0 33 &gic 0 33 4>,
143 <0 0 34 &gic 0 34 4>,
144 <0 0 35 &gic 0 35 4>,
145 <0 0 36 &gic 0 36 4>,
146 <0 0 37 &gic 0 37 4>,
147 <0 0 38 &gic 0 38 4>,
148 <0 0 39 &gic 0 39 4>,
149 <0 0 40 &gic 0 40 4>,
150 <0 0 41 &gic 0 41 4>,
151 <0 0 42 &gic 0 42 4>;
152
153 ethernet@2,02000000 {
154 compatible = "smsc,lan91c111";
155 reg = <2 0x02000000 0x10000>;
156 interrupts = <15>;
157 };
158
159 v2m_clk24mhz: clk24mhz {
160 compatible = "fixed-clock";
161 #clock-cells = <0>;
162 clock-frequency = <24000000>;
163 clock-output-names = "v2m:clk24mhz";
164 };
165
166 v2m_refclk1mhz: refclk1mhz {
167 compatible = "fixed-clock";
168 #clock-cells = <0>;
169 clock-frequency = <1000000>;
170 clock-output-names = "v2m:refclk1mhz";
171 };
172
173 v2m_refclk32khz: refclk32khz {
174 compatible = "fixed-clock";
175 #clock-cells = <0>;
176 clock-frequency = <32768>;
177 clock-output-names = "v2m:refclk32khz";
178 };
179
180 iofpga@3,00000000 {
181 compatible = "arm,amba-bus", "simple-bus";
182 #address-cells = <1>;
183 #size-cells = <1>;
184 ranges = <0 3 0 0x200000>;
185
186 v2m_sysreg: sysreg@010000 {
187 compatible = "arm,vexpress-sysreg";
188 reg = <0x010000 0x1000>;
189 };
190
191 v2m_serial0: uart@090000 {
192 compatible = "arm,pl011", "arm,primecell";
193 reg = <0x090000 0x1000>;
194 interrupts = <5>;
195 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
196 clock-names = "uartclk", "apb_pclk";
197 };
198
199 v2m_serial1: uart@0a0000 {
200 compatible = "arm,pl011", "arm,primecell";
201 reg = <0x0a0000 0x1000>;
202 interrupts = <6>;
203 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
204 clock-names = "uartclk", "apb_pclk";
205 };
206
207 v2m_serial2: uart@0b0000 {
208 compatible = "arm,pl011", "arm,primecell";
209 reg = <0x0b0000 0x1000>;
210 interrupts = <7>;
211 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
212 clock-names = "uartclk", "apb_pclk";
213 };
214
215 v2m_serial3: uart@0c0000 {
216 compatible = "arm,pl011", "arm,primecell";
217 reg = <0x0c0000 0x1000>;
218 interrupts = <8>;
219 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
220 clock-names = "uartclk", "apb_pclk";
221 };
222
223 virtio_block@0130000 {
224 compatible = "virtio,mmio";
225 reg = <0x130000 0x1000>;
226 interrupts = <42>;
227 };
228 };
229 };
230};
diff --git a/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
new file mode 100644
index 000000000000..572005ea2217
--- /dev/null
+++ b/arch/arm64/boot/dts/rtsm_ve-aemv8a.dts
@@ -0,0 +1,159 @@
1/*
2 * ARM Ltd. Fast Models
3 *
4 * Architecture Envelope Model (AEM) ARMv8-A
5 * ARMAEMv8AMPCT
6 *
7 * RTSM_VE_AEMv8A.lisa
8 */
9
10/dts-v1/;
11
12/memreserve/ 0x80000000 0x00010000;
13
14/ {
15 model = "RTSM_VE_AEMv8A";
16 compatible = "arm,rtsm_ve,aemv8a", "arm,vexpress";
17 interrupt-parent = <&gic>;
18 #address-cells = <2>;
19 #size-cells = <2>;
20
21 chosen { };
22
23 aliases {
24 serial0 = &v2m_serial0;
25 serial1 = &v2m_serial1;
26 serial2 = &v2m_serial2;
27 serial3 = &v2m_serial3;
28 };
29
30 cpus {
31 #address-cells = <2>;
32 #size-cells = <0>;
33
34 cpu@0 {
35 device_type = "cpu";
36 compatible = "arm,armv8";
37 reg = <0x0 0x0>;
38 enable-method = "spin-table";
39 cpu-release-addr = <0x0 0x8000fff8>;
40 };
41 cpu@1 {
42 device_type = "cpu";
43 compatible = "arm,armv8";
44 reg = <0x0 0x1>;
45 enable-method = "spin-table";
46 cpu-release-addr = <0x0 0x8000fff8>;
47 };
48 cpu@2 {
49 device_type = "cpu";
50 compatible = "arm,armv8";
51 reg = <0x0 0x2>;
52 enable-method = "spin-table";
53 cpu-release-addr = <0x0 0x8000fff8>;
54 };
55 cpu@3 {
56 device_type = "cpu";
57 compatible = "arm,armv8";
58 reg = <0x0 0x3>;
59 enable-method = "spin-table";
60 cpu-release-addr = <0x0 0x8000fff8>;
61 };
62 };
63
64 memory@80000000 {
65 device_type = "memory";
66 reg = <0x00000000 0x80000000 0 0x80000000>,
67 <0x00000008 0x80000000 0 0x80000000>;
68 };
69
70 gic: interrupt-controller@2c001000 {
71 compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
72 #interrupt-cells = <3>;
73 #address-cells = <0>;
74 interrupt-controller;
75 reg = <0x0 0x2c001000 0 0x1000>,
76 <0x0 0x2c002000 0 0x1000>,
77 <0x0 0x2c004000 0 0x2000>,
78 <0x0 0x2c006000 0 0x2000>;
79 interrupts = <1 9 0xf04>;
80 };
81
82 timer {
83 compatible = "arm,armv8-timer";
84 interrupts = <1 13 0xff01>,
85 <1 14 0xff01>,
86 <1 11 0xff01>,
87 <1 10 0xff01>;
88 clock-frequency = <100000000>;
89 };
90
91 pmu {
92 compatible = "arm,armv8-pmuv3";
93 interrupts = <0 60 4>,
94 <0 61 4>,
95 <0 62 4>,
96 <0 63 4>;
97 };
98
99 smb {
100 compatible = "simple-bus";
101
102 #address-cells = <2>;
103 #size-cells = <1>;
104 ranges = <0 0 0 0x08000000 0x04000000>,
105 <1 0 0 0x14000000 0x04000000>,
106 <2 0 0 0x18000000 0x04000000>,
107 <3 0 0 0x1c000000 0x04000000>,
108 <4 0 0 0x0c000000 0x04000000>,
109 <5 0 0 0x10000000 0x04000000>;
110
111 #interrupt-cells = <1>;
112 interrupt-map-mask = <0 0 63>;
113 interrupt-map = <0 0 0 &gic 0 0 4>,
114 <0 0 1 &gic 0 1 4>,
115 <0 0 2 &gic 0 2 4>,
116 <0 0 3 &gic 0 3 4>,
117 <0 0 4 &gic 0 4 4>,
118 <0 0 5 &gic 0 5 4>,
119 <0 0 6 &gic 0 6 4>,
120 <0 0 7 &gic 0 7 4>,
121 <0 0 8 &gic 0 8 4>,
122 <0 0 9 &gic 0 9 4>,
123 <0 0 10 &gic 0 10 4>,
124 <0 0 11 &gic 0 11 4>,
125 <0 0 12 &gic 0 12 4>,
126 <0 0 13 &gic 0 13 4>,
127 <0 0 14 &gic 0 14 4>,
128 <0 0 15 &gic 0 15 4>,
129 <0 0 16 &gic 0 16 4>,
130 <0 0 17 &gic 0 17 4>,
131 <0 0 18 &gic 0 18 4>,
132 <0 0 19 &gic 0 19 4>,
133 <0 0 20 &gic 0 20 4>,
134 <0 0 21 &gic 0 21 4>,
135 <0 0 22 &gic 0 22 4>,
136 <0 0 23 &gic 0 23 4>,
137 <0 0 24 &gic 0 24 4>,
138 <0 0 25 &gic 0 25 4>,
139 <0 0 26 &gic 0 26 4>,
140 <0 0 27 &gic 0 27 4>,
141 <0 0 28 &gic 0 28 4>,
142 <0 0 29 &gic 0 29 4>,
143 <0 0 30 &gic 0 30 4>,
144 <0 0 31 &gic 0 31 4>,
145 <0 0 32 &gic 0 32 4>,
146 <0 0 33 &gic 0 33 4>,
147 <0 0 34 &gic 0 34 4>,
148 <0 0 35 &gic 0 35 4>,
149 <0 0 36 &gic 0 36 4>,
150 <0 0 37 &gic 0 37 4>,
151 <0 0 38 &gic 0 38 4>,
152 <0 0 39 &gic 0 39 4>,
153 <0 0 40 &gic 0 40 4>,
154 <0 0 41 &gic 0 41 4>,
155 <0 0 42 &gic 0 42 4>;
156
157 /include/ "rtsm_ve-motherboard.dtsi"
158 };
159};
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
new file mode 100644
index 000000000000..b45e5f39f577
--- /dev/null
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -0,0 +1,234 @@
1/*
2 * ARM Ltd. Fast Models
3 *
4 * Versatile Express (VE) system model
5 * Motherboard component
6 *
7 * VEMotherBoard.lisa
8 */
9
10 motherboard {
11 arm,v2m-memory-map = "rs1";
12 compatible = "arm,vexpress,v2m-p1", "simple-bus";
13 #address-cells = <2>; /* SMB chipselect number and offset */
14 #size-cells = <1>;
15 #interrupt-cells = <1>;
16 ranges;
17
18 flash@0,00000000 {
19 compatible = "arm,vexpress-flash", "cfi-flash";
20 reg = <0 0x00000000 0x04000000>,
21 <4 0x00000000 0x04000000>;
22 bank-width = <4>;
23 };
24
25 vram@2,00000000 {
26 compatible = "arm,vexpress-vram";
27 reg = <2 0x00000000 0x00800000>;
28 };
29
30 ethernet@2,02000000 {
31 compatible = "smsc,lan91c111";
32 reg = <2 0x02000000 0x10000>;
33 interrupts = <15>;
34 };
35
36 v2m_clk24mhz: clk24mhz {
37 compatible = "fixed-clock";
38 #clock-cells = <0>;
39 clock-frequency = <24000000>;
40 clock-output-names = "v2m:clk24mhz";
41 };
42
43 v2m_refclk1mhz: refclk1mhz {
44 compatible = "fixed-clock";
45 #clock-cells = <0>;
46 clock-frequency = <1000000>;
47 clock-output-names = "v2m:refclk1mhz";
48 };
49
50 v2m_refclk32khz: refclk32khz {
51 compatible = "fixed-clock";
52 #clock-cells = <0>;
53 clock-frequency = <32768>;
54 clock-output-names = "v2m:refclk32khz";
55 };
56
57 iofpga@3,00000000 {
58 compatible = "arm,amba-bus", "simple-bus";
59 #address-cells = <1>;
60 #size-cells = <1>;
61 ranges = <0 3 0 0x200000>;
62
63 v2m_sysreg: sysreg@010000 {
64 compatible = "arm,vexpress-sysreg";
65 reg = <0x010000 0x1000>;
66 gpio-controller;
67 #gpio-cells = <2>;
68 };
69
70 v2m_sysctl: sysctl@020000 {
71 compatible = "arm,sp810", "arm,primecell";
72 reg = <0x020000 0x1000>;
73 clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&v2m_clk24mhz>;
74 clock-names = "refclk", "timclk", "apb_pclk";
75 #clock-cells = <1>;
76 clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
77 };
78
79 aaci@040000 {
80 compatible = "arm,pl041", "arm,primecell";
81 reg = <0x040000 0x1000>;
82 interrupts = <11>;
83 clocks = <&v2m_clk24mhz>;
84 clock-names = "apb_pclk";
85 };
86
87 mmci@050000 {
88 compatible = "arm,pl180", "arm,primecell";
89 reg = <0x050000 0x1000>;
90 interrupts = <9 10>;
91 cd-gpios = <&v2m_sysreg 0 0>;
92 wp-gpios = <&v2m_sysreg 1 0>;
93 max-frequency = <12000000>;
94 vmmc-supply = <&v2m_fixed_3v3>;
95 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
96 clock-names = "mclk", "apb_pclk";
97 };
98
99 kmi@060000 {
100 compatible = "arm,pl050", "arm,primecell";
101 reg = <0x060000 0x1000>;
102 interrupts = <12>;
103 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
104 clock-names = "KMIREFCLK", "apb_pclk";
105 };
106
107 kmi@070000 {
108 compatible = "arm,pl050", "arm,primecell";
109 reg = <0x070000 0x1000>;
110 interrupts = <13>;
111 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
112 clock-names = "KMIREFCLK", "apb_pclk";
113 };
114
115 v2m_serial0: uart@090000 {
116 compatible = "arm,pl011", "arm,primecell";
117 reg = <0x090000 0x1000>;
118 interrupts = <5>;
119 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
120 clock-names = "uartclk", "apb_pclk";
121 };
122
123 v2m_serial1: uart@0a0000 {
124 compatible = "arm,pl011", "arm,primecell";
125 reg = <0x0a0000 0x1000>;
126 interrupts = <6>;
127 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
128 clock-names = "uartclk", "apb_pclk";
129 };
130
131 v2m_serial2: uart@0b0000 {
132 compatible = "arm,pl011", "arm,primecell";
133 reg = <0x0b0000 0x1000>;
134 interrupts = <7>;
135 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
136 clock-names = "uartclk", "apb_pclk";
137 };
138
139 v2m_serial3: uart@0c0000 {
140 compatible = "arm,pl011", "arm,primecell";
141 reg = <0x0c0000 0x1000>;
142 interrupts = <8>;
143 clocks = <&v2m_clk24mhz>, <&v2m_clk24mhz>;
144 clock-names = "uartclk", "apb_pclk";
145 };
146
147 wdt@0f0000 {
148 compatible = "arm,sp805", "arm,primecell";
149 reg = <0x0f0000 0x1000>;
150 interrupts = <0>;
151 clocks = <&v2m_refclk32khz>, <&v2m_clk24mhz>;
152 clock-names = "wdogclk", "apb_pclk";
153 };
154
155 v2m_timer01: timer@110000 {
156 compatible = "arm,sp804", "arm,primecell";
157 reg = <0x110000 0x1000>;
158 interrupts = <2>;
159 clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_clk24mhz>;
160 clock-names = "timclken1", "timclken2", "apb_pclk";
161 };
162
163 v2m_timer23: timer@120000 {
164 compatible = "arm,sp804", "arm,primecell";
165 reg = <0x120000 0x1000>;
166 interrupts = <3>;
167 clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&v2m_clk24mhz>;
168 clock-names = "timclken1", "timclken2", "apb_pclk";
169 };
170
171 rtc@170000 {
172 compatible = "arm,pl031", "arm,primecell";
173 reg = <0x170000 0x1000>;
174 interrupts = <4>;
175 clocks = <&v2m_clk24mhz>;
176 clock-names = "apb_pclk";
177 };
178
179 clcd@1f0000 {
180 compatible = "arm,pl111", "arm,primecell";
181 reg = <0x1f0000 0x1000>;
182 interrupts = <14>;
183 clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
184 clock-names = "clcdclk", "apb_pclk";
185 };
186 };
187
188 v2m_fixed_3v3: fixedregulator@0 {
189 compatible = "regulator-fixed";
190 regulator-name = "3V3";
191 regulator-min-microvolt = <3300000>;
192 regulator-max-microvolt = <3300000>;
193 regulator-always-on;
194 };
195
196 mcc {
197 compatible = "arm,vexpress,config-bus", "simple-bus";
198 arm,vexpress,config-bridge = <&v2m_sysreg>;
199
200 v2m_oscclk1: osc@1 {
201 /* CLCD clock */
202 compatible = "arm,vexpress-osc";
203 arm,vexpress-sysreg,func = <1 1>;
204 freq-range = <23750000 63500000>;
205 #clock-cells = <0>;
206 clock-output-names = "v2m:oscclk1";
207 };
208
209 reset@0 {
210 compatible = "arm,vexpress-reset";
211 arm,vexpress-sysreg,func = <5 0>;
212 };
213
214 muxfpga@0 {
215 compatible = "arm,vexpress-muxfpga";
216 arm,vexpress-sysreg,func = <7 0>;
217 };
218
219 shutdown@0 {
220 compatible = "arm,vexpress-shutdown";
221 arm,vexpress-sysreg,func = <8 0>;
222 };
223
224 reboot@0 {
225 compatible = "arm,vexpress-reboot";
226 arm,vexpress-sysreg,func = <9 0>;
227 };
228
229 dvimode@0 {
230 compatible = "arm,vexpress-dvimode";
231 arm,vexpress-sysreg,func = <11 0>;
232 };
233 };
234 };
diff --git a/arch/arm64/boot/dts/skeleton.dtsi b/arch/arm64/boot/dts/skeleton.dtsi
new file mode 100644
index 000000000000..38ead821bb42
--- /dev/null
+++ b/arch/arm64/boot/dts/skeleton.dtsi
@@ -0,0 +1,13 @@
1/*
2 * Skeleton device tree; the bare minimum needed to boot; just include and
3 * add a compatible value. The bootloader will typically populate the memory
4 * node.
5 */
6
7/ {
8 #address-cells = <2>;
9 #size-cells = <1>;
10 chosen { };
11 aliases { };
12 memory { device_type = "memory"; reg = <0 0 0>; };
13};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 09bef29f3a09..8d9696adb440 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -23,6 +23,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_VEXPRESS=y
 CONFIG_SMP=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_CMDLINE="console=ttyAMA0"
@@ -47,11 +48,14 @@ CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_MII=y
+CONFIG_SMC91X=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_FB=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index e5fe4f99fe10..79a642d199f2 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -39,7 +39,6 @@ generic-y += shmbuf.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
-generic-y += string.h
 generic-y += switch_to.h
 generic-y += swab.h
 generic-y += termbits.h
@@ -49,4 +48,5 @@ generic-y += trace_clock.h
 generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += vga.h
 generic-y += xor.h
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 5e693073b030..aa5b59d6ba43 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -32,6 +32,16 @@
 #error only <linux/bitops.h> can be included directly
 #endif
 
+/*
+ * Little endian assembly atomic bitops.
+ */
+extern void set_bit(int nr, volatile unsigned long *p);
+extern void clear_bit(int nr, volatile unsigned long *p);
+extern void change_bit(int nr, volatile unsigned long *p);
+extern int test_and_set_bit(int nr, volatile unsigned long *p);
+extern int test_and_clear_bit(int nr, volatile unsigned long *p);
+extern int test_and_change_bit(int nr, volatile unsigned long *p);
+
 #include <asm-generic/bitops/builtin-__ffs.h>
 #include <asm-generic/bitops/builtin-ffs.h>
 #include <asm-generic/bitops/builtin-__fls.h>
@@ -45,9 +55,13 @@
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
 
-#include <asm-generic/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
-#include <asm-generic/bitops/ext2-atomic.h>
+
+/*
+ * Ext2 is defined to use little-endian byte ordering.
+ */
+#define ext2_set_bit_atomic(lock, nr, p)	test_and_set_bit_le(nr, p)
+#define ext2_clear_bit_atomic(lock, nr, p)	test_and_clear_bit_le(nr, p)
 
 #endif /* __ASM_BITOPS_H */
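As a quick sketch of what the newly exported out-of-line bitops are used for; the bitmap and helper below are illustrative, not taken from this patch:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

static DECLARE_BITMAP(pending_mask, 64);	/* example 64-bit bitmap */

static void mark_pending(int nr)
{
	/* atomic read-modify-write, implemented by the assembly helpers */
	if (!test_and_set_bit(nr, pending_mask))
		pr_debug("bit %d newly set\n", nr);
}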
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 968b5cbfc260..8a8ce0e73a38 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -170,4 +170,7 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 				   (unsigned long)(n),		\
 				   sizeof(*(ptr))))
 
+#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
+#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
+
 #endif /* __ASM_CMPXCHG_H */
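A small sketch of the kind of caller the 64-bit wrappers enable; the function is illustrative, and works because on arm64 cmpxchg() already handles 8-byte operands, so cmpxchg64() can simply alias it:

#include <linux/atomic.h>

static void stat_add64(u64 *counter, u64 delta)
{
	u64 old;

	/* lock-free accumulate; retry if another CPU raced with us */
	do {
		old = *counter;
	} while (cmpxchg64(counter, old, old + delta) != old);
}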
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 618b450e5a1d..899af807ef0f 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -35,14 +35,16 @@ typedef s32 compat_clock_t;
35typedef s32 compat_pid_t; 35typedef s32 compat_pid_t;
36typedef u32 __compat_uid_t; 36typedef u32 __compat_uid_t;
37typedef u32 __compat_gid_t; 37typedef u32 __compat_gid_t;
38typedef u16 __compat_uid16_t;
39typedef u16 __compat_gid16_t;
38typedef u32 __compat_uid32_t; 40typedef u32 __compat_uid32_t;
39typedef u32 __compat_gid32_t; 41typedef u32 __compat_gid32_t;
40typedef u32 compat_mode_t; 42typedef u16 compat_mode_t;
41typedef u32 compat_ino_t; 43typedef u32 compat_ino_t;
42typedef u32 compat_dev_t; 44typedef u32 compat_dev_t;
43typedef s32 compat_off_t; 45typedef s32 compat_off_t;
44typedef s64 compat_loff_t; 46typedef s64 compat_loff_t;
45typedef s16 compat_nlink_t; 47typedef s32 compat_nlink_t;
46typedef u16 compat_ipc_pid_t; 48typedef u16 compat_ipc_pid_t;
47typedef s32 compat_daddr_t; 49typedef s32 compat_daddr_t;
48typedef u32 compat_caddr_t; 50typedef u32 compat_caddr_t;
@@ -50,9 +52,11 @@ typedef __kernel_fsid_t compat_fsid_t;
50typedef s32 compat_key_t; 52typedef s32 compat_key_t;
51typedef s32 compat_timer_t; 53typedef s32 compat_timer_t;
52 54
55typedef s16 compat_short_t;
53typedef s32 compat_int_t; 56typedef s32 compat_int_t;
54typedef s32 compat_long_t; 57typedef s32 compat_long_t;
55typedef s64 compat_s64; 58typedef s64 compat_s64;
59typedef u16 compat_ushort_t;
56typedef u32 compat_uint_t; 60typedef u32 compat_uint_t;
57typedef u32 compat_ulong_t; 61typedef u32 compat_ulong_t;
58typedef u64 compat_u64; 62typedef u64 compat_u64;
@@ -72,20 +76,20 @@ struct compat_stat {
72 compat_dev_t st_dev; 76 compat_dev_t st_dev;
73 compat_ino_t st_ino; 77 compat_ino_t st_ino;
74 compat_mode_t st_mode; 78 compat_mode_t st_mode;
75 compat_nlink_t st_nlink; 79 compat_ushort_t st_nlink;
76 __compat_uid32_t st_uid; 80 __compat_uid16_t st_uid;
77 __compat_gid32_t st_gid; 81 __compat_gid16_t st_gid;
78 compat_dev_t st_rdev; 82 compat_dev_t st_rdev;
79 compat_off_t st_size; 83 compat_off_t st_size;
80 compat_off_t st_blksize; 84 compat_off_t st_blksize;
81 compat_off_t st_blocks; 85 compat_off_t st_blocks;
82 compat_time_t st_atime; 86 compat_time_t st_atime;
83 u32 st_atime_nsec; 87 compat_ulong_t st_atime_nsec;
84 compat_time_t st_mtime; 88 compat_time_t st_mtime;
85 u32 st_mtime_nsec; 89 compat_ulong_t st_mtime_nsec;
86 compat_time_t st_ctime; 90 compat_time_t st_ctime;
87 u32 st_ctime_nsec; 91 compat_ulong_t st_ctime_nsec;
88 u32 __unused4[2]; 92 compat_ulong_t __unused4[2];
89}; 93};
90 94
91struct compat_flock { 95struct compat_flock {
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ef54125e6c1e..cf2749488cd4 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -17,6 +17,7 @@
17#define __ASM_CPUTYPE_H 17#define __ASM_CPUTYPE_H
18 18
19#define ID_MIDR_EL1 "midr_el1" 19#define ID_MIDR_EL1 "midr_el1"
20#define ID_MPIDR_EL1 "mpidr_el1"
20#define ID_CTR_EL0 "ctr_el0" 21#define ID_CTR_EL0 "ctr_el0"
21 22
22#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1" 23#define ID_AA64PFR0_EL1 "id_aa64pfr0_el1"
@@ -25,12 +26,24 @@
25#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1" 26#define ID_AA64ISAR0_EL1 "id_aa64isar0_el1"
26#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1" 27#define ID_AA64MMFR0_EL1 "id_aa64mmfr0_el1"
27 28
29#define INVALID_HWID ULONG_MAX
30
31#define MPIDR_HWID_BITMASK 0xff00ffffff
32
28#define read_cpuid(reg) ({ \ 33#define read_cpuid(reg) ({ \
29 u64 __val; \ 34 u64 __val; \
30 asm("mrs %0, " reg : "=r" (__val)); \ 35 asm("mrs %0, " reg : "=r" (__val)); \
31 __val; \ 36 __val; \
32}) 37})
33 38
39#define ARM_CPU_IMP_ARM 0x41
40
41#define ARM_CPU_PART_AEM_V8 0xD0F0
42#define ARM_CPU_PART_FOUNDATION 0xD000
43#define ARM_CPU_PART_CORTEX_A57 0xD070
44
45#ifndef __ASSEMBLY__
46
34/* 47/*
35 * The CPU ID never changes at run time, so we might as well tell the 48 * The CPU ID never changes at run time, so we might as well tell the
36 * compiler that it's constant. Use this function to read the CPU ID 49 * compiler that it's constant. Use this function to read the CPU ID
@@ -41,9 +54,26 @@ static inline u32 __attribute_const__ read_cpuid_id(void)
41 return read_cpuid(ID_MIDR_EL1); 54 return read_cpuid(ID_MIDR_EL1);
42} 55}
43 56
57static inline u64 __attribute_const__ read_cpuid_mpidr(void)
58{
59 return read_cpuid(ID_MPIDR_EL1);
60}
61
62static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
63{
64 return (read_cpuid_id() & 0xFF000000) >> 24;
65}
66
67static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
68{
69 return (read_cpuid_id() & 0xFFF0);
70}
71
44static inline u32 __attribute_const__ read_cpuid_cachetype(void) 72static inline u32 __attribute_const__ read_cpuid_cachetype(void)
45{ 73{
46 return read_cpuid(ID_CTR_EL0); 74 return read_cpuid(ID_CTR_EL0);
47} 75}
48 76
77#endif /* __ASSEMBLY__ */
78
49#endif 79#endif
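For illustration, the new accessors make implementor/part checks straightforward; this helper is a sketch, not part of the patch:

#include <asm/cputype.h>

static bool cpu_is_arm_aem_v8(void)
{
	/* implementor is MIDR[31:24]; the part field [15:4] is compared unshifted */
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_AEM_V8;
}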
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
new file mode 100644
index 000000000000..78834123a32e
--- /dev/null
+++ b/arch/arm64/include/asm/esr.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (C) 2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __ASM_ESR_H
19#define __ASM_ESR_H
20
21#define ESR_EL1_EC_SHIFT (26)
22#define ESR_EL1_IL (1U << 25)
23
24#define ESR_EL1_EC_UNKNOWN (0x00)
25#define ESR_EL1_EC_WFI (0x01)
26#define ESR_EL1_EC_CP15_32 (0x03)
27#define ESR_EL1_EC_CP15_64 (0x04)
28#define ESR_EL1_EC_CP14_MR (0x05)
29#define ESR_EL1_EC_CP14_LS (0x06)
30#define ESR_EL1_EC_FP_ASIMD (0x07)
31#define ESR_EL1_EC_CP10_ID (0x08)
32#define ESR_EL1_EC_CP14_64 (0x0C)
33#define ESR_EL1_EC_ILL_ISS (0x0E)
34#define ESR_EL1_EC_SVC32 (0x11)
35#define ESR_EL1_EC_SVC64 (0x15)
36#define ESR_EL1_EC_SYS64 (0x18)
37#define ESR_EL1_EC_IABT_EL0 (0x20)
38#define ESR_EL1_EC_IABT_EL1 (0x21)
39#define ESR_EL1_EC_PC_ALIGN (0x22)
40#define ESR_EL1_EC_DABT_EL0 (0x24)
41#define ESR_EL1_EC_DABT_EL1 (0x25)
42#define ESR_EL1_EC_SP_ALIGN (0x26)
43#define ESR_EL1_EC_FP_EXC32 (0x28)
44#define ESR_EL1_EC_FP_EXC64 (0x2C)
45#define ESR_EL1_EC_SERRROR (0x2F)
46#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
47#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
48#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
49#define ESR_EL1_EC_SOFTSTP_EL1 (0x33)
50#define ESR_EL1_EC_WATCHPT_EL0 (0x34)
51#define ESR_EL1_EC_WATCHPT_EL1 (0x35)
52#define ESR_EL1_EC_BKPT32 (0x38)
53#define ESR_EL1_EC_BRK64 (0x3C)
54
55#endif /* __ASM_ESR_H */
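The constants above are consumed by pulling the exception class out of ESR_EL1, as the entry.S changes further down do in assembly; an equivalent C sketch (the helper name is made up):

#include <asm/esr.h>

static unsigned int esr_exception_class(unsigned long esr)
{
	return esr >> ESR_EL1_EC_SHIFT;		/* EC field, ESR bits [31:26] */
}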
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ac63519b7b90..0303705fcad6 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -19,5 +19,6 @@
 #define __ASM_EXCEPTION_H
 
 #define __exception	__attribute__((section(".exception.text")))
+#define __exception_irq_entry	__exception
 
 #endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 507546353d62..990c051e7829 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -49,4 +49,9 @@ static inline void ack_bad_irq(unsigned int irq)
 
 extern void handle_IRQ(unsigned int, struct pt_regs *);
 
+/*
+ * No arch-specific IRQ flags.
+ */
+#define set_irq_flags(irq, flags)
+
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 57f12c991de2..2e12258aa7e4 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -92,10 +92,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define readb_relaxed(c)	({ u8  __v = __raw_readb(c); __v; })
 #define readw_relaxed(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
 #define readl_relaxed(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
+#define readq_relaxed(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
 
 #define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
 #define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
 
 /*
  * I/O memory access primitives. Reads are ordered relative to any
@@ -105,10 +107,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
 #define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
+#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
 
 /*
  * I/O port access primitives.
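A short sketch of a consumer of the new 64-bit accessors; the device and register offset are hypothetical:

#include <linux/io.h>

#define EXAMPLE_CFG_REG		0x0	/* assumed 64-bit register */

static u64 example_read_cfg(void __iomem *base)
{
	return readq(base + EXAMPLE_CFG_REG);		/* ordered read */
}

static void example_write_cfg(void __iomem *base, u64 val)
{
	writeq_relaxed(val, base + EXAMPLE_CFG_REG);	/* caller handles ordering */
}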
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a4e1cad3202a..0332fc077f6e 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,5 +4,6 @@
 #include <asm-generic/irq.h>
 
 extern void (*handle_arch_irq)(struct pt_regs *);
+extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 #endif
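set_handle_irq(), defined in the irq.c hunk further down, lets an interrupt-controller driver install the handler that the low-level exception entry code invokes; a sketch with made-up driver names:

#include <asm/irq.h>

static void example_handle_irq(struct pt_regs *regs)
{
	/* acknowledge the controller and dispatch via handle_IRQ() */
}

static int __init example_irqchip_init(void)
{
	set_handle_irq(example_handle_irq);
	return 0;
}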
diff --git a/arch/arm64/lib/bitops.c b/arch/arm64/include/asm/smp_plat.h
index aa4965e60acc..ed43a0d2b1b2 100644
--- a/arch/arm64/lib/bitops.c
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -1,7 +1,9 @@
1/* 1/*
2 * Copyright (C) 2012 ARM Limited 2 * Definitions specific to SMP platforms.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * Copyright (C) 2013 ARM Ltd.
5 *
6 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
7 * 9 *
@@ -14,12 +16,15 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 17 */
16 18
17#include <linux/kernel.h> 19#ifndef __ASM_SMP_PLAT_H
18#include <linux/spinlock.h> 20#define __ASM_SMP_PLAT_H
19#include <linux/atomic.h> 21
22#include <asm/types.h>
23
24/*
25 * Logical CPU mapping.
26 */
27extern u64 __cpu_logical_map[NR_CPUS];
28#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
20 29
21#ifdef CONFIG_SMP 30#endif /* __ASM_SMP_PLAT_H */
22arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
23 [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
24};
25#endif
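The new smp_plat.h mapping is typically queried as in the sketch below; the helper is illustrative, and INVALID_HWID comes from the cputype.h change above:

#include <asm/cputype.h>
#include <asm/smp_plat.h>

static bool cpu_hwid_valid(unsigned int cpu)
{
	return cpu_logical_map(cpu) != INVALID_HWID;
}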
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
new file mode 100644
index 000000000000..3ee8b303d9a9
--- /dev/null
+++ b/arch/arm64/include/asm/string.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2013 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_STRING_H
17#define __ASM_STRING_H
18
19#define __HAVE_ARCH_STRRCHR
20extern char *strrchr(const char *, int c);
21
22#define __HAVE_ARCH_STRCHR
23extern char *strchr(const char *, int c);
24
25#define __HAVE_ARCH_MEMCPY
26extern void *memcpy(void *, const void *, __kernel_size_t);
27
28#define __HAVE_ARCH_MEMMOVE
29extern void *memmove(void *, const void *, __kernel_size_t);
30
31#define __HAVE_ARCH_MEMCHR
32extern void *memchr(const void *, int, __kernel_size_t);
33
34#define __HAVE_ARCH_MEMSET
35extern void *memset(void *, int, __kernel_size_t);
36
37#endif
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index aa3e948f7885..7df1aad29b67 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -39,10 +39,21 @@ EXPORT_SYMBOL(__copy_from_user);
39EXPORT_SYMBOL(__copy_to_user); 39EXPORT_SYMBOL(__copy_to_user);
40EXPORT_SYMBOL(__clear_user); 40EXPORT_SYMBOL(__clear_user);
41 41
42 /* bitops */
43#ifdef CONFIG_SMP
44EXPORT_SYMBOL(__atomic_hash);
45#endif
46
47 /* physical memory */ 42 /* physical memory */
48EXPORT_SYMBOL(memstart_addr); 43EXPORT_SYMBOL(memstart_addr);
44
45 /* string / mem functions */
46EXPORT_SYMBOL(strchr);
47EXPORT_SYMBOL(strrchr);
48EXPORT_SYMBOL(memset);
49EXPORT_SYMBOL(memcpy);
50EXPORT_SYMBOL(memmove);
51EXPORT_SYMBOL(memchr);
52
53 /* atomic bitops */
54EXPORT_SYMBOL(set_bit);
55EXPORT_SYMBOL(test_and_set_bit);
56EXPORT_SYMBOL(clear_bit);
57EXPORT_SYMBOL(test_and_clear_bit);
58EXPORT_SYMBOL(change_bit);
59EXPORT_SYMBOL(test_and_change_bit);
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index 7e320a2edb9b..ac974f48a7a2 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -24,6 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <linux/amba/serial.h> 26#include <linux/amba/serial.h>
27#include <linux/serial_reg.h>
27 28
28static void __iomem *early_base; 29static void __iomem *early_base;
29static void (*printch)(char ch); 30static void (*printch)(char ch);
@@ -40,6 +41,37 @@ static void pl011_printch(char ch)
40 ; 41 ;
41} 42}
42 43
44/*
45 * Semihosting-based debug console
46 */
47static void smh_printch(char ch)
48{
49 asm volatile("mov x1, %0\n"
50 "mov x0, #3\n"
51 "hlt 0xf000\n"
52 : : "r" (&ch) : "x0", "x1", "memory");
53}
54
55/*
56 * 8250/16550 (8-bit aligned registers) single character TX.
57 */
58static void uart8250_8bit_printch(char ch)
59{
60 while (!(readb_relaxed(early_base + UART_LSR) & UART_LSR_THRE))
61 ;
62 writeb_relaxed(ch, early_base + UART_TX);
63}
64
65/*
66 * 8250/16550 (32-bit aligned registers) single character TX.
67 */
68static void uart8250_32bit_printch(char ch)
69{
70 while (!(readl_relaxed(early_base + (UART_LSR << 2)) & UART_LSR_THRE))
71 ;
72 writel_relaxed(ch, early_base + (UART_TX << 2));
73}
74
43struct earlycon_match { 75struct earlycon_match {
44 const char *name; 76 const char *name;
45 void (*printch)(char ch); 77 void (*printch)(char ch);
@@ -47,6 +79,9 @@ struct earlycon_match {
47 79
48static const struct earlycon_match earlycon_match[] __initconst = { 80static const struct earlycon_match earlycon_match[] __initconst = {
49 { .name = "pl011", .printch = pl011_printch, }, 81 { .name = "pl011", .printch = pl011_printch, },
82 { .name = "smh", .printch = smh_printch, },
83 { .name = "uart8250-8bit", .printch = uart8250_8bit_printch, },
84 { .name = "uart8250-32bit", .printch = uart8250_32bit_printch, },
50 {} 85 {}
51}; 86};
52 87
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 514d6098dbee..c7e047049f2c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -24,6 +24,7 @@
24#include <asm/assembler.h> 24#include <asm/assembler.h>
25#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
26#include <asm/errno.h> 26#include <asm/errno.h>
27#include <asm/esr.h>
27#include <asm/thread_info.h> 28#include <asm/thread_info.h>
28#include <asm/unistd.h> 29#include <asm/unistd.h>
29#include <asm/unistd32.h> 30#include <asm/unistd32.h>
@@ -239,18 +240,18 @@ ENDPROC(el1_error_invalid)
239el1_sync: 240el1_sync:
240 kernel_entry 1 241 kernel_entry 1
241 mrs x1, esr_el1 // read the syndrome register 242 mrs x1, esr_el1 // read the syndrome register
242 lsr x24, x1, #26 // exception class 243 lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class
243 cmp x24, #0x25 // data abort in EL1 244 cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1
244 b.eq el1_da 245 b.eq el1_da
245 cmp x24, #0x18 // configurable trap 246 cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
246 b.eq el1_undef 247 b.eq el1_undef
247 cmp x24, #0x26 // stack alignment exception 248 cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
248 b.eq el1_sp_pc 249 b.eq el1_sp_pc
249 cmp x24, #0x22 // pc alignment exception 250 cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
250 b.eq el1_sp_pc 251 b.eq el1_sp_pc
251 cmp x24, #0x00 // unknown exception in EL1 252 cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1
252 b.eq el1_undef 253 b.eq el1_undef
253 cmp x24, #0x30 // debug exception in EL1 254 cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1
254 b.ge el1_dbg 255 b.ge el1_dbg
255 b el1_inv 256 b el1_inv
256el1_da: 257el1_da:
@@ -346,27 +347,27 @@ el1_preempt:
346el0_sync: 347el0_sync:
347 kernel_entry 0 348 kernel_entry 0
348 mrs x25, esr_el1 // read the syndrome register 349 mrs x25, esr_el1 // read the syndrome register
349 lsr x24, x25, #26 // exception class 350 lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
350 cmp x24, #0x15 // SVC in 64-bit state 351 cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state
351 b.eq el0_svc 352 b.eq el0_svc
352 adr lr, ret_from_exception 353 adr lr, ret_from_exception
353 cmp x24, #0x24 // data abort in EL0 354 cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
354 b.eq el0_da 355 b.eq el0_da
355 cmp x24, #0x20 // instruction abort in EL0 356 cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
356 b.eq el0_ia 357 b.eq el0_ia
357 cmp x24, #0x07 // FP/ASIMD access 358 cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
358 b.eq el0_fpsimd_acc 359 b.eq el0_fpsimd_acc
359 cmp x24, #0x2c // FP/ASIMD exception 360 cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception
360 b.eq el0_fpsimd_exc 361 b.eq el0_fpsimd_exc
361 cmp x24, #0x18 // configurable trap 362 cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
362 b.eq el0_undef 363 b.eq el0_undef
363 cmp x24, #0x26 // stack alignment exception 364 cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
364 b.eq el0_sp_pc 365 b.eq el0_sp_pc
365 cmp x24, #0x22 // pc alignment exception 366 cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
366 b.eq el0_sp_pc 367 b.eq el0_sp_pc
367 cmp x24, #0x00 // unknown exception in EL0 368 cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
368 b.eq el0_undef 369 b.eq el0_undef
369 cmp x24, #0x30 // debug exception in EL0 370 cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
370 b.ge el0_dbg 371 b.ge el0_dbg
371 b el0_inv 372 b el0_inv
372 373
@@ -375,21 +376,21 @@ el0_sync:
375el0_sync_compat: 376el0_sync_compat:
376 kernel_entry 0, 32 377 kernel_entry 0, 32
377 mrs x25, esr_el1 // read the syndrome register 378 mrs x25, esr_el1 // read the syndrome register
378 lsr x24, x25, #26 // exception class 379 lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
379 cmp x24, #0x11 // SVC in 32-bit state 380 cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
380 b.eq el0_svc_compat 381 b.eq el0_svc_compat
381 adr lr, ret_from_exception 382 adr lr, ret_from_exception
382 cmp x24, #0x24 // data abort in EL0 383 cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
383 b.eq el0_da 384 b.eq el0_da
384 cmp x24, #0x20 // instruction abort in EL0 385 cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
385 b.eq el0_ia 386 b.eq el0_ia
386 cmp x24, #0x07 // FP/ASIMD access 387 cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
387 b.eq el0_fpsimd_acc 388 b.eq el0_fpsimd_acc
388 cmp x24, #0x28 // FP/ASIMD exception 389 cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception
389 b.eq el0_fpsimd_exc 390 b.eq el0_fpsimd_exc
390 cmp x24, #0x00 // unknown exception in EL0 391 cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
391 b.eq el0_undef 392 b.eq el0_undef
392 cmp x24, #0x30 // debug exception in EL0 393 cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
393 b.ge el0_dbg 394 b.ge el0_dbg
394 b el0_inv 395 b el0_inv
395el0_svc_compat: 396el0_svc_compat:
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0a0a49756826..53dcae49e729 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
26#include <asm/assembler.h> 26#include <asm/assembler.h>
27#include <asm/ptrace.h> 27#include <asm/ptrace.h>
28#include <asm/asm-offsets.h> 28#include <asm/asm-offsets.h>
29#include <asm/cputype.h>
29#include <asm/memory.h> 30#include <asm/memory.h>
30#include <asm/thread_info.h> 31#include <asm/thread_info.h>
31#include <asm/pgtable-hwdef.h> 32#include <asm/pgtable-hwdef.h>
@@ -229,7 +230,8 @@ ENTRY(secondary_holding_pen)
229 bl __calc_phys_offset // x24=phys offset 230 bl __calc_phys_offset // x24=phys offset
230 bl el2_setup // Drop to EL1 231 bl el2_setup // Drop to EL1
231 mrs x0, mpidr_el1 232 mrs x0, mpidr_el1
232 and x0, x0, #15 // CPU number 233 ldr x1, =MPIDR_HWID_BITMASK
234 and x0, x0, x1
233 adr x1, 1b 235 adr x1, 1b
234 ldp x2, x3, [x1] 236 ldp x2, x3, [x1]
235 sub x1, x1, x2 237 sub x1, x1, x2
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0373c6609eaf..ecb3354292ed 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -25,7 +25,7 @@
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/smp.h> 26#include <linux/smp.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/of_irq.h> 28#include <linux/irqchip.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/ratelimit.h> 30#include <linux/ratelimit.h>
31 31
@@ -67,18 +67,17 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
67 set_irq_regs(old_regs); 67 set_irq_regs(old_regs);
68} 68}
69 69
70/* 70void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
71 * Interrupt controllers supported by the kernel. 71{
72 */ 72 if (handle_arch_irq)
73static const struct of_device_id intctrl_of_match[] __initconst = { 73 return;
74 /* IRQ controllers { .compatible, .data } info to go here */ 74
75 {} 75 handle_arch_irq = handle_irq;
76}; 76}
77 77
78void __init init_IRQ(void) 78void __init init_IRQ(void)
79{ 79{
80 of_irq_init(intctrl_of_match); 80 irqchip_init();
81
82 if (!handle_arch_irq) 81 if (!handle_arch_irq)
83 panic("No interrupt controller found."); 82 panic("No interrupt controller found.");
84} 83}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0337cdb0667b..f4919721f7dd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off);
84void (*pm_restart)(const char *cmd); 84void (*pm_restart)(const char *cmd);
85EXPORT_SYMBOL_GPL(pm_restart); 85EXPORT_SYMBOL_GPL(pm_restart);
86 86
87void arch_cpu_idle_prepare(void)
88{
89 local_fiq_enable();
90}
87 91
88/* 92/*
89 * This is our default idle handler. 93 * This is our default idle handler.
90 */ 94 */
91static void default_idle(void) 95void arch_cpu_idle(void)
92{ 96{
93 /* 97 /*
94 * This should do all the clock switching and wait for interrupt 98 * This should do all the clock switching and wait for interrupt
@@ -98,43 +102,6 @@ static void default_idle(void)
98 local_irq_enable(); 102 local_irq_enable();
99} 103}
100 104
101/*
102 * The idle thread.
103 * We always respect 'hlt_counter' to prevent low power idle.
104 */
105void cpu_idle(void)
106{
107 local_fiq_enable();
108
109 /* endless idle loop with no priority at all */
110 while (1) {
111 tick_nohz_idle_enter();
112 rcu_idle_enter();
113 while (!need_resched()) {
114 /*
115 * We need to disable interrupts here to ensure
116 * we don't miss a wakeup call.
117 */
118 local_irq_disable();
119 if (!need_resched()) {
120 stop_critical_timings();
121 default_idle();
122 start_critical_timings();
123 /*
124 * default_idle functions should always return
125 * with IRQs enabled.
126 */
127 WARN_ON(irqs_disabled());
128 } else {
129 local_irq_enable();
130 }
131 }
132 rcu_idle_exit();
133 tick_nohz_idle_exit();
134 schedule_preempt_disabled();
135 }
136}
137
138void machine_shutdown(void) 105void machine_shutdown(void)
139{ 106{
140#ifdef CONFIG_SMP 107#ifdef CONFIG_SMP
@@ -178,11 +145,7 @@ void __show_regs(struct pt_regs *regs)
178{ 145{
179 int i; 146 int i;
180 147
181 printk("CPU: %d %s (%s %.*s)\n", 148 show_regs_print_info(KERN_DEFAULT);
182 raw_smp_processor_id(), print_tainted(),
183 init_utsname()->release,
184 (int)strcspn(init_utsname()->version, " "),
185 init_utsname()->version);
186 print_symbol("PC is at %s\n", instruction_pointer(regs)); 149 print_symbol("PC is at %s\n", instruction_pointer(regs));
187 print_symbol("LR is at %s\n", regs->regs[30]); 150 print_symbol("LR is at %s\n", regs->regs[30]);
188 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", 151 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
@@ -199,7 +162,6 @@ void __show_regs(struct pt_regs *regs)
199void show_regs(struct pt_regs * regs) 162void show_regs(struct pt_regs * regs)
200{ 163{
201 printk("\n"); 164 printk("\n");
202 printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
203 __show_regs(regs); 165 __show_regs(regs);
204} 166}
205 167
@@ -311,11 +273,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
311 fpsimd_thread_switch(next); 273 fpsimd_thread_switch(next);
312 tls_thread_switch(next); 274 tls_thread_switch(next);
313 hw_breakpoint_thread_switch(next); 275 hw_breakpoint_thread_switch(next);
276 contextidr_thread_switch(next);
277
278 /*
279 * Complete any pending TLB or cache maintenance on this CPU in case
280 * the thread migrates to a different CPU.
281 */
282 dsb();
314 283
315 /* the actual thread switch */ 284 /* the actual thread switch */
316 last = cpu_switch_to(prev, next); 285 last = cpu_switch_to(prev, next);
317 286
318 contextidr_thread_switch(next);
319 return last; 287 return last;
320} 288}
321 289
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 113db863f832..6a9a53292590 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -32,6 +32,7 @@
32#include <linux/kexec.h> 32#include <linux/kexec.h>
33#include <linux/crash_dump.h> 33#include <linux/crash_dump.h>
34#include <linux/root_dev.h> 34#include <linux/root_dev.h>
35#include <linux/clk-provider.h>
35#include <linux/cpu.h> 36#include <linux/cpu.h>
36#include <linux/interrupt.h> 37#include <linux/interrupt.h>
37#include <linux/smp.h> 38#include <linux/smp.h>
@@ -46,6 +47,7 @@
46#include <asm/cputable.h> 47#include <asm/cputable.h>
47#include <asm/sections.h> 48#include <asm/sections.h>
48#include <asm/setup.h> 49#include <asm/setup.h>
50#include <asm/smp_plat.h>
49#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
50#include <asm/tlbflush.h> 52#include <asm/tlbflush.h>
51#include <asm/traps.h> 53#include <asm/traps.h>
@@ -240,6 +242,8 @@ static void __init request_standard_resources(void)
240 } 242 }
241} 243}
242 244
245u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
246
243void __init setup_arch(char **cmdline_p) 247void __init setup_arch(char **cmdline_p)
244{ 248{
245 setup_processor(); 249 setup_processor();
@@ -264,6 +268,7 @@ void __init setup_arch(char **cmdline_p)
264 268
265 psci_init(); 269 psci_init();
266 270
271 cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
267#ifdef CONFIG_SMP 272#ifdef CONFIG_SMP
268 smp_init_cpus(); 273 smp_init_cpus();
269#endif 274#endif
@@ -277,6 +282,13 @@ void __init setup_arch(char **cmdline_p)
277#endif 282#endif
278} 283}
279 284
285static int __init arm64_of_clk_init(void)
286{
287 of_clk_init(NULL);
288 return 0;
289}
290arch_initcall(arm64_of_clk_init);
291
280static DEFINE_PER_CPU(struct cpu, cpu_data); 292static DEFINE_PER_CPU(struct cpu, cpu_data);
281 293
282static int __init topology_init(void) 294static int __init topology_init(void)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index bdd34597254b..5d54e3717bf8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -43,6 +43,7 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/pgalloc.h> 44#include <asm/pgalloc.h>
45#include <asm/processor.h> 45#include <asm/processor.h>
46#include <asm/smp_plat.h>
46#include <asm/sections.h> 47#include <asm/sections.h>
47#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
48#include <asm/ptrace.h> 49#include <asm/ptrace.h>
@@ -53,7 +54,7 @@
53 * where to place its SVC stack 54 * where to place its SVC stack
54 */ 55 */
55struct secondary_data secondary_data; 56struct secondary_data secondary_data;
56volatile unsigned long secondary_holding_pen_release = -1; 57volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
57 58
58enum ipi_msg_type { 59enum ipi_msg_type {
59 IPI_RESCHEDULE, 60 IPI_RESCHEDULE,
@@ -70,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
70 * in coherency or not. This is necessary for the hotplug code to work 71 * in coherency or not. This is necessary for the hotplug code to work
71 * reliably. 72 * reliably.
72 */ 73 */
73static void __cpuinit write_pen_release(int val) 74static void __cpuinit write_pen_release(u64 val)
74{ 75{
75 void *start = (void *)&secondary_holding_pen_release; 76 void *start = (void *)&secondary_holding_pen_release;
76 unsigned long size = sizeof(secondary_holding_pen_release); 77 unsigned long size = sizeof(secondary_holding_pen_release);
@@ -96,7 +97,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
96 /* 97 /*
97 * Update the pen release flag. 98 * Update the pen release flag.
98 */ 99 */
99 write_pen_release(cpu); 100 write_pen_release(cpu_logical_map(cpu));
100 101
101 /* 102 /*
102 * Send an event, causing the secondaries to read pen_release. 103 * Send an event, causing the secondaries to read pen_release.
@@ -105,7 +106,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
105 106
106 timeout = jiffies + (1 * HZ); 107 timeout = jiffies + (1 * HZ);
107 while (time_before(jiffies, timeout)) { 108 while (time_before(jiffies, timeout)) {
108 if (secondary_holding_pen_release == -1UL) 109 if (secondary_holding_pen_release == INVALID_HWID)
109 break; 110 break;
110 udelay(10); 111 udelay(10);
111 } 112 }
@@ -116,7 +117,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
116 */ 117 */
117 raw_spin_unlock(&boot_lock); 118 raw_spin_unlock(&boot_lock);
118 119
119 return secondary_holding_pen_release != -1 ? -ENOSYS : 0; 120 return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
120} 121}
121 122
122static DECLARE_COMPLETION(cpu_running); 123static DECLARE_COMPLETION(cpu_running);
@@ -190,7 +191,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
190 * Let the primary processor know we're out of the 191 * Let the primary processor know we're out of the
191 * pen, then head off into the C entry point 192 * pen, then head off into the C entry point
192 */ 193 */
193 write_pen_release(-1); 194 write_pen_release(INVALID_HWID);
194 195
195 /* 196 /*
196 * Synchronise with the boot thread. 197 * Synchronise with the boot thread.
@@ -216,7 +217,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
216 /* 217 /*
217 * OK, it's off to the idle thread for us 218 * OK, it's off to the idle thread for us
218 */ 219 */
219 cpu_idle(); 220 cpu_startup_entry(CPUHP_ONLINE);
220} 221}
221 222
222void __init smp_cpus_done(unsigned int max_cpus) 223void __init smp_cpus_done(unsigned int max_cpus)
@@ -244,11 +245,11 @@ static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
244 245
245static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) 246static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
246{ 247{
247 const struct smp_enable_ops *ops = enable_ops[0]; 248 const struct smp_enable_ops **ops = enable_ops;
248 249
249 while (ops) { 250 while (*ops) {
250 if (!strcmp(name, ops->name)) 251 if (!strcmp(name, (*ops)->name))
251 return ops; 252 return *ops;
252 253
253 ops++; 254 ops++;
254 } 255 }
@@ -257,15 +258,80 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
257} 258}
258 259
259/* 260/*
260 * Enumerate the possible CPU set from the device tree. 261 * Enumerate the possible CPU set from the device tree and build the
262 * cpu logical map array containing MPIDR values related to logical
263 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
261 */ 264 */
262void __init smp_init_cpus(void) 265void __init smp_init_cpus(void)
263{ 266{
264 const char *enable_method; 267 const char *enable_method;
265 struct device_node *dn = NULL; 268 struct device_node *dn = NULL;
266 int cpu = 0; 269 int i, cpu = 1;
270 bool bootcpu_valid = false;
267 271
268 while ((dn = of_find_node_by_type(dn, "cpu"))) { 272 while ((dn = of_find_node_by_type(dn, "cpu"))) {
273 const u32 *cell;
274 u64 hwid;
275
276 /*
277 * A cpu node with missing "reg" property is
278 * considered invalid to build a cpu_logical_map
279 * entry.
280 */
281 cell = of_get_property(dn, "reg", NULL);
282 if (!cell) {
283 pr_err("%s: missing reg property\n", dn->full_name);
284 goto next;
285 }
286 hwid = of_read_number(cell, of_n_addr_cells(dn));
287
288 /*
289 * Non affinity bits must be set to 0 in the DT
290 */
291 if (hwid & ~MPIDR_HWID_BITMASK) {
292 pr_err("%s: invalid reg property\n", dn->full_name);
293 goto next;
294 }
295
296 /*
297 * Duplicate MPIDRs are a recipe for disaster. Scan
298 * all initialized entries and check for
299 * duplicates. If any is found just ignore the cpu.
300 * cpu_logical_map was initialized to INVALID_HWID to
301 * avoid matching valid MPIDR values.
302 */
303 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
304 if (cpu_logical_map(i) == hwid) {
305 pr_err("%s: duplicate cpu reg properties in the DT\n",
306 dn->full_name);
307 goto next;
308 }
309 }
310
311 /*
312 * The numbering scheme requires that the boot CPU
313 * must be assigned logical id 0. Record it so that
314 * the logical map built from DT is validated and can
315 * be used.
316 */
317 if (hwid == cpu_logical_map(0)) {
318 if (bootcpu_valid) {
319 pr_err("%s: duplicate boot cpu reg property in DT\n",
320 dn->full_name);
321 goto next;
322 }
323
324 bootcpu_valid = true;
325
326 /*
327 * cpu_logical_map has already been
328 * initialized and the boot cpu doesn't need
329 * the enable-method so continue without
330 * incrementing cpu.
331 */
332 continue;
333 }
334
269 if (cpu >= NR_CPUS) 335 if (cpu >= NR_CPUS)
270 goto next; 336 goto next;
271 337
@@ -274,22 +340,24 @@ void __init smp_init_cpus(void)
274 */ 340 */
275 enable_method = of_get_property(dn, "enable-method", NULL); 341 enable_method = of_get_property(dn, "enable-method", NULL);
276 if (!enable_method) { 342 if (!enable_method) {
277 pr_err("CPU %d: missing enable-method property\n", cpu); 343 pr_err("%s: missing enable-method property\n",
344 dn->full_name);
278 goto next; 345 goto next;
279 } 346 }
280 347
281 smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); 348 smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
282 349
283 if (!smp_enable_ops[cpu]) { 350 if (!smp_enable_ops[cpu]) {
284 pr_err("CPU %d: invalid enable-method property: %s\n", 351 pr_err("%s: invalid enable-method property: %s\n",
285 cpu, enable_method); 352 dn->full_name, enable_method);
286 goto next; 353 goto next;
287 } 354 }
288 355
289 if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) 356 if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
290 goto next; 357 goto next;
291 358
292 set_cpu_possible(cpu, true); 359 pr_debug("cpu logical map 0x%llx\n", hwid);
360 cpu_logical_map(cpu) = hwid;
293next: 361next:
294 cpu++; 362 cpu++;
295 } 363 }
@@ -298,6 +366,19 @@ next:
298 if (cpu > NR_CPUS) 366 if (cpu > NR_CPUS)
299 pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n", 367 pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
300 cpu, NR_CPUS); 368 cpu, NR_CPUS);
369
370 if (!bootcpu_valid) {
371 pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
372 return;
373 }
374
375 /*
376 * All the cpus that made it to the cpu_logical_map have been
377 * validated so set them as possible cpus.
378 */
379 for (i = 0; i < NR_CPUS; i++)
380 if (cpu_logical_map(i) != INVALID_HWID)
381 set_cpu_possible(i, true);
301} 382}
302 383
303void __init smp_prepare_cpus(unsigned int max_cpus) 384void __init smp_prepare_cpus(unsigned int max_cpus)
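The smp_init_cpus() rework above reads each DT cpu node's "reg" property as an MPIDR hardware id, rejects values with non-affinity bits set, rejects duplicates, and only then records the value in cpu_logical_map and marks the cpu possible. The following stand-alone C sketch mirrors that validation logic (the boot-cpu special case is omitted); the mask value, array size and helper names are assumptions for illustration, not the kernel's definitions.

/* Illustrative sketch of the per-node MPIDR checks done in smp_init_cpus(). */
#include <stdint.h>

#define NR_CPUS_SKETCH       8
#define HWID_BITMASK_SKETCH  0xff00ffffffULL   /* assumed affinity mask, Aff3..Aff0 */
#define INVALID_HWID_SKETCH  (~0ULL)

static uint64_t logical_map_sketch[NR_CPUS_SKETCH] = {
	[0 ... NR_CPUS_SKETCH - 1] = INVALID_HWID_SKETCH,	/* GNU range initializer */
};

/* Record hwid at logical id "cpu"; return -1 if the node must be ignored. */
static int record_cpu_sketch(uint64_t hwid, int cpu)
{
	int i;

	if (hwid & ~HWID_BITMASK_SKETCH)	/* non-affinity bits must be zero */
		return -1;
	for (i = 0; i < NR_CPUS_SKETCH; i++)
		if (logical_map_sketch[i] == hwid)	/* duplicate MPIDR: ignore */
			return -1;
	if (cpu >= NR_CPUS_SKETCH)
		return -1;
	logical_map_sketch[cpu] = hwid;
	return cpu;
}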
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
index 112091684c22..0c533301be77 100644
--- a/arch/arm64/kernel/smp_psci.c
+++ b/arch/arm64/kernel/smp_psci.c
@@ -21,6 +21,7 @@
21#include <linux/smp.h> 21#include <linux/smp.h>
22 22
23#include <asm/psci.h> 23#include <asm/psci.h>
24#include <asm/smp_plat.h>
24 25
25static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) 26static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
26{ 27{
@@ -36,7 +37,7 @@ static int __init smp_psci_prepare_cpu(int cpu)
36 return -ENODEV; 37 return -ENODEV;
37 } 38 }
38 39
39 err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen)); 40 err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
40 if (err) { 41 if (err) {
41 pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); 42 pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
42 return err; 43 return err;
@@ -47,6 +48,6 @@ static int __init smp_psci_prepare_cpu(int cpu)
47 48
48const struct smp_enable_ops smp_psci_ops __initconst = { 49const struct smp_enable_ops smp_psci_ops __initconst = {
49 .name = "psci", 50 .name = "psci",
50 .init_cpu = smp_psci_init_cpu, 51 .init_cpu = smp_psci_init_cpu,
51 .prepare_cpu = smp_psci_prepare_cpu, 52 .prepare_cpu = smp_psci_prepare_cpu,
52}; 53};
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 9416d045a687..db01aa978c41 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -84,13 +84,6 @@ compat_sys_readahead_wrapper:
84 b sys_readahead 84 b sys_readahead
85ENDPROC(compat_sys_readahead_wrapper) 85ENDPROC(compat_sys_readahead_wrapper)
86 86
87compat_sys_lookup_dcookie:
88 orr x0, x0, x1, lsl #32
89 mov w1, w2
90 mov w2, w3
91 b sys_lookup_dcookie
92ENDPROC(compat_sys_lookup_dcookie)
93
94compat_sys_fadvise64_64_wrapper: 87compat_sys_fadvise64_64_wrapper:
95 mov w6, w1 88 mov w6, w1
96 orr x1, x2, x3, lsl #32 89 orr x1, x2, x3, lsl #32
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index b3c5f628bdb4..61d7dd29f756 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -167,13 +167,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
167 } 167 }
168} 168}
169 169
170void dump_stack(void)
171{
172 dump_backtrace(NULL, NULL);
173}
174
175EXPORT_SYMBOL(dump_stack);
176
177void show_stack(struct task_struct *tsk, unsigned long *sp) 170void show_stack(struct task_struct *tsk, unsigned long *sp)
178{ 171{
179 dump_backtrace(NULL, tsk); 172 dump_backtrace(NULL, tsk);
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 2fb7f6092aae..59acc0ef0462 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,4 +1,6 @@
1lib-y := bitops.o delay.o \ 1lib-y := bitops.o delay.o \
2 strncpy_from_user.o strnlen_user.o clear_user.o \ 2 strncpy_from_user.o strnlen_user.o clear_user.o \
3 copy_from_user.o copy_to_user.o copy_in_user.o \ 3 copy_from_user.o copy_to_user.o copy_in_user.o \
4 copy_page.o clear_page.o 4 copy_page.o clear_page.o \
5 memchr.o memcpy.o memmove.o memset.o \
6 strchr.o strrchr.o
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
new file mode 100644
index 000000000000..36216d30cb9a
--- /dev/null
+++ b/arch/arm64/lib/bitops.S
@@ -0,0 +1,68 @@
1/*
2 * Based on arch/arm/lib/bitops.h
3 *
4 * Copyright (C) 2013 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/linkage.h>
20#include <asm/assembler.h>
21
22/*
23 * x0: bits 5:0 bit offset
24 * bits 63:6 word offset
25 * x1: address
26 */
27 .macro bitop, name, instr
28ENTRY( \name )
29 and x3, x0, #63 // Get bit offset
30 eor x0, x0, x3 // Clear low bits
31 mov x2, #1
32 add x1, x1, x0, lsr #3 // Get word offset
33 lsl x3, x2, x3 // Create mask
341: ldxr x2, [x1]
35 \instr x2, x2, x3
36 stxr w0, x2, [x1]
37 cbnz w0, 1b
38 ret
39ENDPROC(\name )
40 .endm
41
42 .macro testop, name, instr
43ENTRY( \name )
44 and x3, x0, #63 // Get bit offset
45 eor x0, x0, x3 // Clear low bits
46 mov x2, #1
47 add x1, x1, x0, lsr #3 // Get word offset
48 lsl x4, x2, x3 // Create mask
491: ldaxr x2, [x1]
50 lsr x0, x2, x3 // Save old value of bit
51 \instr x2, x2, x4 // toggle bit
52 stlxr w5, x2, [x1]
53 cbnz w5, 1b
54 and x0, x0, #1
553: ret
56ENDPROC(\name )
57 .endm
58
59/*
60 * Atomic bit operations.
61 */
62 bitop change_bit, eor
63 bitop clear_bit, bic
64 bitop set_bit, orr
65
66 testop test_and_change_bit, eor
67 testop test_and_clear_bit, bic
68 testop test_and_set_bit, orr
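The testop macro above performs an exclusive load/store loop and returns the old value of the bit in x0. As a rough user-space analogue of that semantic (not the kernel API itself), a compiler atomic builtin can express the same word-offset and mask computation:

/* User-space sketch of the test_and_set_bit semantics implemented by testop. */
#include <stdbool.h>
#include <stdint.h>

static bool test_and_set_bit_sketch(unsigned int nr, volatile uint64_t *addr)
{
	uint64_t mask = 1ULL << (nr & 63);		/* bits 5:0 select the bit */
	uint64_t old  = __atomic_fetch_or(addr + (nr >> 6), mask, __ATOMIC_ACQ_REL);
	return old & mask;				/* old bit value, as returned in x0 */
}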
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
new file mode 100644
index 000000000000..8636b7549163
--- /dev/null
+++ b/arch/arm64/lib/memchr.S
@@ -0,0 +1,44 @@
1/*
2 * Based on arch/arm/lib/memchr.S
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * Copyright (C) 2013 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22
23/*
24 * Find a character in an area of memory.
25 *
26 * Parameters:
27 * x0 - buf
28 * x1 - c
29 * x2 - n
30 * Returns:
31 * x0 - address of first occurrence of 'c' or 0
32 */
33ENTRY(memchr)
34 and w1, w1, #0xff
351: subs x2, x2, #1
36 b.mi 2f
37 ldrb w3, [x0], #1
38 cmp w3, w1
39 b.ne 1b
40 sub x0, x0, #1
41 ret
422: mov x0, #0
43 ret
44ENDPROC(memchr)
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
new file mode 100644
index 000000000000..27b5003609b6
--- /dev/null
+++ b/arch/arm64/lib/memcpy.S
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2013 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <asm/assembler.h>
19
20/*
21 * Copy a buffer from src to dest (alignment handled by the hardware)
22 *
23 * Parameters:
24 * x0 - dest
25 * x1 - src
26 * x2 - n
27 * Returns:
28 * x0 - dest
29 */
30ENTRY(memcpy)
31 mov x4, x0
32 subs x2, x2, #8
33 b.mi 2f
341: ldr x3, [x1], #8
35 subs x2, x2, #8
36 str x3, [x4], #8
37 b.pl 1b
382: adds x2, x2, #4
39 b.mi 3f
40 ldr w3, [x1], #4
41 sub x2, x2, #4
42 str w3, [x4], #4
433: adds x2, x2, #2
44 b.mi 4f
45 ldrh w3, [x1], #2
46 sub x2, x2, #2
47 strh w3, [x4], #2
484: adds x2, x2, #1
49 b.mi 5f
50 ldrb w3, [x1]
51 strb w3, [x4]
525: ret
53ENDPROC(memcpy)
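The new memcpy above copies eight bytes per iteration and then drains a 4/2/1-byte tail, leaving alignment to the hardware. A minimal C sketch of the same structure (illustrative only, not the kernel routine):

/* C sketch of the 8-byte main loop plus 4/2/1-byte tail used by memcpy above. */
#include <stddef.h>
#include <string.h>

static void *memcpy_sketch(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	for (; n >= 8; n -= 8, d += 8, s += 8)
		memcpy(d, s, 8);	/* one 64-bit word per iteration */
	if (n >= 4) { memcpy(d, s, 4); d += 4; s += 4; n -= 4; }
	if (n >= 2) { memcpy(d, s, 2); d += 2; s += 2; n -= 2; }
	if (n)
		*d = *s;
	return dest;
}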
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
new file mode 100644
index 000000000000..b79fdfa42d39
--- /dev/null
+++ b/arch/arm64/lib/memmove.S
@@ -0,0 +1,57 @@
1/*
2 * Copyright (C) 2013 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <asm/assembler.h>
19
20/*
 21 * Move a buffer from src to dest (alignment handled by the hardware).
22 * If dest <= src, call memcpy, otherwise copy in reverse order.
23 *
24 * Parameters:
25 * x0 - dest
26 * x1 - src
27 * x2 - n
28 * Returns:
29 * x0 - dest
30 */
31ENTRY(memmove)
32 cmp x0, x1
33 b.ls memcpy
34 add x4, x0, x2
35 add x1, x1, x2
36 subs x2, x2, #8
37 b.mi 2f
381: ldr x3, [x1, #-8]!
39 subs x2, x2, #8
40 str x3, [x4, #-8]!
41 b.pl 1b
422: adds x2, x2, #4
43 b.mi 3f
44 ldr w3, [x1, #-4]!
45 sub x2, x2, #4
46 str w3, [x4, #-4]!
473: adds x2, x2, #2
48 b.mi 4f
49 ldrh w3, [x1, #-2]!
50 sub x2, x2, #2
51 strh w3, [x4, #-2]!
524: adds x2, x2, #1
53 b.mi 5f
54 ldrb w3, [x1, #-1]
55 strb w3, [x4, #-1]
565: ret
57ENDPROC(memmove)
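memmove above falls through to memcpy when the destination is at or below the source and otherwise copies backwards from the end, so overlapping regions are handled correctly in either direction. A short C sketch of that decision (illustrative only):

/* C sketch of the overlap handling in memmove above. */
#include <stddef.h>

static void *memmove_sketch(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d <= s) {
		for (size_t i = 0; i < n; i++)	/* forward copy is safe */
			d[i] = s[i];
	} else {
		while (n--)			/* copy backwards from the end */
			d[n] = s[n];
	}
	return dest;
}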
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
new file mode 100644
index 000000000000..87e4a68fbbbc
--- /dev/null
+++ b/arch/arm64/lib/memset.S
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2013 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/linkage.h>
18#include <asm/assembler.h>
19
20/*
21 * Fill in the buffer with character c (alignment handled by the hardware)
22 *
23 * Parameters:
24 * x0 - buf
25 * x1 - c
26 * x2 - n
27 * Returns:
28 * x0 - buf
29 */
30ENTRY(memset)
31 mov x4, x0
32 and w1, w1, #0xff
33 orr w1, w1, w1, lsl #8
34 orr w1, w1, w1, lsl #16
35 orr x1, x1, x1, lsl #32
36 subs x2, x2, #8
37 b.mi 2f
381: str x1, [x4], #8
39 subs x2, x2, #8
40 b.pl 1b
412: adds x2, x2, #4
42 b.mi 3f
43 sub x2, x2, #4
44 str w1, [x4], #4
453: adds x2, x2, #2
46 b.mi 4f
47 sub x2, x2, #2
48 strh w1, [x4], #2
494: adds x2, x2, #1
50 b.mi 5f
51 strb w1, [x4]
525: ret
53ENDPROC(memset)
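memset above first replicates the fill byte across a 64-bit register with three orr instructions before storing whole words. The replication step, in C (a sketch, not the kernel code):

/* C sketch of the byte-replication trick used by memset above. */
#include <stdint.h>

static uint64_t replicate_byte(unsigned char c)
{
	uint64_t v = c;

	v |= v << 8;	/* 0x00000000000000cc -> 0x000000000000cccc */
	v |= v << 16;	/*                    -> 0x00000000cccccccc */
	v |= v << 32;	/*                    -> 0xcccccccccccccccc */
	return v;
}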
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
new file mode 100644
index 000000000000..dae0cf5591f9
--- /dev/null
+++ b/arch/arm64/lib/strchr.S
@@ -0,0 +1,42 @@
1/*
2 * Based on arch/arm/lib/strchr.S
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * Copyright (C) 2013 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22
23/*
24 * Find the first occurrence of a character in a string.
25 *
26 * Parameters:
27 * x0 - str
28 * x1 - c
29 * Returns:
30 * x0 - address of first occurrence of 'c' or 0
31 */
32ENTRY(strchr)
33 and w1, w1, #0xff
341: ldrb w2, [x0], #1
35 cmp w2, w1
36 ccmp w2, wzr, #4, ne
37 b.ne 1b
38 sub x0, x0, #1
39 cmp w2, w1
40 csel x0, x0, xzr, eq
41 ret
42ENDPROC(strchr)
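The strchr loop above uses ccmp so a single conditional exit covers both a match and the terminating NUL, and the final csel returns NULL unless the stop was a real match (which also makes strchr(s, '\0') return the terminator, as expected). Equivalent logic in C (a sketch):

/* C sketch of the strchr loop above: stop on match or NUL, NULL unless matched. */
#include <stddef.h>

static char *strchr_sketch(const char *s, int c)
{
	char ch = (char)(c & 0xff);

	while (*s != ch && *s != '\0')
		s++;
	return (*s == ch) ? (char *)s : NULL;
}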
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
new file mode 100644
index 000000000000..61eabd9a289a
--- /dev/null
+++ b/arch/arm64/lib/strrchr.S
@@ -0,0 +1,43 @@
1/*
2 * Based on arch/arm/lib/strrchr.S
3 *
4 * Copyright (C) 1995-2000 Russell King
5 * Copyright (C) 2013 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/linkage.h>
21#include <asm/assembler.h>
22
23/*
24 * Find the last occurrence of a character in a string.
25 *
26 * Parameters:
27 * x0 - str
28 * x1 - c
29 * Returns:
30 * x0 - address of last occurrence of 'c' or 0
31 */
32ENTRY(strrchr)
33 mov x3, #0
34 and w1, w1, #0xff
351: ldrb w2, [x0], #1
36 cbz w2, 2f
37 cmp w2, w1
38 b.ne 1b
39 sub x3, x0, #1
40 b 1b
412: mov x0, x3
42 ret
43ENDPROC(strrchr)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index afadae6682ed..52638171d6fd 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -57,16 +57,16 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
57 pmd_t *pmd; 57 pmd_t *pmd;
58 pte_t *pte; 58 pte_t *pte;
59 59
60 if (pgd_none_or_clear_bad(pgd)) 60 if (pgd_none(*pgd) || pgd_bad(*pgd))
61 break; 61 break;
62 62
63 pud = pud_offset(pgd, addr); 63 pud = pud_offset(pgd, addr);
64 if (pud_none_or_clear_bad(pud)) 64 if (pud_none(*pud) || pud_bad(*pud))
65 break; 65 break;
66 66
67 pmd = pmd_offset(pud, addr); 67 pmd = pmd_offset(pud, addr);
68 printk(", *pmd=%016llx", pmd_val(*pmd)); 68 printk(", *pmd=%016llx", pmd_val(*pmd));
69 if (pmd_none_or_clear_bad(pmd)) 69 if (pmd_none(*pmd) || pmd_bad(*pmd))
70 break; 70 break;
71 71
72 pte = pte_offset_map(pmd, addr); 72 pte = pte_offset_map(pmd, addr);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 800aac306a08..f497ca77925a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -197,24 +197,6 @@ void __init bootmem_init(void)
197 max_pfn = max_low_pfn = max; 197 max_pfn = max_low_pfn = max;
198} 198}
199 199
200static inline int free_area(unsigned long pfn, unsigned long end, char *s)
201{
202 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
203
204 for (; pfn < end; pfn++) {
205 struct page *page = pfn_to_page(pfn);
206 ClearPageReserved(page);
207 init_page_count(page);
208 __free_page(page);
209 pages++;
210 }
211
212 if (size && s)
213 pr_info("Freeing %s memory: %dK\n", s, size);
214
215 return pages;
216}
217
218/* 200/*
219 * Poison init memory with an undefined instruction (0x0). 201 * Poison init memory with an undefined instruction (0x0).
220 */ 202 */
@@ -405,9 +387,7 @@ void __init mem_init(void)
405void free_initmem(void) 387void free_initmem(void)
406{ 388{
407 poison_init_mem(__init_begin, __init_end - __init_begin); 389 poison_init_mem(__init_begin, __init_end - __init_begin);
408 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 390 free_initmem_default(0);
409 __phys_to_pfn(__pa(__init_end)),
410 "init");
411} 391}
412 392
413#ifdef CONFIG_BLK_DEV_INITRD 393#ifdef CONFIG_BLK_DEV_INITRD
@@ -418,9 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
418{ 398{
419 if (!keep_initrd) { 399 if (!keep_initrd) {
420 poison_init_mem((void *)start, PAGE_ALIGN(end) - start); 400 poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
421 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 401 free_reserved_area(start, end, 0, "initrd");
422 __phys_to_pfn(__pa(end)),
423 "initrd");
424 } 402 }
425} 403}
426 404
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 70b8cd4021c4..eeecc9c8ed68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr)
391} 391}
392#ifdef CONFIG_SPARSEMEM_VMEMMAP 392#ifdef CONFIG_SPARSEMEM_VMEMMAP
393#ifdef CONFIG_ARM64_64K_PAGES 393#ifdef CONFIG_ARM64_64K_PAGES
394int __meminit vmemmap_populate(struct page *start_page, 394int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
395 unsigned long size, int node)
396{ 395{
397 return vmemmap_populate_basepages(start_page, size, node); 396 return vmemmap_populate_basepages(start, end, node);
398} 397}
399#else /* !CONFIG_ARM64_64K_PAGES */ 398#else /* !CONFIG_ARM64_64K_PAGES */
400int __meminit vmemmap_populate(struct page *start_page, 399int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
401 unsigned long size, int node)
402{ 400{
403 unsigned long addr = (unsigned long)start_page; 401 unsigned long addr = start;
404 unsigned long end = (unsigned long)(start_page + size);
405 unsigned long next; 402 unsigned long next;
406 pgd_t *pgd; 403 pgd_t *pgd;
407 pud_t *pud; 404 pud_t *pud;
@@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page,
434 return 0; 431 return 0;
435} 432}
436#endif /* CONFIG_ARM64_64K_PAGES */ 433#endif /* CONFIG_ARM64_64K_PAGES */
437void vmemmap_free(struct page *memmap, unsigned long nr_pages) 434void vmemmap_free(unsigned long start, unsigned long end)
438{ 435{
439} 436}
440#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 437#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index c1a868d398bd..22c40308360b 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -250,20 +250,7 @@ config ARCH_SUSPEND_POSSIBLE
250 def_bool y 250 def_bool y
251 251
252menu "CPU Frequency scaling" 252menu "CPU Frequency scaling"
253
254source "drivers/cpufreq/Kconfig" 253source "drivers/cpufreq/Kconfig"
255
256config CPU_FREQ_AT32AP
257 bool "CPU frequency driver for AT32AP"
258 depends on CPU_FREQ && PLATFORM_AT32AP
259 default n
260 help
261 This enables the CPU frequency driver for AT32AP processors.
262
263 For details, take a look in <file:Documentation/cpu-freq>.
264
265 If in doubt, say N.
266
267endmenu 254endmenu
268 255
269endmenu 256endmenu
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index f4025db184ff..d5aff36ade92 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y
26# CONFIG_CPU_FREQ_STAT is not set 26# CONFIG_CPU_FREQ_STAT is not set
27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
28CONFIG_CPU_FREQ_GOV_USERSPACE=y 28CONFIG_CPU_FREQ_GOV_USERSPACE=y
29CONFIG_CPU_FREQ_AT32AP=y 29CONFIG_AVR32_AT32AP_CPUFREQ=y
30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index c76a49b9e9d0..4abcf435d599 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -28,7 +28,7 @@ CONFIG_CPU_FREQ=y
28# CONFIG_CPU_FREQ_STAT is not set 28# CONFIG_CPU_FREQ_STAT is not set
29CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 29CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
30CONFIG_CPU_FREQ_GOV_USERSPACE=y 30CONFIG_CPU_FREQ_GOV_USERSPACE=y
31CONFIG_CPU_FREQ_AT32AP=y 31CONFIG_AVR32_AT32AP_CPUFREQ=y
32CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 32CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
33CONFIG_NET=y 33CONFIG_NET=y
34CONFIG_PACKET=y 34CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 2d8ab089a64e..18f3fa0470ff 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -27,7 +27,7 @@ CONFIG_CPU_FREQ=y
27# CONFIG_CPU_FREQ_STAT is not set 27# CONFIG_CPU_FREQ_STAT is not set
28CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 28CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
29CONFIG_CPU_FREQ_GOV_USERSPACE=y 29CONFIG_CPU_FREQ_GOV_USERSPACE=y
30CONFIG_CPU_FREQ_AT32AP=y 30CONFIG_AVR32_AT32AP_CPUFREQ=y
31CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 31CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
32CONFIG_NET=y 32CONFIG_NET=y
33CONFIG_PACKET=y 33CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index b189e0cab04b..06e389cfcd12 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -23,7 +23,7 @@ CONFIG_CPU_FREQ=y
23CONFIG_CPU_FREQ_GOV_POWERSAVE=y 23CONFIG_CPU_FREQ_GOV_POWERSAVE=y
24CONFIG_CPU_FREQ_GOV_USERSPACE=y 24CONFIG_CPU_FREQ_GOV_USERSPACE=y
25CONFIG_CPU_FREQ_GOV_ONDEMAND=y 25CONFIG_CPU_FREQ_GOV_ONDEMAND=y
26CONFIG_CPU_FREQ_AT32AP=y 26CONFIG_AVR32_AT32AP_CPUFREQ=y
27CONFIG_NET=y 27CONFIG_NET=y
28CONFIG_PACKET=y 28CONFIG_PACKET=y
29CONFIG_UNIX=y 29CONFIG_UNIX=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 2e4de42a53c4..2518a1368d7c 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y
26# CONFIG_CPU_FREQ_STAT is not set 26# CONFIG_CPU_FREQ_STAT is not set
27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
28CONFIG_CPU_FREQ_GOV_USERSPACE=y 28CONFIG_CPU_FREQ_GOV_USERSPACE=y
29CONFIG_CPU_FREQ_AT32AP=y 29CONFIG_AVR32_AT32AP_CPUFREQ=y
30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index fad3cd22dfd3..245ef6bd0fa6 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -29,7 +29,7 @@ CONFIG_CPU_FREQ=y
29# CONFIG_CPU_FREQ_STAT is not set 29# CONFIG_CPU_FREQ_STAT is not set
30CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 30CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
31CONFIG_CPU_FREQ_GOV_USERSPACE=y 31CONFIG_CPU_FREQ_GOV_USERSPACE=y
32CONFIG_CPU_FREQ_AT32AP=y 32CONFIG_AVR32_AT32AP_CPUFREQ=y
33CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 33CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
34CONFIG_NET=y 34CONFIG_NET=y
35CONFIG_PACKET=y 35CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 29986230aaa5..fa6cbac6e418 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -28,7 +28,7 @@ CONFIG_CPU_FREQ=y
28# CONFIG_CPU_FREQ_STAT is not set 28# CONFIG_CPU_FREQ_STAT is not set
29CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 29CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
30CONFIG_CPU_FREQ_GOV_USERSPACE=y 30CONFIG_CPU_FREQ_GOV_USERSPACE=y
31CONFIG_CPU_FREQ_AT32AP=y 31CONFIG_AVR32_AT32AP_CPUFREQ=y
32CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 32CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
33CONFIG_NET=y 33CONFIG_NET=y
34CONFIG_PACKET=y 34CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index a582465e1cef..bbd5131021a5 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -25,7 +25,7 @@ CONFIG_CPU_FREQ=y
25# CONFIG_CPU_FREQ_STAT is not set 25# CONFIG_CPU_FREQ_STAT is not set
26CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 26CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
27CONFIG_CPU_FREQ_GOV_USERSPACE=y 27CONFIG_CPU_FREQ_GOV_USERSPACE=y
28CONFIG_CPU_FREQ_AT32AP=y 28CONFIG_AVR32_AT32AP_CPUFREQ=y
29CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 29CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
30CONFIG_NET=y 30CONFIG_NET=y
31CONFIG_PACKET=y 31CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 57a79df2ce5d..c1cd726f9012 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y
26# CONFIG_CPU_FREQ_STAT is not set 26# CONFIG_CPU_FREQ_STAT is not set
27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
28CONFIG_CPU_FREQ_GOV_USERSPACE=y 28CONFIG_CPU_FREQ_GOV_USERSPACE=y
29CONFIG_CPU_FREQ_AT32AP=y 29CONFIG_AVR32_AT32AP_CPUFREQ=y
30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 1a49bd8c6340..754ae56b2767 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y
26# CONFIG_CPU_FREQ_STAT is not set 26# CONFIG_CPU_FREQ_STAT is not set
27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
28CONFIG_CPU_FREQ_GOV_USERSPACE=y 28CONFIG_CPU_FREQ_GOV_USERSPACE=y
29CONFIG_CPU_FREQ_AT32AP=y 29CONFIG_AVR32_AT32AP_CPUFREQ=y
30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 206a1b67f763..58589d8cc0ac 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -26,7 +26,7 @@ CONFIG_CPU_FREQ=y
26# CONFIG_CPU_FREQ_STAT is not set 26# CONFIG_CPU_FREQ_STAT is not set
27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 27CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
28CONFIG_CPU_FREQ_GOV_USERSPACE=y 28CONFIG_CPU_FREQ_GOV_USERSPACE=y
29CONFIG_CPU_FREQ_AT32AP=y 29CONFIG_AVR32_AT32AP_CPUFREQ=y
30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y 30CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index 0421498d666b..57788a42ff83 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -27,7 +27,7 @@ CONFIG_CPU_FREQ=y
27# CONFIG_CPU_FREQ_STAT is not set 27# CONFIG_CPU_FREQ_STAT is not set
28CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 28CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
29CONFIG_CPU_FREQ_GOV_USERSPACE=y 29CONFIG_CPU_FREQ_GOV_USERSPACE=y
30CONFIG_CPU_FREQ_AT32AP=y 30CONFIG_AVR32_AT32AP_CPUFREQ=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
33CONFIG_UNIX=y 33CONFIG_UNIX=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index 82f24eb251bd..ba7c31e269cb 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -31,7 +31,7 @@ CONFIG_CPU_FREQ=y
31# CONFIG_CPU_FREQ_STAT is not set 31# CONFIG_CPU_FREQ_STAT is not set
32CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 32CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
33CONFIG_CPU_FREQ_GOV_USERSPACE=y 33CONFIG_CPU_FREQ_GOV_USERSPACE=y
34CONFIG_CPU_FREQ_AT32AP=y 34CONFIG_AVR32_AT32AP_CPUFREQ=y
35CONFIG_NET=y 35CONFIG_NET=y
36CONFIG_PACKET=y 36CONFIG_PACKET=y
37CONFIG_UNIX=y 37CONFIG_UNIX=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 1bee51f22154..0a8bfdc420e0 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -24,7 +24,7 @@ CONFIG_CPU_FREQ=y
24# CONFIG_CPU_FREQ_STAT is not set 24# CONFIG_CPU_FREQ_STAT is not set
25CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 25CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
26CONFIG_CPU_FREQ_GOV_USERSPACE=y 26CONFIG_CPU_FREQ_GOV_USERSPACE=y
27CONFIG_CPU_FREQ_AT32AP=y 27CONFIG_AVR32_AT32AP_CPUFREQ=y
28CONFIG_NET=y 28CONFIG_NET=y
29CONFIG_PACKET=y 29CONFIG_PACKET=y
30CONFIG_UNIX=y 30CONFIG_UNIX=y
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index dc4d5a931112..c1eb080e45fe 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -41,12 +41,4 @@
41#define __ARCH_WANT_SYS_VFORK 41#define __ARCH_WANT_SYS_VFORK
42#define __ARCH_WANT_SYS_CLONE 42#define __ARCH_WANT_SYS_CLONE
43 43
44/*
45 * "Conditional" syscalls
46 *
47 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
48 * but it doesn't work on all toolchains, so we just do it by hand
49 */
50#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
51
52#endif /* __ASM_AVR32_UNISTD_H */ 44#endif /* __ASM_AVR32_UNISTD_H */
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index fd78f58ea79a..e7b61494c312 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -30,18 +30,9 @@ EXPORT_SYMBOL(pm_power_off);
30 * This file handles the architecture-dependent parts of process handling.. 30 * This file handles the architecture-dependent parts of process handling..
31 */ 31 */
32 32
33void cpu_idle(void) 33void arch_cpu_idle(void)
34{ 34{
35 /* endless idle loop with no priority at all */ 35 cpu_enter_idle();
36 while (1) {
37 tick_nohz_idle_enter();
38 rcu_idle_enter();
39 while (!need_resched())
40 cpu_idle_sleep();
41 rcu_idle_exit();
42 tick_nohz_idle_exit();
43 schedule_preempt_disabled();
44 }
45} 36}
46 37
47void machine_halt(void) 38void machine_halt(void)
@@ -213,14 +204,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
213 show_stack_log_lvl(tsk, (unsigned long)stack, NULL, ""); 204 show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
214} 205}
215 206
216void dump_stack(void)
217{
218 unsigned long stack;
219
220 show_trace_log_lvl(current, &stack, NULL, "");
221}
222EXPORT_SYMBOL(dump_stack);
223
224static const char *cpu_modes[] = { 207static const char *cpu_modes[] = {
225 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1", 208 "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
226 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI" 209 "Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
@@ -232,6 +215,8 @@ void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
232 unsigned long lr = regs->lr; 215 unsigned long lr = regs->lr;
233 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT; 216 unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
234 217
218 show_regs_print_info(log_lvl);
219
235 if (!user_mode(regs)) { 220 if (!user_mode(regs)) {
236 sp = (unsigned long)regs + FRAME_SIZE_FULL; 221 sp = (unsigned long)regs + FRAME_SIZE_FULL;
237 222
@@ -269,9 +254,6 @@ void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
269 regs->sr & SR_I0M ? '0' : '.', 254 regs->sr & SR_I0M ? '0' : '.',
270 regs->sr & SR_GM ? 'G' : 'g'); 255 regs->sr & SR_GM ? 'G' : 'g');
271 printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]); 256 printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
272 printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
273 log_lvl, current->comm, current->pid, current,
274 task_thread_info(current));
275} 257}
276 258
277void show_regs(struct pt_regs *regs) 259void show_regs(struct pt_regs *regs)
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 05ad29112ff4..869a1c6ffeee 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -12,6 +12,7 @@
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/time.h> 14#include <linux/time.h>
15#include <linux/cpu.h>
15 16
16#include <asm/sysreg.h> 17#include <asm/sysreg.h>
17 18
@@ -87,13 +88,17 @@ static void comparator_mode(enum clock_event_mode mode,
87 pr_debug("%s: start\n", evdev->name); 88 pr_debug("%s: start\n", evdev->name);
88 /* FALLTHROUGH */ 89 /* FALLTHROUGH */
89 case CLOCK_EVT_MODE_RESUME: 90 case CLOCK_EVT_MODE_RESUME:
90 cpu_disable_idle_sleep(); 91 /*
92 * If we're using the COUNT and COMPARE registers we
93 * need to force idle poll.
94 */
95 cpu_idle_poll_ctrl(true);
91 break; 96 break;
92 case CLOCK_EVT_MODE_UNUSED: 97 case CLOCK_EVT_MODE_UNUSED:
93 case CLOCK_EVT_MODE_SHUTDOWN: 98 case CLOCK_EVT_MODE_SHUTDOWN:
94 sysreg_write(COMPARE, 0); 99 sysreg_write(COMPARE, 0);
95 pr_debug("%s: stop\n", evdev->name); 100 pr_debug("%s: stop\n", evdev->name);
96 cpu_enable_idle_sleep(); 101 cpu_idle_poll_ctrl(false);
97 break; 102 break;
98 default: 103 default:
99 BUG(); 104 BUG();
diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile
index 514c9a9b009a..fc09ec4bc725 100644
--- a/arch/avr32/mach-at32ap/Makefile
+++ b/arch/avr32/mach-at32ap/Makefile
@@ -1,7 +1,6 @@
1obj-y += pdc.o clock.o intc.o extint.o pio.o hsmc.o 1obj-y += pdc.o clock.o intc.o extint.o pio.o hsmc.o
2obj-y += hmatrix.o 2obj-y += hmatrix.o
3obj-$(CONFIG_CPU_AT32AP700X) += at32ap700x.o pm-at32ap700x.o 3obj-$(CONFIG_CPU_AT32AP700X) += at32ap700x.o pm-at32ap700x.o
4obj-$(CONFIG_CPU_FREQ_AT32AP) += cpufreq.o
5obj-$(CONFIG_PM) += pm.o 4obj-$(CONFIG_PM) += pm.o
6 5
7ifeq ($(CONFIG_PM_DEBUG),y) 6ifeq ($(CONFIG_PM_DEBUG),y)
diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/arch/avr32/mach-at32ap/cpufreq.c
deleted file mode 100644
index 18b765629a0c..000000000000
--- a/arch/avr32/mach-at32ap/cpufreq.c
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * Copyright (C) 2004-2007 Atmel Corporation
3 *
4 * Based on MIPS implementation arch/mips/kernel/time.c
5 * Copyright 2001 MontaVista Software Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12/*#define DEBUG*/
13
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/cpufreq.h>
18#include <linux/io.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/export.h>
22
23static struct clk *cpuclk;
24
25static int at32_verify_speed(struct cpufreq_policy *policy)
26{
27 if (policy->cpu != 0)
28 return -EINVAL;
29
30 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
31 policy->cpuinfo.max_freq);
32 return 0;
33}
34
35static unsigned int at32_get_speed(unsigned int cpu)
36{
37 /* No SMP support */
38 if (cpu)
39 return 0;
40 return (unsigned int)((clk_get_rate(cpuclk) + 500) / 1000);
41}
42
43static unsigned int ref_freq;
44static unsigned long loops_per_jiffy_ref;
45
46static int at32_set_target(struct cpufreq_policy *policy,
47 unsigned int target_freq,
48 unsigned int relation)
49{
50 struct cpufreq_freqs freqs;
51 long freq;
52
53 /* Convert target_freq from kHz to Hz */
54 freq = clk_round_rate(cpuclk, target_freq * 1000);
55
56 /* Check if policy->min <= new_freq <= policy->max */
57 if(freq < (policy->min * 1000) || freq > (policy->max * 1000))
58 return -EINVAL;
59
60 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
61
62 freqs.old = at32_get_speed(0);
63 freqs.new = (freq + 500) / 1000;
64 freqs.cpu = 0;
65 freqs.flags = 0;
66
67 if (!ref_freq) {
68 ref_freq = freqs.old;
69 loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
70 }
71
72 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
73 if (freqs.old < freqs.new)
74 boot_cpu_data.loops_per_jiffy = cpufreq_scale(
75 loops_per_jiffy_ref, ref_freq, freqs.new);
76 clk_set_rate(cpuclk, freq);
77 if (freqs.new < freqs.old)
78 boot_cpu_data.loops_per_jiffy = cpufreq_scale(
79 loops_per_jiffy_ref, ref_freq, freqs.new);
80 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
81
82 pr_debug("cpufreq: set frequency %lu Hz\n", freq);
83
84 return 0;
85}
86
87static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
88{
89 if (policy->cpu != 0)
90 return -EINVAL;
91
92 cpuclk = clk_get(NULL, "cpu");
93 if (IS_ERR(cpuclk)) {
94 pr_debug("cpufreq: could not get CPU clk\n");
95 return PTR_ERR(cpuclk);
96 }
97
98 policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
99 policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
100 policy->cpuinfo.transition_latency = 0;
101 policy->cur = at32_get_speed(0);
102 policy->min = policy->cpuinfo.min_freq;
103 policy->max = policy->cpuinfo.max_freq;
104
105 printk("cpufreq: AT32AP CPU frequency driver\n");
106
107 return 0;
108}
109
110static struct cpufreq_driver at32_driver = {
111 .name = "at32ap",
112 .owner = THIS_MODULE,
113 .init = at32_cpufreq_driver_init,
114 .verify = at32_verify_speed,
115 .target = at32_set_target,
116 .get = at32_get_speed,
117 .flags = CPUFREQ_STICKY,
118};
119
120static int __init at32_cpufreq_init(void)
121{
122 return cpufreq_register_driver(&at32_driver);
123}
124late_initcall(at32_cpufreq_init);
diff --git a/arch/avr32/mach-at32ap/include/mach/pm.h b/arch/avr32/mach-at32ap/include/mach/pm.h
index 979b355b77b6..f29ff2cd23d3 100644
--- a/arch/avr32/mach-at32ap/include/mach/pm.h
+++ b/arch/avr32/mach-at32ap/include/mach/pm.h
@@ -21,30 +21,6 @@
21extern void cpu_enter_idle(void); 21extern void cpu_enter_idle(void);
22extern void cpu_enter_standby(unsigned long sdramc_base); 22extern void cpu_enter_standby(unsigned long sdramc_base);
23 23
24extern bool disable_idle_sleep;
25
26static inline void cpu_disable_idle_sleep(void)
27{
28 disable_idle_sleep = true;
29}
30
31static inline void cpu_enable_idle_sleep(void)
32{
33 disable_idle_sleep = false;
34}
35
36static inline void cpu_idle_sleep(void)
37{
38 /*
39 * If we're using the COUNT and COMPARE registers for
40 * timekeeping, we can't use the IDLE state.
41 */
42 if (disable_idle_sleep)
43 cpu_relax();
44 else
45 cpu_enter_idle();
46}
47
48void intc_set_suspend_handler(unsigned long offset); 24void intc_set_suspend_handler(unsigned long offset);
49#endif 25#endif
50 26
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index f868f4ce761b..1c8e4e6bff03 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -18,13 +18,6 @@
18/* Same as 0xfff00000 but fits in a 21 bit signed immediate */ 18/* Same as 0xfff00000 but fits in a 21 bit signed immediate */
19#define PM_BASE -0x100000 19#define PM_BASE -0x100000
20 20
21 .section .bss, "wa", @nobits
22 .global disable_idle_sleep
23 .type disable_idle_sleep, @object
24disable_idle_sleep:
25 .int 4
26 .size disable_idle_sleep, . - disable_idle_sleep
27
28 /* Keep this close to the irq handlers */ 21 /* Keep this close to the irq handlers */
29 .section .irq.text, "ax", @progbits 22 .section .irq.text, "ax", @progbits
30 23
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 2798c2d4a1cf..e66e8406f992 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -146,34 +146,14 @@ void __init mem_init(void)
146 initsize >> 10); 146 initsize >> 10);
147} 147}
148 148
149static inline void free_area(unsigned long addr, unsigned long end, char *s)
150{
151 unsigned int size = (end - addr) >> 10;
152
153 for (; addr < end; addr += PAGE_SIZE) {
154 struct page *page = virt_to_page(addr);
155 ClearPageReserved(page);
156 init_page_count(page);
157 free_page(addr);
158 totalram_pages++;
159 }
160
161 if (size && s)
162 printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
163 s, size, end - (size << 10), end);
164}
165
166void free_initmem(void) 149void free_initmem(void)
167{ 150{
168 free_area((unsigned long)__init_begin, (unsigned long)__init_end, 151 free_initmem_default(0);
169 "init");
170} 152}
171 153
172#ifdef CONFIG_BLK_DEV_INITRD 154#ifdef CONFIG_BLK_DEV_INITRD
173
174void free_initrd_mem(unsigned long start, unsigned long end) 155void free_initrd_mem(unsigned long start, unsigned long end)
175{ 156{
176 free_area(start, end, "initrd"); 157 free_reserved_area(start, end, 0, "initrd");
177} 158}
178
179#endif 159#endif
diff --git a/arch/blackfin/include/asm/bfin_sport3.h b/arch/blackfin/include/asm/bfin_sport3.h
index 03c00220d69b..d82f5fa0ad9f 100644
--- a/arch/blackfin/include/asm/bfin_sport3.h
+++ b/arch/blackfin/include/asm/bfin_sport3.h
@@ -41,7 +41,7 @@
41#define SPORT_CTL_LAFS 0x00020000 /* Late Transmit frame select */ 41#define SPORT_CTL_LAFS 0x00020000 /* Late Transmit frame select */
42#define SPORT_CTL_RJUST 0x00040000 /* Right Justified mode select */ 42#define SPORT_CTL_RJUST 0x00040000 /* Right Justified mode select */
43#define SPORT_CTL_FSED 0x00080000 /* External frame sync edge select */ 43#define SPORT_CTL_FSED 0x00080000 /* External frame sync edge select */
44#define SPORT_CTL_TFIEN 0x00100000 /* Transmit finish interrrupt enable select */ 44#define SPORT_CTL_TFIEN 0x00100000 /* Transmit finish interrupt enable select */
45#define SPORT_CTL_GCLKEN 0x00200000 /* Gated clock mode select */ 45#define SPORT_CTL_GCLKEN 0x00200000 /* Gated clock mode select */
46#define SPORT_CTL_SPENSEC 0x01000000 /* Enable secondary channel */ 46#define SPORT_CTL_SPENSEC 0x01000000 /* Enable secondary channel */
47#define SPORT_CTL_SPTRAN 0x02000000 /* Data direction control */ 47#define SPORT_CTL_SPTRAN 0x02000000 /* Data direction control */
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 04e83ea8d5cc..c35414bdf7bd 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -20,12 +20,4 @@
20#define __ARCH_WANT_SYS_NICE 20#define __ARCH_WANT_SYS_NICE
21#define __ARCH_WANT_SYS_VFORK 21#define __ARCH_WANT_SYS_VFORK
22 22
23/*
24 * "Conditional" syscalls
25 *
26 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
27 * but it doesn't work on all toolchains, so we just do it by hand
28 */
29#define cond_syscall(x) asm(".weak\t_" #x "\n\t.set\t_" #x ",_sys_ni_syscall");
30
31#endif /* __ASM_BFIN_UNISTD_H */ 23#endif /* __ASM_BFIN_UNISTD_H */
diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c
index 5cfbaa298211..95ba6d9e9a3d 100644
--- a/arch/blackfin/kernel/dumpstack.c
+++ b/arch/blackfin/kernel/dumpstack.c
@@ -168,6 +168,7 @@ void dump_stack(void)
168#endif 168#endif
169 trace_buffer_save(tflags); 169 trace_buffer_save(tflags);
170 dump_bfin_trace_buffer(); 170 dump_bfin_trace_buffer();
171 dump_stack_print_info(KERN_DEFAULT);
171 show_stack(current, &stack); 172 show_stack(current, &stack);
172 trace_buffer_restore(tflags); 173 trace_buffer_restore(tflags);
173} 174}
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 84ed8375113c..61fbd2de993d 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_init(unsigned int port,
25extern struct console *bfin_jc_early_init(void); 25extern struct console *bfin_jc_early_init(void);
26#endif 26#endif
27 27
28static struct console *early_console;
29
30/* Default console */ 28/* Default console */
31#define DEFAULT_PORT 0 29#define DEFAULT_PORT 0
32#define DEFAULT_CFLAG CS8|B57600 30#define DEFAULT_CFLAG CS8|B57600
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 9782c0329c14..4aa5545c4fde 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -46,15 +46,14 @@ EXPORT_SYMBOL(pm_power_off);
46 * The idle loop on BFIN 46 * The idle loop on BFIN
47 */ 47 */
48#ifdef CONFIG_IDLE_L1 48#ifdef CONFIG_IDLE_L1
49static void default_idle(void)__attribute__((l1_text)); 49void arch_cpu_idle(void)__attribute__((l1_text));
50void cpu_idle(void)__attribute__((l1_text));
51#endif 50#endif
52 51
53/* 52/*
54 * This is our default idle handler. We need to disable 53 * This is our default idle handler. We need to disable
55 * interrupts here to ensure we don't miss a wakeup call. 54 * interrupts here to ensure we don't miss a wakeup call.
56 */ 55 */
57static void default_idle(void) 56void arch_cpu_idle(void)
58{ 57{
59#ifdef CONFIG_IPIPE 58#ifdef CONFIG_IPIPE
60 ipipe_suspend_domain(); 59 ipipe_suspend_domain();
@@ -66,31 +65,12 @@ static void default_idle(void)
66 hard_local_irq_enable(); 65 hard_local_irq_enable();
67} 66}
68 67
69/*
70 * The idle thread. We try to conserve power, while trying to keep
71 * overall latency low. The architecture specific idle is passed
72 * a value to indicate the level of "idleness" of the system.
73 */
74void cpu_idle(void)
75{
76 /* endless idle loop with no priority at all */
77 while (1) {
78
79#ifdef CONFIG_HOTPLUG_CPU 68#ifdef CONFIG_HOTPLUG_CPU
80 if (cpu_is_offline(smp_processor_id())) 69void arch_cpu_idle_dead(void)
81 cpu_die(); 70{
82#endif 71 cpu_die();
83 tick_nohz_idle_enter();
84 rcu_idle_enter();
85 while (!need_resched())
86 default_idle();
87 rcu_idle_exit();
88 tick_nohz_idle_exit();
89 preempt_enable_no_resched();
90 schedule();
91 preempt_disable();
92 }
93} 72}
73#endif
94 74
95/* 75/*
96 * Do necessary setup to start up a newly executed thread. 76 * Do necessary setup to start up a newly executed thread.
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index f7f7a18abca9..c36efa0c7163 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -853,6 +853,8 @@ void show_regs(struct pt_regs *fp)
853 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); 853 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
854 854
855 pr_notice("\n"); 855 pr_notice("\n");
856 show_regs_print_info(KERN_NOTICE);
857
856 if (CPUID != bfin_cpuid()) 858 if (CPUID != bfin_cpuid())
857 pr_notice("Compiled for cpu family 0x%04x (Rev %d), " 859 pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
858 "but running on:0x%04x (Rev %d)\n", 860 "but running on:0x%04x (Rev %d)\n",
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 61c1f47a4bf2..97d701639585 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -936,19 +936,19 @@ static struct v4l2_input adv7842_inputs[] = {
936 .index = 2, 936 .index = 2,
937 .name = "Component", 937 .name = "Component",
938 .type = V4L2_INPUT_TYPE_CAMERA, 938 .type = V4L2_INPUT_TYPE_CAMERA,
939 .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, 939 .capabilities = V4L2_IN_CAP_DV_TIMINGS,
940 }, 940 },
941 { 941 {
942 .index = 3, 942 .index = 3,
943 .name = "VGA", 943 .name = "VGA",
944 .type = V4L2_INPUT_TYPE_CAMERA, 944 .type = V4L2_INPUT_TYPE_CAMERA,
945 .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, 945 .capabilities = V4L2_IN_CAP_DV_TIMINGS,
946 }, 946 },
947 { 947 {
948 .index = 4, 948 .index = 4,
949 .name = "HDMI", 949 .name = "HDMI",
950 .type = V4L2_INPUT_TYPE_CAMERA, 950 .type = V4L2_INPUT_TYPE_CAMERA,
951 .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, 951 .capabilities = V4L2_IN_CAP_DV_TIMINGS,
952 }, 952 },
953}; 953};
954 954
@@ -1074,7 +1074,7 @@ static struct v4l2_output adv7511_outputs[] = {
1074 .index = 0, 1074 .index = 0,
1075 .name = "HDMI", 1075 .name = "HDMI",
1076 .type = V4L2_INPUT_TYPE_CAMERA, 1076 .type = V4L2_INPUT_TYPE_CAMERA,
1077 .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS, 1077 .capabilities = V4L2_OUT_CAP_DV_TIMINGS,
1078 }, 1078 },
1079}; 1079};
1080 1080
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index 75f0ba29ebb9..675466d490d4 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_PM) += pm.o
10ifneq ($(CONFIG_BF60x),y) 10ifneq ($(CONFIG_BF60x),y)
11obj-$(CONFIG_PM) += dpmc_modes.o 11obj-$(CONFIG_PM) += dpmc_modes.o
12endif 12endif
13obj-$(CONFIG_CPU_FREQ) += cpufreq.o
14obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o 13obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
15obj-$(CONFIG_SMP) += smp.o 14obj-$(CONFIG_SMP) += smp.o
16obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o 15obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
deleted file mode 100644
index d88bd31319e6..000000000000
--- a/arch/blackfin/mach-common/cpufreq.c
+++ /dev/null
@@ -1,258 +0,0 @@
1/*
2 * Blackfin core clock scaling
3 *
4 * Copyright 2008-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/clk.h>
14#include <linux/cpufreq.h>
15#include <linux/fs.h>
16#include <linux/delay.h>
17#include <asm/blackfin.h>
18#include <asm/time.h>
19#include <asm/dpmc.h>
20
21
22/* this is the table of CCLK frequencies, in Hz */
23/* .index is the entry in the auxiliary dpm_state_table[] */
24static struct cpufreq_frequency_table bfin_freq_table[] = {
25 {
26 .frequency = CPUFREQ_TABLE_END,
27 .index = 0,
28 },
29 {
30 .frequency = CPUFREQ_TABLE_END,
31 .index = 1,
32 },
33 {
34 .frequency = CPUFREQ_TABLE_END,
35 .index = 2,
36 },
37 {
38 .frequency = CPUFREQ_TABLE_END,
39 .index = 0,
40 },
41};
42
43static struct bfin_dpm_state {
44 unsigned int csel; /* system clock divider */
45 unsigned int tscale; /* change the divider on the core timer interrupt */
46} dpm_state_table[3];
47
48#if defined(CONFIG_CYCLES_CLOCKSOURCE)
49/*
50 * normalized to maximum frequency offset for CYCLES,
51 * used in time-ts cycles clock source, but could be used
52 * somewhere also.
53 */
54unsigned long long __bfin_cycles_off;
55unsigned int __bfin_cycles_mod;
56#endif
57
58/**************************************************************************/
59static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
60{
61
62 unsigned long csel, min_cclk;
63 int index;
64
65 /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
66#if ANOMALY_05000273 || ANOMALY_05000274 || \
67 (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \
68 && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
69 min_cclk = sclk * 2;
70#else
71 min_cclk = sclk;
72#endif
73
74#ifndef CONFIG_BF60x
75 csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
76#else
77 csel = bfin_read32(CGU0_DIV) & 0x1F;
78#endif
79
80 for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) {
81 bfin_freq_table[index].frequency = cclk >> index;
82#ifndef CONFIG_BF60x
83 dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
84#else
85 dpm_state_table[index].csel = csel;
86#endif
87 dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1;
88
89 pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
90 bfin_freq_table[index].frequency,
91 dpm_state_table[index].csel,
92 dpm_state_table[index].tscale);
93 }
94 return;
95}
96
97static void bfin_adjust_core_timer(void *info)
98{
99 unsigned int tscale;
100 unsigned int index = *(unsigned int *)info;
101
102 /* we have to adjust the core timer, because it is using cclk */
103 tscale = dpm_state_table[index].tscale;
104 bfin_write_TSCALE(tscale);
105 return;
106}
107
108static unsigned int bfin_getfreq_khz(unsigned int cpu)
109{
110 /* Both CoreA/B have the same core clock */
111 return get_cclk() / 1000;
112}
113
114#ifdef CONFIG_BF60x
115unsigned long cpu_set_cclk(int cpu, unsigned long new)
116{
117 struct clk *clk;
118 int ret;
119
120 clk = clk_get(NULL, "CCLK");
121 if (IS_ERR(clk))
122 return -ENODEV;
123
124 ret = clk_set_rate(clk, new);
125 clk_put(clk);
126 return ret;
127}
128#endif
129
130static int bfin_target(struct cpufreq_policy *poli,
131 unsigned int target_freq, unsigned int relation)
132{
133#ifndef CONFIG_BF60x
134 unsigned int plldiv;
135#endif
136 unsigned int index, cpu;
137 unsigned long cclk_hz;
138 struct cpufreq_freqs freqs;
139 static unsigned long lpj_ref;
140 static unsigned int lpj_ref_freq;
141 int ret = 0;
142
143#if defined(CONFIG_CYCLES_CLOCKSOURCE)
144 cycles_t cycles;
145#endif
146
147 for_each_online_cpu(cpu) {
148 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
149
150 if (!policy)
151 continue;
152
153 if (cpufreq_frequency_table_target(policy, bfin_freq_table,
154 target_freq, relation, &index))
155 return -EINVAL;
156
157 cclk_hz = bfin_freq_table[index].frequency;
158
159 freqs.old = bfin_getfreq_khz(0);
160 freqs.new = cclk_hz;
161 freqs.cpu = cpu;
162
163 pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
164 cclk_hz, target_freq, freqs.old);
165
166 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
167 if (cpu == CPUFREQ_CPU) {
168#ifndef CONFIG_BF60x
169 plldiv = (bfin_read_PLL_DIV() & SSEL) |
170 dpm_state_table[index].csel;
171 bfin_write_PLL_DIV(plldiv);
172#else
173 ret = cpu_set_cclk(cpu, freqs.new * 1000);
174 if (ret != 0) {
175 WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
176 break;
177 }
178#endif
179 on_each_cpu(bfin_adjust_core_timer, &index, 1);
180#if defined(CONFIG_CYCLES_CLOCKSOURCE)
181 cycles = get_cycles();
182 SSYNC();
183 cycles += 10; /* ~10 cycles we lose after get_cycles() */
184 __bfin_cycles_off +=
185 (cycles << __bfin_cycles_mod) - (cycles << index);
186 __bfin_cycles_mod = index;
187#endif
188 if (!lpj_ref_freq) {
189 lpj_ref = loops_per_jiffy;
190 lpj_ref_freq = freqs.old;
191 }
192 if (freqs.new != freqs.old) {
193 loops_per_jiffy = cpufreq_scale(lpj_ref,
194 lpj_ref_freq, freqs.new);
195 }
196 }
197 /* TODO: just test case for cycles clock source, remove later */
198 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
199 }
200
201 pr_debug("cpufreq: done\n");
202 return ret;
203}
204
205static int bfin_verify_speed(struct cpufreq_policy *policy)
206{
207 return cpufreq_frequency_table_verify(policy, bfin_freq_table);
208}
209
210static int __bfin_cpu_init(struct cpufreq_policy *policy)
211{
212
213 unsigned long cclk, sclk;
214
215 cclk = get_cclk() / 1000;
216 sclk = get_sclk() / 1000;
217
218 if (policy->cpu == CPUFREQ_CPU)
219 bfin_init_tables(cclk, sclk);
220
221 policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
222
223 policy->cur = cclk;
224 cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
225 return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
226}
227
228static struct freq_attr *bfin_freq_attr[] = {
229 &cpufreq_freq_attr_scaling_available_freqs,
230 NULL,
231};
232
233static struct cpufreq_driver bfin_driver = {
234 .verify = bfin_verify_speed,
235 .target = bfin_target,
236 .get = bfin_getfreq_khz,
237 .init = __bfin_cpu_init,
238 .name = "bfin cpufreq",
239 .owner = THIS_MODULE,
240 .attr = bfin_freq_attr,
241};
242
243static int __init bfin_cpu_init(void)
244{
245 return cpufreq_register_driver(&bfin_driver);
246}
247
248static void __exit bfin_cpu_exit(void)
249{
250 cpufreq_unregister_driver(&bfin_driver);
251}
252
253MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
254MODULE_DESCRIPTION("cpufreq driver for Blackfin");
255MODULE_LICENSE("GPL");
256
257module_init(bfin_cpu_init);
258module_exit(bfin_cpu_exit);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index bb61ae4986e4..1bc2ce6f3c94 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -335,7 +335,7 @@ void __cpuinit secondary_start_kernel(void)
335 */ 335 */
336 calibrate_delay(); 336 calibrate_delay();
337 337
338 cpu_idle(); 338 cpu_startup_entry(CPUHP_ONLINE);
339} 339}
340 340
341void __init smp_prepare_boot_cpu(void) 341void __init smp_prepare_boot_cpu(void)
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 9cb85537bd2b..82d01a71207f 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -103,7 +103,7 @@ void __init mem_init(void)
103 max_mapnr = num_physpages = MAP_NR(high_memory); 103 max_mapnr = num_physpages = MAP_NR(high_memory);
104 printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); 104 printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);
105 105
106 /* This will put all memory onto the freelists. */ 106 /* This will put all low memory onto the freelists. */
107 totalram_pages = free_all_bootmem(); 107 totalram_pages = free_all_bootmem();
108 108
109 reservedpages = 0; 109 reservedpages = 0;
@@ -129,24 +129,11 @@ void __init mem_init(void)
129 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); 129 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
130} 130}
131 131
132static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
133{
134 unsigned long addr;
135 /* next to check that the page we free is not a partial page */
136 for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
137 ClearPageReserved(virt_to_page(addr));
138 init_page_count(virt_to_page(addr));
139 free_page(addr);
140 totalram_pages++;
141 }
142 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
143}
144
145#ifdef CONFIG_BLK_DEV_INITRD 132#ifdef CONFIG_BLK_DEV_INITRD
146void __init free_initrd_mem(unsigned long start, unsigned long end) 133void __init free_initrd_mem(unsigned long start, unsigned long end)
147{ 134{
148#ifndef CONFIG_MPU 135#ifndef CONFIG_MPU
149 free_init_pages("initrd memory", start, end); 136 free_reserved_area(start, end, 0, "initrd");
150#endif 137#endif
151} 138}
152#endif 139#endif
@@ -154,10 +141,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
154void __init_refok free_initmem(void) 141void __init_refok free_initmem(void)
155{ 142{
156#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU 143#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
157 free_init_pages("unused kernel memory", 144 free_initmem_default(0);
158 (unsigned long)(&__init_begin),
159 (unsigned long)(&__init_end));
160
161 if (memory_start == (unsigned long)(&__init_end)) 145 if (memory_start == (unsigned long)(&__init_end))
162 memory_start = (unsigned long)(&__init_begin); 146 memory_start = (unsigned long)(&__init_begin);
163#endif 147#endif
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
index 6434df476f77..57d2ea8d1977 100644
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -33,7 +33,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
33void (*pm_power_off)(void); 33void (*pm_power_off)(void);
34EXPORT_SYMBOL(pm_power_off); 34EXPORT_SYMBOL(pm_power_off);
35 35
36static void c6x_idle(void) 36void arch_cpu_idle(void)
37{ 37{
38 unsigned long tmp; 38 unsigned long tmp;
39 39
@@ -49,32 +49,6 @@ static void c6x_idle(void)
49 : "=b"(tmp)); 49 : "=b"(tmp));
50} 50}
51 51
52/*
53 * The idle loop for C64x
54 */
55void cpu_idle(void)
56{
57 /* endless idle loop with no priority at all */
58 while (1) {
59 tick_nohz_idle_enter();
60 rcu_idle_enter();
61 while (1) {
62 local_irq_disable();
63 if (need_resched()) {
64 local_irq_enable();
65 break;
66 }
67 c6x_idle(); /* enables local irqs */
68 }
69 rcu_idle_exit();
70 tick_nohz_idle_exit();
71
72 preempt_enable_no_resched();
73 schedule();
74 preempt_disable();
75 }
76}
77
78static void halt_loop(void) 52static void halt_loop(void)
79{ 53{
80 printk(KERN_EMERG "System Halted, OK to turn off power\n"); 54 printk(KERN_EMERG "System Halted, OK to turn off power\n");
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
index 1be74e5b4788..dcc2c2f6d67c 100644
--- a/arch/c6x/kernel/traps.c
+++ b/arch/c6x/kernel/traps.c
@@ -31,6 +31,7 @@ void __init trap_init(void)
31void show_regs(struct pt_regs *regs) 31void show_regs(struct pt_regs *regs)
32{ 32{
33 pr_err("\n"); 33 pr_err("\n");
34 show_regs_print_info(KERN_ERR);
34 pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp); 35 pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
35 pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4); 36 pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
36 pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0); 37 pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
@@ -67,15 +68,6 @@ void show_regs(struct pt_regs *regs)
67 pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31); 68 pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
68} 69}
69 70
70void dump_stack(void)
71{
72 unsigned long stack;
73
74 show_stack(current, &stack);
75}
76EXPORT_SYMBOL(dump_stack);
77
78
79void die(char *str, struct pt_regs *fp, int nr) 71void die(char *str, struct pt_regs *fp, int nr)
80{ 72{
81 console_verbose(); 73 console_verbose();
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 89395f09648a..a9fcd89b251b 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -77,37 +77,11 @@ void __init mem_init(void)
77#ifdef CONFIG_BLK_DEV_INITRD 77#ifdef CONFIG_BLK_DEV_INITRD
78void __init free_initrd_mem(unsigned long start, unsigned long end) 78void __init free_initrd_mem(unsigned long start, unsigned long end)
79{ 79{
80 int pages = 0; 80 free_reserved_area(start, end, 0, "initrd");
81 for (; start < end; start += PAGE_SIZE) {
82 ClearPageReserved(virt_to_page(start));
83 init_page_count(virt_to_page(start));
84 free_page(start);
85 totalram_pages++;
86 pages++;
87 }
88 printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
89 (pages * PAGE_SIZE) >> 10);
90} 81}
91#endif 82#endif
92 83
93void __init free_initmem(void) 84void __init free_initmem(void)
94{ 85{
95 unsigned long addr; 86 free_initmem_default(0);
96
97 /*
98 * The following code should be cool even if these sections
99 * are not page aligned.
100 */
101 addr = PAGE_ALIGN((unsigned long)(__init_begin));
102
103 /* next to check that the page we free is not a partial page */
104 for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
105 addr += PAGE_SIZE) {
106 ClearPageReserved(virt_to_page(addr));
107 init_page_count(virt_to_page(addr));
108 free_page(addr);
109 totalram_pages++;
110 }
111 printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n",
112 (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10));
113} 87}
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index b1018750cffb..753e9a03cf87 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -30,8 +30,9 @@ void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
30void default_idle(void) 30void default_idle(void)
31{ 31{
32#ifdef CONFIG_ETRAX_GPIO 32#ifdef CONFIG_ETRAX_GPIO
33 etrax_gpio_wake_up_check(); 33 etrax_gpio_wake_up_check();
34#endif 34#endif
35 local_irq_enable();
35} 36}
36 37
37/* 38/*
@@ -175,6 +176,9 @@ unsigned long get_wchan(struct task_struct *p)
175void show_regs(struct pt_regs * regs) 176void show_regs(struct pt_regs * regs)
176{ 177{
177 unsigned long usp = rdusp(); 178 unsigned long usp = rdusp();
179
180 show_regs_print_info(KERN_DEFAULT);
181
178 printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n", 182 printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n",
179 regs->irp, regs->srp, regs->dccr, usp, regs->mof ); 183 regs->irp, regs->srp, regs->dccr, usp, regs->mof );
180 printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", 184 printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n",
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 2b23ef0e4452..cebd32e2a8fb 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -20,18 +20,12 @@
20 20
21extern void stop_watchdog(void); 21extern void stop_watchdog(void);
22 22
23extern int cris_hlt_counter;
24
25/* We use this if we don't have any better idle routine. */ 23/* We use this if we don't have any better idle routine. */
26void default_idle(void) 24void default_idle(void)
27{ 25{
28 local_irq_disable(); 26 /* Halt until exception. */
29 if (!need_resched() && !cris_hlt_counter) { 27 __asm__ volatile("ei \n\t"
30 /* Halt until exception. */ 28 "halt ");
31 __asm__ volatile("ei \n\t"
32 "halt ");
33 }
34 local_irq_enable();
35} 29}
36 30
37/* 31/*
@@ -170,6 +164,9 @@ get_wchan(struct task_struct *p)
170void show_regs(struct pt_regs * regs) 164void show_regs(struct pt_regs * regs)
171{ 165{
172 unsigned long usp = rdusp(); 166 unsigned long usp = rdusp();
167
168 show_regs_print_info(KERN_DEFAULT);
169
173 printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n", 170 printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n",
174 regs->erp, regs->srp, regs->ccs, usp, regs->mof); 171 regs->erp, regs->srp, regs->ccs, usp, regs->mof);
175 172
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 04a16edd5401..cdd12028de0c 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -145,8 +145,6 @@ smp_boot_one_cpu(int cpuid, struct task_struct idle)
145 * specific stuff such as the local timer and the MMU. */ 145 * specific stuff such as the local timer and the MMU. */
146void __init smp_callin(void) 146void __init smp_callin(void)
147{ 147{
148 extern void cpu_idle(void);
149
150 int cpu = cpu_now_booting; 148 int cpu = cpu_now_booting;
151 reg_intr_vect_rw_mask vect_mask = {0}; 149 reg_intr_vect_rw_mask vect_mask = {0};
152 150
@@ -170,7 +168,7 @@ void __init smp_callin(void)
170 local_irq_enable(); 168 local_irq_enable();
171 169
172 set_cpu_online(cpu, true); 170 set_cpu_online(cpu, true);
173 cpu_idle(); 171 cpu_startup_entry(CPUHP_ONLINE);
174} 172}
175 173
176/* Stop execution on this CPU.*/ 174/* Stop execution on this CPU.*/
diff --git a/arch/cris/arch-v32/mach-a3/Makefile b/arch/cris/arch-v32/mach-a3/Makefile
index d366e0891988..18a227196a41 100644
--- a/arch/cris/arch-v32/mach-a3/Makefile
+++ b/arch/cris/arch-v32/mach-a3/Makefile
@@ -3,7 +3,6 @@
3# 3#
4 4
5obj-y := dma.o pinmux.o io.o arbiter.o 5obj-y := dma.o pinmux.o io.o arbiter.o
6obj-$(CONFIG_CPU_FREQ) += cpufreq.o
7 6
8clean: 7clean:
9 8
diff --git a/arch/cris/arch-v32/mach-a3/cpufreq.c b/arch/cris/arch-v32/mach-a3/cpufreq.c
deleted file mode 100644
index ee391ecb5bc9..000000000000
--- a/arch/cris/arch-v32/mach-a3/cpufreq.c
+++ /dev/null
@@ -1,152 +0,0 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/cpufreq.h>
4#include <hwregs/reg_map.h>
5#include <hwregs/reg_rdwr.h>
6#include <hwregs/clkgen_defs.h>
7#include <hwregs/ddr2_defs.h>
8
9static int
10cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
11 void *data);
12
13static struct notifier_block cris_sdram_freq_notifier_block = {
14 .notifier_call = cris_sdram_freq_notifier
15};
16
17static struct cpufreq_frequency_table cris_freq_table[] = {
18 {0x01, 6000},
19 {0x02, 200000},
20 {0, CPUFREQ_TABLE_END},
21};
22
23static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
24{
25 reg_clkgen_rw_clk_ctrl clk_ctrl;
26 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
27 return clk_ctrl.pll ? 200000 : 6000;
28}
29
30static void cris_freq_set_cpu_state(unsigned int state)
31{
32 int i = 0;
33 struct cpufreq_freqs freqs;
34 reg_clkgen_rw_clk_ctrl clk_ctrl;
35 clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
36
37#ifdef CONFIG_SMP
38 for_each_present_cpu(i)
39#endif
40 {
41 freqs.old = cris_freq_get_cpu_frequency(i);
42 freqs.new = cris_freq_table[state].frequency;
43 freqs.cpu = i;
44 }
45
46 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
47
48 local_irq_disable();
49
50 /* Even though we may be SMP they will share the same clock
51 * so all settings are made on CPU0. */
52 if (cris_freq_table[state].frequency == 200000)
53 clk_ctrl.pll = 1;
54 else
55 clk_ctrl.pll = 0;
56 REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
57
58 local_irq_enable();
59
60 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
61};
62
63static int cris_freq_verify(struct cpufreq_policy *policy)
64{
65 return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
66}
67
68static int cris_freq_target(struct cpufreq_policy *policy,
69 unsigned int target_freq,
70 unsigned int relation)
71{
72 unsigned int newstate = 0;
73
74 if (cpufreq_frequency_table_target(policy, cris_freq_table,
75 target_freq, relation, &newstate))
76 return -EINVAL;
77
78 cris_freq_set_cpu_state(newstate);
79
80 return 0;
81}
82
83static int cris_freq_cpu_init(struct cpufreq_policy *policy)
84{
85 int result;
86
87 /* cpuinfo and default policy values */
88 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
89 policy->cur = cris_freq_get_cpu_frequency(0);
90
91 result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
92 if (result)
93 return (result);
94
95 cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
96
97 return 0;
98}
99
100
101static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
102{
103 cpufreq_frequency_table_put_attr(policy->cpu);
104 return 0;
105}
106
107
108static struct freq_attr *cris_freq_attr[] = {
109 &cpufreq_freq_attr_scaling_available_freqs,
110 NULL,
111};
112
113static struct cpufreq_driver cris_freq_driver = {
114 .get = cris_freq_get_cpu_frequency,
115 .verify = cris_freq_verify,
116 .target = cris_freq_target,
117 .init = cris_freq_cpu_init,
118 .exit = cris_freq_cpu_exit,
119 .name = "cris_freq",
120 .owner = THIS_MODULE,
121 .attr = cris_freq_attr,
122};
123
124static int __init cris_freq_init(void)
125{
126 int ret;
127 ret = cpufreq_register_driver(&cris_freq_driver);
128 cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
129 CPUFREQ_TRANSITION_NOTIFIER);
130 return ret;
131}
132
133static int
134cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
135 void *data)
136{
137 int i;
138 struct cpufreq_freqs *freqs = data;
139 if (val == CPUFREQ_PRECHANGE) {
140 reg_ddr2_rw_cfg cfg =
141 REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
142 cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
143
144 if (freqs->new == 200000)
145 for (i = 0; i < 50000; i++);
146 REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
147 }
148 return 0;
149}
150
151
152module_init(cris_freq_init);
diff --git a/arch/cris/arch-v32/mach-fs/Makefile b/arch/cris/arch-v32/mach-fs/Makefile
index d366e0891988..18a227196a41 100644
--- a/arch/cris/arch-v32/mach-fs/Makefile
+++ b/arch/cris/arch-v32/mach-fs/Makefile
@@ -3,7 +3,6 @@
3# 3#
4 4
5obj-y := dma.o pinmux.o io.o arbiter.o 5obj-y := dma.o pinmux.o io.o arbiter.o
6obj-$(CONFIG_CPU_FREQ) += cpufreq.o
7 6
8clean: 7clean:
9 8
diff --git a/arch/cris/arch-v32/mach-fs/cpufreq.c b/arch/cris/arch-v32/mach-fs/cpufreq.c
deleted file mode 100644
index d92cf70d1cbe..000000000000
--- a/arch/cris/arch-v32/mach-fs/cpufreq.c
+++ /dev/null
@@ -1,145 +0,0 @@
1#include <linux/init.h>
2#include <linux/module.h>
3#include <linux/cpufreq.h>
4#include <hwregs/reg_map.h>
5#include <arch/hwregs/reg_rdwr.h>
6#include <arch/hwregs/config_defs.h>
7#include <arch/hwregs/bif_core_defs.h>
8
9static int
10cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
11 void *data);
12
13static struct notifier_block cris_sdram_freq_notifier_block = {
14 .notifier_call = cris_sdram_freq_notifier
15};
16
17static struct cpufreq_frequency_table cris_freq_table[] = {
18 {0x01, 6000},
19 {0x02, 200000},
20 {0, CPUFREQ_TABLE_END},
21};
22
23static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
24{
25 reg_config_rw_clk_ctrl clk_ctrl;
26 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
27 return clk_ctrl.pll ? 200000 : 6000;
28}
29
30static void cris_freq_set_cpu_state(unsigned int state)
31{
32 int i;
33 struct cpufreq_freqs freqs;
34 reg_config_rw_clk_ctrl clk_ctrl;
35 clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
36
37 for_each_possible_cpu(i) {
38 freqs.old = cris_freq_get_cpu_frequency(i);
39 freqs.new = cris_freq_table[state].frequency;
40 freqs.cpu = i;
41 }
42
43 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
44
45 local_irq_disable();
46
47 /* Even though we may be SMP they will share the same clock
48 * so all settings are made on CPU0. */
49 if (cris_freq_table[state].frequency == 200000)
50 clk_ctrl.pll = 1;
51 else
52 clk_ctrl.pll = 0;
53 REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
54
55 local_irq_enable();
56
57 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
58};
59
60static int cris_freq_verify(struct cpufreq_policy *policy)
61{
62 return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
63}
64
65static int cris_freq_target(struct cpufreq_policy *policy,
66 unsigned int target_freq, unsigned int relation)
67{
68 unsigned int newstate = 0;
69
70 if (cpufreq_frequency_table_target
71 (policy, cris_freq_table, target_freq, relation, &newstate))
72 return -EINVAL;
73
74 cris_freq_set_cpu_state(newstate);
75
76 return 0;
77}
78
79static int cris_freq_cpu_init(struct cpufreq_policy *policy)
80{
81 int result;
82
83 /* cpuinfo and default policy values */
84 policy->cpuinfo.transition_latency = 1000000; /* 1ms */
85 policy->cur = cris_freq_get_cpu_frequency(0);
86
87 result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
88 if (result)
89 return (result);
90
91 cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
92
93 return 0;
94}
95
96static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
97{
98 cpufreq_frequency_table_put_attr(policy->cpu);
99 return 0;
100}
101
102static struct freq_attr *cris_freq_attr[] = {
103 &cpufreq_freq_attr_scaling_available_freqs,
104 NULL,
105};
106
107static struct cpufreq_driver cris_freq_driver = {
108 .get = cris_freq_get_cpu_frequency,
109 .verify = cris_freq_verify,
110 .target = cris_freq_target,
111 .init = cris_freq_cpu_init,
112 .exit = cris_freq_cpu_exit,
113 .name = "cris_freq",
114 .owner = THIS_MODULE,
115 .attr = cris_freq_attr,
116};
117
118static int __init cris_freq_init(void)
119{
120 int ret;
121 ret = cpufreq_register_driver(&cris_freq_driver);
122 cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
123 CPUFREQ_TRANSITION_NOTIFIER);
124 return ret;
125}
126
127static int
128cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
129 void *data)
130{
131 int i;
132 struct cpufreq_freqs *freqs = data;
133 if (val == CPUFREQ_PRECHANGE) {
134 reg_bif_core_rw_sdram_timing timing =
135 REG_RD(bif_core, regi_bif_core, rw_sdram_timing);
136 timing.cpd = (freqs->new == 200000 ? 0 : 1);
137
138 if (freqs->new == 200000)
139 for (i = 0; i < 50000; i++) ;
140 REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
141 }
142 return 0;
143}
144
145module_init(cris_freq_init);
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 675823f70c0f..c0a29b96b92b 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -65,13 +65,6 @@ static inline void release_thread(struct task_struct *dead_task)
65 65
66#define cpu_relax() barrier() 66#define cpu_relax() barrier()
67 67
68/*
69 * disable hlt during certain critical i/o operations
70 */
71#define HAVE_DISABLE_HLT
72void disable_hlt(void);
73void enable_hlt(void);
74
75void default_idle(void); 68void default_idle(void);
76 69
77#endif /* __ASM_CRIS_PROCESSOR_H */ 70#endif /* __ASM_CRIS_PROCESSOR_H */
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index be57a988bfb9..0ff3f6889842 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -34,12 +34,4 @@
34#define __ARCH_WANT_SYS_VFORK 34#define __ARCH_WANT_SYS_VFORK
35#define __ARCH_WANT_SYS_CLONE 35#define __ARCH_WANT_SYS_CLONE
36 36
37/*
38 * "Conditional" syscalls
39 *
40 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
41 * but it doesn't work on all toolchains, so we just do it by hand
42 */
43#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
44
45#endif /* _ASM_CRIS_UNISTD_H_ */ 37#endif /* _ASM_CRIS_UNISTD_H_ */
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 104ff4dd9b98..b78498eb079b 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -29,59 +29,14 @@
29 29
30//#define DEBUG 30//#define DEBUG
31 31
32/*
33 * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
34 * there would ever be a halt sequence (for power save when idle) with
35 * some largish delay when halting or resuming *and* a driver that can't
36 * afford that delay. The hlt_counter would then be checked before
37 * executing the halt sequence, and the driver marks the unhaltable
38 * region by enable_hlt/disable_hlt.
39 */
40
41int cris_hlt_counter=0;
42
43void disable_hlt(void)
44{
45 cris_hlt_counter++;
46}
47
48EXPORT_SYMBOL(disable_hlt);
49
50void enable_hlt(void)
51{
52 cris_hlt_counter--;
53}
54
55EXPORT_SYMBOL(enable_hlt);
56
57extern void default_idle(void); 32extern void default_idle(void);
58 33
59void (*pm_power_off)(void); 34void (*pm_power_off)(void);
60EXPORT_SYMBOL(pm_power_off); 35EXPORT_SYMBOL(pm_power_off);
61 36
62/* 37void arch_cpu_idle(void)
63 * The idle thread. There's no useful work to be
64 * done, so just try to conserve power and have a
65 * low exit latency (ie sit in a loop waiting for
66 * somebody to say that they'd like to reschedule)
67 */
68
69void cpu_idle (void)
70{ 38{
71 /* endless idle loop with no priority at all */ 39 default_idle();
72 while (1) {
73 rcu_idle_enter();
74 while (!need_resched()) {
75 /*
76 * Mark this as an RCU critical section so that
77 * synchronize_kernel() in the unload path waits
78 * for our completion.
79 */
80 default_idle();
81 }
82 rcu_idle_exit();
83 schedule_preempt_disabled();
84 }
85} 40}
86 41
87void hard_reset_now (void); 42void hard_reset_now (void);
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index a11ad3229f8c..0ffda73734f5 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -147,13 +147,6 @@ show_stack(void)
147#endif 147#endif
148 148
149void 149void
150dump_stack(void)
151{
152 show_stack(NULL, NULL);
153}
154EXPORT_SYMBOL(dump_stack);
155
156void
157set_nmi_handler(void (*handler)(struct pt_regs *)) 150set_nmi_handler(void (*handler)(struct pt_regs *))
158{ 151{
159 nmi_handler = handler; 152 nmi_handler = handler;
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index d72ab58fd83e..9ac80946dada 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -12,12 +12,10 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <asm/tlb.h> 14#include <asm/tlb.h>
15#include <asm/sections.h>
15 16
16unsigned long empty_zero_page; 17unsigned long empty_zero_page;
17 18
18extern char _stext, _edata, _etext; /* From linkerscript */
19extern char __init_begin, __init_end;
20
21void __init 19void __init
22mem_init(void) 20mem_init(void)
23{ 21{
@@ -67,15 +65,5 @@ mem_init(void)
67void 65void
68free_initmem(void) 66free_initmem(void)
69{ 67{
70 unsigned long addr; 68 free_initmem_default(0);
71
72 addr = (unsigned long)(&__init_begin);
73 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
74 ClearPageReserved(virt_to_page(addr));
75 init_page_count(virt_to_page(addr));
76 free_page(addr);
77 totalram_pages++;
78 }
79 printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
80 (unsigned long)((&__init_end - &__init_begin) >> 10));
81} 69}
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 4cfcc7bba25a..70ec7293dce7 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -31,14 +31,4 @@
31#define __ARCH_WANT_SYS_VFORK 31#define __ARCH_WANT_SYS_VFORK
32#define __ARCH_WANT_SYS_CLONE 32#define __ARCH_WANT_SYS_CLONE
33 33
34/*
35 * "Conditional" syscalls
36 *
37 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
38 * but it doesn't work on all toolchains, so we just do it by hand
39 */
40#ifndef cond_syscall
41#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
42#endif
43
44#endif /* _ASM_UNISTD_H_ */ 34#endif /* _ASM_UNISTD_H_ */
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 23916b2a12a2..5d40aeb7712e 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -59,29 +59,12 @@ static void core_sleep_idle(void)
59 mb(); 59 mb();
60} 60}
61 61
62void (*idle)(void) = core_sleep_idle; 62void arch_cpu_idle(void)
63
64/*
65 * The idle thread. There's no useful work to be
66 * done, so just try to conserve power and have a
67 * low exit latency (ie sit in a loop waiting for
68 * somebody to say that they'd like to reschedule)
69 */
70void cpu_idle(void)
71{ 63{
72 /* endless idle loop with no priority at all */ 64 if (!frv_dma_inprogress)
73 while (1) { 65 core_sleep_idle();
74 rcu_idle_enter(); 66 else
75 while (!need_resched()) { 67 local_irq_enable();
76 check_pgt_cache();
77
78 if (!frv_dma_inprogress && idle)
79 idle();
80 }
81 rcu_idle_exit();
82
83 schedule_preempt_disabled();
84 }
85} 68}
86 69
87void machine_restart(char * __unused) 70void machine_restart(char * __unused)
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 5cfd1420b091..4bff48c19d29 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -466,17 +466,6 @@ asmlinkage void compound_exception(unsigned long esfr1,
466 BUG(); 466 BUG();
467} /* end compound_exception() */ 467} /* end compound_exception() */
468 468
469/*****************************************************************************/
470/*
471 * The architecture-independent backtrace generator
472 */
473void dump_stack(void)
474{
475 show_stack(NULL, NULL);
476}
477
478EXPORT_SYMBOL(dump_stack);
479
480void show_stack(struct task_struct *task, unsigned long *sp) 469void show_stack(struct task_struct *task, unsigned long *sp)
481{ 470{
482} 471}
@@ -508,6 +497,7 @@ void show_regs(struct pt_regs *regs)
508 int loop; 497 int loop;
509 498
510 printk("\n"); 499 printk("\n");
500 show_regs_print_info(KERN_DEFAULT);
511 501
512 printk("Frame: @%08lx [%s]\n", 502 printk("Frame: @%08lx [%s]\n",
513 (unsigned long) regs, 503 (unsigned long) regs,
@@ -522,8 +512,6 @@ void show_regs(struct pt_regs *regs)
522 else 512 else
523 printk(" | "); 513 printk(" | ");
524 } 514 }
525
526 printk("Process %s (pid: %d)\n", current->comm, current->pid);
527} 515}
528 516
529void die_if_kernel(const char *str, ...) 517void die_if_kernel(const char *str, ...)
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 92e97b0894a6..dee354fa6b64 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -122,7 +122,7 @@ void __init mem_init(void)
122#endif 122#endif
123 int codek = 0, datak = 0; 123 int codek = 0, datak = 0;
124 124
125 /* this will put all memory onto the freelists */ 125 /* this will put all low memory onto the freelists */
126 totalram_pages = free_all_bootmem(); 126 totalram_pages = free_all_bootmem();
127 127
128#ifdef CONFIG_MMU 128#ifdef CONFIG_MMU
@@ -131,14 +131,8 @@ void __init mem_init(void)
131 datapages++; 131 datapages++;
132 132
133#ifdef CONFIG_HIGHMEM 133#ifdef CONFIG_HIGHMEM
134 for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) { 134 for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--)
135 struct page *page = &mem_map[pfn]; 135 free_highmem_page(&mem_map[pfn]);
136
137 ClearPageReserved(page);
138 init_page_count(page);
139 __free_page(page);
140 totalram_pages++;
141 }
142#endif 136#endif
143 137
144 codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; 138 codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
@@ -168,21 +162,7 @@ void __init mem_init(void)
168void free_initmem(void) 162void free_initmem(void)
169{ 163{
170#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) 164#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
171 unsigned long start, end, addr; 165 free_initmem_default(0);
172
173 start = PAGE_ALIGN((unsigned long) &__init_begin); /* round up */
174 end = ((unsigned long) &__init_end) & PAGE_MASK; /* round down */
175
176 /* next to check that the page we free is not a partial page */
177 for (addr = start; addr < end; addr += PAGE_SIZE) {
178 ClearPageReserved(virt_to_page(addr));
179 init_page_count(virt_to_page(addr));
180 free_page(addr);
181 totalram_pages++;
182 }
183
184 printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
185 (end - start) >> 10, start, end);
186#endif 166#endif
187} /* end free_initmem() */ 167} /* end free_initmem() */
188 168
@@ -193,14 +173,6 @@ void free_initmem(void)
193#ifdef CONFIG_BLK_DEV_INITRD 173#ifdef CONFIG_BLK_DEV_INITRD
194void __init free_initrd_mem(unsigned long start, unsigned long end) 174void __init free_initrd_mem(unsigned long start, unsigned long end)
195{ 175{
196 int pages = 0; 176 free_reserved_area(start, end, 0, "initrd");
197 for (; start < end; start += PAGE_SIZE) {
198 ClearPageReserved(virt_to_page(start));
199 init_page_count(virt_to_page(start));
200 free_page(start);
201 totalram_pages++;
202 pages++;
203 }
204 printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);
205} /* end free_initrd_mem() */ 177} /* end free_initrd_mem() */
206#endif 178#endif
diff --git a/arch/h8300/include/asm/linkage.h b/arch/h8300/include/asm/linkage.h
index 6f4df7d46180..1d81604fb0ad 100644
--- a/arch/h8300/include/asm/linkage.h
+++ b/arch/h8300/include/asm/linkage.h
@@ -2,7 +2,5 @@
2#define _H8300_LINKAGE_H 2#define _H8300_LINKAGE_H
3 3
4#undef SYMBOL_NAME_LABEL 4#undef SYMBOL_NAME_LABEL
5#undef SYMBOL_NAME
6#define SYMBOL_NAME_LABEL(_name_) _##_name_##: 5#define SYMBOL_NAME_LABEL(_name_) _##_name_##:
7#define SYMBOL_NAME(_name_) _##_name_
8#endif 6#endif
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
index 6721856d841b..ab671ecf5196 100644
--- a/arch/h8300/include/asm/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
@@ -33,11 +33,4 @@
33#define __ARCH_WANT_SYS_VFORK 33#define __ARCH_WANT_SYS_VFORK
34#define __ARCH_WANT_SYS_CLONE 34#define __ARCH_WANT_SYS_CLONE
35 35
36/*
37 * "Conditional" syscalls
38 */
39#define cond_syscall(name) \
40 asm (".weak\t_" #name "\n" \
41 ".set\t_" #name ",_sys_ni_syscall");
42
43#endif /* _ASM_H8300_UNISTD_H_ */ 36#endif /* _ASM_H8300_UNISTD_H_ */
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index b609f63f1590..1a744ab7e7e5 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -53,40 +53,13 @@ asmlinkage void ret_from_kernel_thread(void);
53 * The idle loop on an H8/300.. 53 * The idle loop on an H8/300..
54 */ 54 */
55#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) 55#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
56static void default_idle(void) 56void arch_cpu_idle(void)
57{ 57{
58 local_irq_disable(); 58 local_irq_enable();
59 if (!need_resched()) { 59 /* XXX: race here! What if need_resched() gets set now? */
60 local_irq_enable(); 60 __asm__("sleep");
61 /* XXX: race here! What if need_resched() gets set now? */
62 __asm__("sleep");
63 } else
64 local_irq_enable();
65}
66#else
67static void default_idle(void)
68{
69 cpu_relax();
70} 61}
71#endif 62#endif
72void (*idle)(void) = default_idle;
73
74/*
75 * The idle thread. There's no useful work to be
76 * done, so just try to conserve power and have a
77 * low exit latency (ie sit in a loop waiting for
78 * somebody to say that they'd like to reschedule)
79 */
80void cpu_idle(void)
81{
82 while (1) {
83 rcu_idle_enter();
84 while (!need_resched())
85 idle();
86 rcu_idle_exit();
87 schedule_preempt_disabled();
88 }
89}
90 63
91void machine_restart(char * __unused) 64void machine_restart(char * __unused)
92{ 65{
@@ -110,6 +83,8 @@ void machine_power_off(void)
110 83
111void show_regs(struct pt_regs * regs) 84void show_regs(struct pt_regs * regs)
112{ 85{
86 show_regs_print_info(KERN_DEFAULT);
87
113 printk("\nPC: %08lx Status: %02x", 88 printk("\nPC: %08lx Status: %02x",
114 regs->pc, regs->ccr); 89 regs->pc, regs->ccr);
115 printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx", 90 printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index 7833aa3e7c7d..cfe494dbe3da 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -164,10 +164,3 @@ void show_trace_task(struct task_struct *tsk)
164{ 164{
165 show_stack(tsk,(unsigned long *)tsk->thread.esp0); 165 show_stack(tsk,(unsigned long *)tsk->thread.esp0);
166} 166}
167
168void dump_stack(void)
169{
170 show_stack(NULL,NULL);
171}
172
173EXPORT_SYMBOL(dump_stack);
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 981e25094b1a..ff349d70a29b 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -139,7 +139,7 @@ void __init mem_init(void)
139 start_mem = PAGE_ALIGN(start_mem); 139 start_mem = PAGE_ALIGN(start_mem);
140 max_mapnr = num_physpages = MAP_NR(high_memory); 140 max_mapnr = num_physpages = MAP_NR(high_memory);
141 141
142 /* this will put all memory onto the freelists */ 142 /* this will put all low memory onto the freelists */
143 totalram_pages = free_all_bootmem(); 143 totalram_pages = free_all_bootmem();
144 144
145 codek = (_etext - _stext) >> 10; 145 codek = (_etext - _stext) >> 10;
@@ -161,15 +161,7 @@ void __init mem_init(void)
161#ifdef CONFIG_BLK_DEV_INITRD 161#ifdef CONFIG_BLK_DEV_INITRD
162void free_initrd_mem(unsigned long start, unsigned long end) 162void free_initrd_mem(unsigned long start, unsigned long end)
163{ 163{
164 int pages = 0; 164 free_reserved_area(start, end, 0, "initrd");
165 for (; start < end; start += PAGE_SIZE) {
166 ClearPageReserved(virt_to_page(start));
167 init_page_count(virt_to_page(start));
168 free_page(start);
169 totalram_pages++;
170 pages++;
171 }
172 printk ("Freeing initrd memory: %dk freed\n", pages);
173} 165}
174#endif 166#endif
175 167
@@ -177,23 +169,7 @@ void
177free_initmem(void) 169free_initmem(void)
178{ 170{
179#ifdef CONFIG_RAMKERNEL 171#ifdef CONFIG_RAMKERNEL
180 unsigned long addr; 172 free_initmem_default(0);
181/*
182 * the following code should be cool even if these sections
183 * are not page aligned.
184 */
185 addr = PAGE_ALIGN((unsigned long)(__init_begin));
186 /* next to check that the page we free is not a partial page */
187 for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
188 ClearPageReserved(virt_to_page(addr));
189 init_page_count(virt_to_page(addr));
190 free_page(addr);
191 totalram_pages++;
192 }
193 printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
194 (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
195 (int)(PAGE_ALIGN((unsigned long)__init_begin)),
196 (int)(addr - PAGE_SIZE));
197#endif 173#endif
198} 174}
199 175
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 7fcc636b6238..0a0dd5c05b46 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -52,28 +52,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
52 * If hardware or VM offer wait termination even though interrupts 52 * If hardware or VM offer wait termination even though interrupts
53 * are disabled. 53 * are disabled.
54 */ 54 */
55static void default_idle(void) 55void arch_cpu_idle(void)
56{ 56{
57 __vmwait(); 57 __vmwait();
58} 58 /* interrupts wake us up, but irqs are still disabled */
59 59 local_irq_enable();
60void (*idle_sleep)(void) = default_idle;
61
62void cpu_idle(void)
63{
64 while (1) {
65 tick_nohz_idle_enter();
66 local_irq_disable();
67 while (!need_resched()) {
68 idle_sleep();
69 /* interrupts wake us up, but aren't serviced */
70 local_irq_enable(); /* service interrupt */
71 local_irq_disable();
72 }
73 local_irq_enable();
74 tick_nohz_idle_exit();
75 schedule();
76 }
77} 60}
78 61
79/* 62/*
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 8e095dffd070..0e364ca43198 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -184,7 +184,7 @@ void __cpuinit start_secondary(void)
184 184
185 local_irq_enable(); 185 local_irq_enable();
186 186
187 cpu_idle(); 187 cpu_startup_entry(CPUHP_ONLINE);
188} 188}
189 189
190 190
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index aaf53f883710..7858663352b9 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -195,14 +195,6 @@ void show_stack(struct task_struct *task, unsigned long *fp)
195 do_show_stack(task, fp, 0); 195 do_show_stack(task, fp, 0);
196} 196}
197 197
198void dump_stack(void)
199{
200 unsigned long *fp;
201 asm("%0 = r30" : "=r" (fp));
202 show_stack(current, fp);
203}
204EXPORT_SYMBOL(dump_stack);
205
206int die(const char *str, struct pt_regs *regs, long err) 198int die(const char *str, struct pt_regs *regs, long err)
207{ 199{
208 static struct { 200 static struct {
diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c
index 3e4453091889..741aaa917cda 100644
--- a/arch/hexagon/kernel/vm_events.c
+++ b/arch/hexagon/kernel/vm_events.c
@@ -33,6 +33,8 @@
33 */ 33 */
34void show_regs(struct pt_regs *regs) 34void show_regs(struct pt_regs *regs)
35{ 35{
36 show_regs_print_info(KERN_EMERG);
37
36 printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n", 38 printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
37 regs->restart_r0, regs->syscall_nr); 39 regs->restart_r0, regs->syscall_nr);
38 printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds); 40 printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9a02f71c6b1f..e725ea01569e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -187,7 +187,7 @@ config IA64_DIG
187 187
188config IA64_DIG_VTD 188config IA64_DIG_VTD
189 bool "DIG+Intel+IOMMU" 189 bool "DIG+Intel+IOMMU"
190 select DMAR 190 select INTEL_IOMMU
191 select PCI_MSI 191 select PCI_MSI
192 192
193config IA64_HP_ZX1 193config IA64_HP_ZX1
@@ -591,9 +591,9 @@ source "kernel/power/Kconfig"
591source "drivers/acpi/Kconfig" 591source "drivers/acpi/Kconfig"
592 592
593if PM 593if PM
594 594menu "CPU Frequency scaling"
595source "arch/ia64/kernel/cpufreq/Kconfig" 595source "drivers/cpufreq/Kconfig"
596 596endmenu
597endif 597endif
598 598
599endmenu 599endmenu
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index da2f319fb71d..e70cadec7ce6 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -142,8 +142,7 @@ static void transmit_chars(struct tty_struct *tty, struct serial_state *info,
142 goto out; 142 goto out;
143 } 143 }
144 144
145 if (info->xmit.head == info->xmit.tail || tty->stopped || 145 if (info->xmit.head == info->xmit.tail || tty->stopped) {
146 tty->hw_stopped) {
147#ifdef SIMSERIAL_DEBUG 146#ifdef SIMSERIAL_DEBUG
148 printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", 147 printk("transmit_chars: head=%d, tail=%d, stopped=%d\n",
149 info->xmit.head, info->xmit.tail, tty->stopped); 148 info->xmit.head, info->xmit.tail, tty->stopped);
@@ -181,7 +180,7 @@ static void rs_flush_chars(struct tty_struct *tty)
181 struct serial_state *info = tty->driver_data; 180 struct serial_state *info = tty->driver_data;
182 181
183 if (info->xmit.head == info->xmit.tail || tty->stopped || 182 if (info->xmit.head == info->xmit.tail || tty->stopped ||
184 tty->hw_stopped || !info->xmit.buf) 183 !info->xmit.buf)
185 return; 184 return;
186 185
187 transmit_chars(tty, info, NULL); 186 transmit_chars(tty, info, NULL);
@@ -217,7 +216,7 @@ static int rs_write(struct tty_struct * tty,
217 * Hey, we transmit directly from here in our case 216 * Hey, we transmit directly from here in our case
218 */ 217 */
219 if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && 218 if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) &&
220 !tty->stopped && !tty->hw_stopped) 219 !tty->stopped)
221 transmit_chars(tty, info, NULL); 220 transmit_chars(tty, info, NULL);
222 221
223 return ret; 222 return ret;
@@ -325,14 +324,6 @@ static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
325 324
326#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 325#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
327 326
328static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
329{
330 /* Handle turning off CRTSCTS */
331 if ((old_termios->c_cflag & CRTSCTS) &&
332 !(tty->termios.c_cflag & CRTSCTS)) {
333 tty->hw_stopped = 0;
334 }
335}
336/* 327/*
337 * This routine will shutdown a serial port; interrupts are disabled, and 328 * This routine will shutdown a serial port; interrupts are disabled, and
338 * DTR is dropped if the hangup on close termio flag is on. 329 * DTR is dropped if the hangup on close termio flag is on.
@@ -481,7 +472,6 @@ static const struct tty_operations hp_ops = {
481 .throttle = rs_throttle, 472 .throttle = rs_throttle,
482 .unthrottle = rs_unthrottle, 473 .unthrottle = rs_unthrottle,
483 .send_xchar = rs_send_xchar, 474 .send_xchar = rs_send_xchar,
484 .set_termios = rs_set_termios,
485 .hangup = rs_hangup, 475 .hangup = rs_hangup,
486 .proc_fops = &rs_proc_fops, 476 .proc_fops = &rs_proc_fops,
487}; 477};
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index d2bf1fd5e44f..76acbcd5c060 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
106 return -EFAULT; 106 return -EFAULT;
107 107
108 { 108 {
109 register unsigned long r8 __asm ("r8"); 109 register unsigned long r8 __asm ("r8") = 0;
110 unsigned long prev; 110 unsigned long prev;
111 __asm__ __volatile__( 111 __asm__ __volatile__(
112 " mf;; \n" 112 " mf;; \n"
113 " mov %0=r0 \n"
114 " mov ar.ccv=%4;; \n" 113 " mov ar.ccv=%4;; \n"
115 "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" 114 "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
116 " .xdata4 \"__ex_table\", 1b-., 2f-. \n" 115 " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
117 "[2:]" 116 "[2:]"
118 : "=r" (r8), "=r" (prev) 117 : "+r" (r8), "=&r" (prev)
119 : "r" (uaddr), "r" (newval), 118 : "r" (uaddr), "r" (newval),
120 "rO" ((long) (unsigned) oldval) 119 "rO" ((long) (unsigned) oldval)
121 : "memory"); 120 : "memory");
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
index 94eaa5bd5d0c..aa910054b8e7 100644
--- a/arch/ia64/include/asm/hugetlb.h
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_IA64_HUGETLB_H 2#define _ASM_IA64_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, 8void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index 2b68d856dc78..1bf2cf2f4ab4 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -89,6 +89,7 @@ static inline bool arch_irqs_disabled(void)
89 89
90static inline void arch_safe_halt(void) 90static inline void arch_safe_halt(void)
91{ 91{
92 arch_local_irq_enable();
92 ia64_pal_halt_light(); /* PAL_HALT_LIGHT */ 93 ia64_pal_halt_light(); /* PAL_HALT_LIGHT */
93} 94}
94 95
diff --git a/arch/ia64/include/asm/linkage.h b/arch/ia64/include/asm/linkage.h
index ef22a45c1890..787575701f1c 100644
--- a/arch/ia64/include/asm/linkage.h
+++ b/arch/ia64/include/asm/linkage.h
@@ -11,4 +11,8 @@
11 11
12#endif 12#endif
13 13
14#define cond_syscall(x) asm(".weak\t" #x "#\n" #x "#\t=\tsys_ni_syscall#")
15#define SYSCALL_ALIAS(alias, name) \
16 asm ( #alias "# = " #name "#\n\t.globl " #alias "#")
17
14#endif 18#endif
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab18fa0..8c7096168716 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
143extern int cpe_vector; 143extern int cpe_vector;
144extern int ia64_cpe_irq; 144extern int ia64_cpe_irq;
145extern void ia64_mca_init(void); 145extern void ia64_mca_init(void);
146extern void ia64_mca_irq_init(void);
146extern void ia64_mca_cpu_init(void *); 147extern void ia64_mca_cpu_init(void *);
147extern void ia64_os_mca_dispatch(void); 148extern void ia64_os_mca_dispatch(void);
148extern void ia64_os_mca_dispatch_end(void); 149extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 2e27ef175652..2db0a6c6daa5 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr);
67 67
68extern void map_cpu_to_node(int cpu, int nid); 68extern void map_cpu_to_node(int cpu, int nid);
69extern void unmap_cpu_from_node(int cpu, int nid); 69extern void unmap_cpu_from_node(int cpu, int nid);
70 70extern void numa_clear_node(int cpu);
71 71
72#else /* !CONFIG_NUMA */ 72#else /* !CONFIG_NUMA */
73#define map_cpu_to_node(cpu, nid) do{}while(0) 73#define map_cpu_to_node(cpu, nid) do{}while(0)
74#define unmap_cpu_from_node(cpu, nid) do{}while(0) 74#define unmap_cpu_from_node(cpu, nid) do{}while(0)
75
76#define paddr_to_nid(addr) 0 75#define paddr_to_nid(addr) 0
77 76#define numa_clear_node(cpu) do { } while (0)
78#endif /* CONFIG_NUMA */ 77#endif /* CONFIG_NUMA */
79 78
80#endif /* _ASM_IA64_NUMA_H */ 79#endif /* _ASM_IA64_NUMA_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 020d655ed082..cade13dd0299 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -131,8 +131,6 @@ struct thread_info {
131#define TS_POLLING 1 /* true if in idle loop and not sleeping */ 131#define TS_POLLING 1 /* true if in idle loop and not sleeping */
132#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */ 132#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */
133 133
134#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
135
136#ifndef __ASSEMBLY__ 134#ifndef __ASSEMBLY__
137#define HAVE_SET_RESTORE_SIGMASK 1 135#define HAVE_SET_RESTORE_SIGMASK 1
138static inline void set_restore_sigmask(void) 136static inline void set_restore_sigmask(void)
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 096373800f73..afd45e0d552e 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -46,15 +46,5 @@ asmlinkage unsigned long sys_mmap2(
46struct pt_regs; 46struct pt_regs;
47asmlinkage long sys_ia64_pipe(void); 47asmlinkage long sys_ia64_pipe(void);
48 48
49/*
50 * "Conditional" syscalls
51 *
52 * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in
53 * kernel/sys_ni.c. This version causes warnings because the declaration isn't a
54 * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
55 * declarations have prototypes at the moment.
56 */
57#define cond_syscall(x) asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
58
59#endif /* !__ASSEMBLY__ */ 49#endif /* !__ASSEMBLY__ */
60#endif /* _ASM_IA64_UNISTD_H */ 50#endif /* _ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index d959c84904be..20678a9ed11a 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o
23obj-$(CONFIG_NUMA) += numa.o 23obj-$(CONFIG_NUMA) += numa.o
24obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o 24obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
25obj-$(CONFIG_IA64_CYCLONE) += cyclone.o 25obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
26obj-$(CONFIG_CPU_FREQ) += cpufreq/
27obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 26obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
28obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 27obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
29obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 28obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig
deleted file mode 100644
index 2d9d5279b981..000000000000
--- a/arch/ia64/kernel/cpufreq/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
1
2#
3# CPU Frequency scaling
4#
5
6menu "CPU Frequency scaling"
7
8source "drivers/cpufreq/Kconfig"
9
10if CPU_FREQ
11
12comment "CPUFreq processor drivers"
13
14config IA64_ACPI_CPUFREQ
15 tristate "ACPI Processor P-States driver"
16 select CPU_FREQ_TABLE
17 depends on ACPI_PROCESSOR
18 help
19 This driver adds a CPUFreq driver which utilizes the ACPI
20 Processor Performance States.
21
22 For details, take a look at <file:Documentation/cpu-freq/>.
23
24 If in doubt, say N.
25
26endif # CPU_FREQ
27
28endmenu
29
diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile
deleted file mode 100644
index 4838f2a57c7a..000000000000
--- a/arch/ia64/kernel/cpufreq/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
2
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
deleted file mode 100644
index f09b174244d5..000000000000
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ /dev/null
@@ -1,437 +0,0 @@
1/*
2 * arch/ia64/kernel/cpufreq/acpi-cpufreq.c
3 * This file provides the ACPI based P-state support. This
4 * module works with generic cpufreq infrastructure. Most of
5 * the code is based on i386 version
6 * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
7 *
8 * Copyright (C) 2005 Intel Corp
9 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
10 */
11
12#include <linux/kernel.h>
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/cpufreq.h>
17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
19#include <asm/io.h>
20#include <asm/uaccess.h>
21#include <asm/pal.h>
22
23#include <linux/acpi.h>
24#include <acpi/processor.h>
25
26MODULE_AUTHOR("Venkatesh Pallipadi");
27MODULE_DESCRIPTION("ACPI Processor P-States Driver");
28MODULE_LICENSE("GPL");
29
30
31struct cpufreq_acpi_io {
32 struct acpi_processor_performance acpi_data;
33 struct cpufreq_frequency_table *freq_table;
34 unsigned int resume;
35};
36
37static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
38
39static struct cpufreq_driver acpi_cpufreq_driver;
40
41
42static int
43processor_set_pstate (
44 u32 value)
45{
46 s64 retval;
47
48 pr_debug("processor_set_pstate\n");
49
50 retval = ia64_pal_set_pstate((u64)value);
51
52 if (retval) {
53 pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
54 value, retval);
55 return -ENODEV;
56 }
57 return (int)retval;
58}
59
60
61static int
62processor_get_pstate (
63 u32 *value)
64{
65 u64 pstate_index = 0;
66 s64 retval;
67
68 pr_debug("processor_get_pstate\n");
69
70 retval = ia64_pal_get_pstate(&pstate_index,
71 PAL_GET_PSTATE_TYPE_INSTANT);
72 *value = (u32) pstate_index;
73
74 if (retval)
75 pr_debug("Failed to get current freq with "
76 "error 0x%lx, idx 0x%x\n", retval, *value);
77
78 return (int)retval;
79}
80
81
82/* To be used only after data->acpi_data is initialized */
83static unsigned
84extract_clock (
85 struct cpufreq_acpi_io *data,
86 unsigned value,
87 unsigned int cpu)
88{
89 unsigned long i;
90
91 pr_debug("extract_clock\n");
92
93 for (i = 0; i < data->acpi_data.state_count; i++) {
94 if (value == data->acpi_data.states[i].status)
95 return data->acpi_data.states[i].core_frequency;
96 }
97 return data->acpi_data.states[i-1].core_frequency;
98}
99
100
101static unsigned int
102processor_get_freq (
103 struct cpufreq_acpi_io *data,
104 unsigned int cpu)
105{
106 int ret = 0;
107 u32 value = 0;
108 cpumask_t saved_mask;
109 unsigned long clock_freq;
110
111 pr_debug("processor_get_freq\n");
112
113 saved_mask = current->cpus_allowed;
114 set_cpus_allowed_ptr(current, cpumask_of(cpu));
115 if (smp_processor_id() != cpu)
116 goto migrate_end;
117
118 /* processor_get_pstate gets the instantaneous frequency */
119 ret = processor_get_pstate(&value);
120
121 if (ret) {
122 set_cpus_allowed_ptr(current, &saved_mask);
123 printk(KERN_WARNING "get performance failed with error %d\n",
124 ret);
125 ret = 0;
126 goto migrate_end;
127 }
128 clock_freq = extract_clock(data, value, cpu);
129 ret = (clock_freq*1000);
130
131migrate_end:
132 set_cpus_allowed_ptr(current, &saved_mask);
133 return ret;
134}
135
136
137static int
138processor_set_freq (
139 struct cpufreq_acpi_io *data,
140 unsigned int cpu,
141 int state)
142{
143 int ret = 0;
144 u32 value = 0;
145 struct cpufreq_freqs cpufreq_freqs;
146 cpumask_t saved_mask;
147 int retval;
148
149 pr_debug("processor_set_freq\n");
150
151 saved_mask = current->cpus_allowed;
152 set_cpus_allowed_ptr(current, cpumask_of(cpu));
153 if (smp_processor_id() != cpu) {
154 retval = -EAGAIN;
155 goto migrate_end;
156 }
157
158 if (state == data->acpi_data.state) {
159 if (unlikely(data->resume)) {
160 pr_debug("Called after resume, resetting to P%d\n", state);
161 data->resume = 0;
162 } else {
163 pr_debug("Already at target state (P%d)\n", state);
164 retval = 0;
165 goto migrate_end;
166 }
167 }
168
169 pr_debug("Transitioning from P%d to P%d\n",
170 data->acpi_data.state, state);
171
172 /* cpufreq frequency struct */
173 cpufreq_freqs.cpu = cpu;
174 cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
175 cpufreq_freqs.new = data->freq_table[state].frequency;
176
177 /* notify cpufreq */
178 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
179
180 /*
181 * First we write the target state's 'control' value to the
182 * control_register.
183 */
184
185 value = (u32) data->acpi_data.states[state].control;
186
187 pr_debug("Transitioning to state: 0x%08x\n", value);
188
189 ret = processor_set_pstate(value);
190 if (ret) {
191 unsigned int tmp = cpufreq_freqs.new;
192 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
193 cpufreq_freqs.new = cpufreq_freqs.old;
194 cpufreq_freqs.old = tmp;
195 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
196 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
197 printk(KERN_WARNING "Transition failed with error %d\n", ret);
198 retval = -ENODEV;
199 goto migrate_end;
200 }
201
202 cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
203
204 data->acpi_data.state = state;
205
206 retval = 0;
207
208migrate_end:
209 set_cpus_allowed_ptr(current, &saved_mask);
210 return (retval);
211}
212
213
214static unsigned int
215acpi_cpufreq_get (
216 unsigned int cpu)
217{
218 struct cpufreq_acpi_io *data = acpi_io_data[cpu];
219
220 pr_debug("acpi_cpufreq_get\n");
221
222 return processor_get_freq(data, cpu);
223}
224
225
226static int
227acpi_cpufreq_target (
228 struct cpufreq_policy *policy,
229 unsigned int target_freq,
230 unsigned int relation)
231{
232 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
233 unsigned int next_state = 0;
234 unsigned int result = 0;
235
236	pr_debug("acpi_cpufreq_target\n");
237
238 result = cpufreq_frequency_table_target(policy,
239 data->freq_table, target_freq, relation, &next_state);
240 if (result)
241 return (result);
242
243 result = processor_set_freq(data, policy->cpu, next_state);
244
245 return (result);
246}
247
248
249static int
250acpi_cpufreq_verify (
251 struct cpufreq_policy *policy)
252{
253 unsigned int result = 0;
254 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
255
256 pr_debug("acpi_cpufreq_verify\n");
257
258 result = cpufreq_frequency_table_verify(policy,
259 data->freq_table);
260
261 return (result);
262}
263
264
265static int
266acpi_cpufreq_cpu_init (
267 struct cpufreq_policy *policy)
268{
269 unsigned int i;
270 unsigned int cpu = policy->cpu;
271 struct cpufreq_acpi_io *data;
272 unsigned int result = 0;
273
274 pr_debug("acpi_cpufreq_cpu_init\n");
275
276 data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
277 if (!data)
278 return (-ENOMEM);
279
280 acpi_io_data[cpu] = data;
281
282 result = acpi_processor_register_performance(&data->acpi_data, cpu);
283
284 if (result)
285 goto err_free;
286
287 /* capability check */
288 if (data->acpi_data.state_count <= 1) {
289 pr_debug("No P-States\n");
290 result = -ENODEV;
291 goto err_unreg;
292 }
293
294 if ((data->acpi_data.control_register.space_id !=
295 ACPI_ADR_SPACE_FIXED_HARDWARE) ||
296 (data->acpi_data.status_register.space_id !=
297 ACPI_ADR_SPACE_FIXED_HARDWARE)) {
298 pr_debug("Unsupported address space [%d, %d]\n",
299 (u32) (data->acpi_data.control_register.space_id),
300 (u32) (data->acpi_data.status_register.space_id));
301 result = -ENODEV;
302 goto err_unreg;
303 }
304
305 /* alloc freq_table */
306 data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
307 (data->acpi_data.state_count + 1),
308 GFP_KERNEL);
309 if (!data->freq_table) {
310 result = -ENOMEM;
311 goto err_unreg;
312 }
313
314 /* detect transition latency */
315 policy->cpuinfo.transition_latency = 0;
316 for (i=0; i<data->acpi_data.state_count; i++) {
317 if ((data->acpi_data.states[i].transition_latency * 1000) >
318 policy->cpuinfo.transition_latency) {
319 policy->cpuinfo.transition_latency =
320 data->acpi_data.states[i].transition_latency * 1000;
321 }
322 }
323 policy->cur = processor_get_freq(data, policy->cpu);
324
325 /* table init */
326 for (i = 0; i <= data->acpi_data.state_count; i++)
327 {
328 data->freq_table[i].index = i;
329 if (i < data->acpi_data.state_count) {
330 data->freq_table[i].frequency =
331 data->acpi_data.states[i].core_frequency * 1000;
332 } else {
333 data->freq_table[i].frequency = CPUFREQ_TABLE_END;
334 }
335 }
336
337 result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
338 if (result) {
339 goto err_freqfree;
340 }
341
342 /* notify BIOS that we exist */
343 acpi_processor_notify_smm(THIS_MODULE);
344
345 printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
346 "activated.\n", cpu);
347
348 for (i = 0; i < data->acpi_data.state_count; i++)
349 pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
350 (i == data->acpi_data.state?'*':' '), i,
351 (u32) data->acpi_data.states[i].core_frequency,
352 (u32) data->acpi_data.states[i].power,
353 (u32) data->acpi_data.states[i].transition_latency,
354 (u32) data->acpi_data.states[i].bus_master_latency,
355 (u32) data->acpi_data.states[i].status,
356 (u32) data->acpi_data.states[i].control);
357
358 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
359
360 /* the first call to ->target() should result in us actually
361 * writing something to the appropriate registers. */
362 data->resume = 1;
363
364 return (result);
365
366 err_freqfree:
367 kfree(data->freq_table);
368 err_unreg:
369 acpi_processor_unregister_performance(&data->acpi_data, cpu);
370 err_free:
371 kfree(data);
372 acpi_io_data[cpu] = NULL;
373
374 return (result);
375}
376
377
378static int
379acpi_cpufreq_cpu_exit (
380 struct cpufreq_policy *policy)
381{
382 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
383
384 pr_debug("acpi_cpufreq_cpu_exit\n");
385
386 if (data) {
387 cpufreq_frequency_table_put_attr(policy->cpu);
388 acpi_io_data[policy->cpu] = NULL;
389 acpi_processor_unregister_performance(&data->acpi_data,
390 policy->cpu);
391 kfree(data);
392 }
393
394 return (0);
395}
396
397
398static struct freq_attr* acpi_cpufreq_attr[] = {
399 &cpufreq_freq_attr_scaling_available_freqs,
400 NULL,
401};
402
403
404static struct cpufreq_driver acpi_cpufreq_driver = {
405 .verify = acpi_cpufreq_verify,
406 .target = acpi_cpufreq_target,
407 .get = acpi_cpufreq_get,
408 .init = acpi_cpufreq_cpu_init,
409 .exit = acpi_cpufreq_cpu_exit,
410 .name = "acpi-cpufreq",
411 .owner = THIS_MODULE,
412 .attr = acpi_cpufreq_attr,
413};
414
415
416static int __init
417acpi_cpufreq_init (void)
418{
419 pr_debug("acpi_cpufreq_init\n");
420
421 return cpufreq_register_driver(&acpi_cpufreq_driver);
422}
423
424
425static void __exit
426acpi_cpufreq_exit (void)
427{
428 pr_debug("acpi_cpufreq_exit\n");
429
430 cpufreq_unregister_driver(&acpi_cpufreq_driver);
431 return;
432}
433
434
435late_initcall(acpi_cpufreq_init);
436module_exit(acpi_cpufreq_exit);
437
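For context, a minimal user-space sketch of the frequency-table layout that acpi_cpufreq_cpu_init() builds above: state_count real entries followed by a single CPUFREQ_TABLE_END sentinel. The struct, the sentinel value (~0) and the sample MHz numbers below are stand-ins for illustration, not the kernel definitions from <linux/cpufreq.h>.

#include <stdio.h>
#include <stdlib.h>

#define CPUFREQ_TABLE_END (~0u)	/* assumed sentinel value; the real one lives in <linux/cpufreq.h> */

struct freq_entry {
	unsigned int index;
	unsigned int frequency;	/* kHz */
};

int main(void)
{
	unsigned int core_mhz[] = { 1600, 1400, 1200, 1000 };	/* made-up P-state frequencies */
	unsigned int state_count = 4, i;
	struct freq_entry *tbl = calloc(state_count + 1, sizeof(*tbl));

	if (!tbl)
		return 1;

	/* same shape as the "table init" loop above: real entries, then one sentinel */
	for (i = 0; i <= state_count; i++) {
		tbl[i].index = i;
		tbl[i].frequency = (i < state_count) ?
				core_mhz[i] * 1000 : CPUFREQ_TABLE_END;
	}

	for (i = 0; tbl[i].frequency != CPUFREQ_TABLE_END; i++)
		printf("P%u: %u kHz\n", tbl[i].index, tbl[i].frequency);

	free(tbl);
	return 0;
}
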
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c4cd45d97749..abc6dee3799c 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
90 FSYS_RETURN 90 FSYS_RETURN
91END(fsys_getpid) 91END(fsys_getpid)
92 92
93ENTRY(fsys_getppid)
94 .prologue
95 .altrp b6
96 .body
97 add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
98 ;;
99 ld8 r17=[r17] // r17 = current->group_leader
100 add r9=TI_FLAGS+IA64_TASK_SIZE,r16
101 ;;
102
103 ld4 r9=[r9]
104 add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
105 ;;
106 and r9=TIF_ALLWORK_MASK,r9
107
1081: ld8 r18=[r17] // r18 = current->group_leader->real_parent
109 ;;
110 cmp.ne p8,p0=0,r9
111 add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
112 ;;
113
114 /*
115 * The .acq is needed to ensure that the read of tgid has returned its data before
116 * we re-check "real_parent".
117 */
118 ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
119#ifdef CONFIG_SMP
120 /*
121 * Re-read current->group_leader->real_parent.
122 */
123 ld8 r19=[r17] // r19 = current->group_leader->real_parent
124(p8) br.spnt.many fsys_fallback_syscall
125 ;;
126 cmp.ne p6,p0=r18,r19 // did real_parent change?
127 mov r19=0 // i must not leak kernel bits...
128(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
129 ;;
130 mov r17=0 // i must not leak kernel bits...
131 mov r18=0 // i must not leak kernel bits...
132#else
133 mov r17=0 // i must not leak kernel bits...
134 mov r18=0 // i must not leak kernel bits...
135 mov r19=0 // i must not leak kernel bits...
136#endif
137 FSYS_RETURN
138END(fsys_getppid)
139
140ENTRY(fsys_set_tid_address) 93ENTRY(fsys_set_tid_address)
141 .prologue 94 .prologue
142 .altrp b6 95 .altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
614 data8 0 // chown 567 data8 0 // chown
615 data8 0 // lseek // 1040 568 data8 0 // lseek // 1040
616 data8 fsys_getpid // getpid 569 data8 fsys_getpid // getpid
617 data8 fsys_getppid // getppid 570 data8 0 // getppid
618 data8 0 // mount 571 data8 0 // mount
619 data8 0 // umount 572 data8 0 // umount
620 data8 0 // setuid // 1045 573 data8 0 // setuid // 1045
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index ee33c3aaa2fc..19f107be734e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -76,7 +76,7 @@
76 * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ 76 * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
77 * 77 *
78 * Note: The term "IRQ" is loosely used everywhere in Linux kernel to 78 * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
79 * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ 79 * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
80 * (isa_irq) is the only exception in this source code. 80 * (isa_irq) is the only exception in this source code.
81 */ 81 */
82 82
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
1010 return 0; 1010 return 0;
1011} 1011}
1012 1012
1013static int
1014iosapic_delete_rte(unsigned int irq, unsigned int gsi)
1015{
1016 struct iosapic_rte_info *rte, *temp;
1017
1018 list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
1019 rte_list) {
1020 if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
1021 if (rte->refcnt)
1022 return -EBUSY;
1023
1024 list_del(&rte->rte_list);
1025 kfree(rte);
1026 return 0;
1027 }
1028 }
1029
1030 return -EINVAL;
1031}
1032
1013int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) 1033int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
1014{ 1034{
1015 int num_rte, err, index; 1035 int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
1069 1089
1070int iosapic_remove(unsigned int gsi_base) 1090int iosapic_remove(unsigned int gsi_base)
1071{ 1091{
1072 int index, err = 0; 1092 int i, irq, index, err = 0;
1073 unsigned long flags; 1093 unsigned long flags;
1074 1094
1075 spin_lock_irqsave(&iosapic_lock, flags); 1095 spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
1087 goto out; 1107 goto out;
1088 } 1108 }
1089 1109
1110 for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
1111 irq = __gsi_to_irq(i);
1112 if (irq < 0)
1113 continue;
1114
1115 err = iosapic_delete_rte(irq, i);
1116 if (err)
1117 goto out;
1118 }
1119
1090 iounmap(iosapic_lists[index].addr); 1120 iounmap(iosapic_lists[index].addr);
1091 iosapic_free(index); 1121 iosapic_free(index);
1092 out: 1122 out:
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613eb..f2c418281130 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/kernel_stat.h> 24#include <linux/kernel_stat.h>
25 25
26#include <asm/mca.h>
27
26/* 28/*
27 * 'what should we do if we get a hw irq event on an illegal vector'. 29 * 'what should we do if we get a hw irq event on an illegal vector'.
28 * each architecture has to answer this themselves. 30 * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
83 85
84#endif /* CONFIG_SMP */ 86#endif /* CONFIG_SMP */
85 87
88int __init arch_early_irq_init(void)
89{
90 ia64_mca_irq_init();
91 return 0;
92}
93
86#ifdef CONFIG_HOTPLUG_CPU 94#ifdef CONFIG_HOTPLUG_CPU
87unsigned int vectors_in_migration[NR_IRQS]; 95unsigned int vectors_in_migration[NR_IRQS];
88 96
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd39044..d7396dbb07bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
2074 printk(KERN_INFO "MCA related initialization done\n"); 2074 printk(KERN_INFO "MCA related initialization done\n");
2075} 2075}
2076 2076
2077
2077/* 2078/*
2078 * ia64_mca_late_init 2079 * These pieces cannot be done in ia64_mca_init() because it is called before
2079 * 2080 * early_irq_init() which would wipe out our percpu irq registrations. But we
2080 * Opportunity to setup things that require initialization later 2081 * cannot leave them until ia64_mca_late_init() because by then all the other
2081 * than ia64_mca_init. Setup a timer to poll for CPEs if the 2082 * processors have been brought online and have set their own CMC vectors to
2082 * platform doesn't support an interrupt driven mechanism. 2083 * point at a non-existent action. Called from arch_early_irq_init().
2083 *
2084 * Inputs : None
2085 * Outputs : Status
2086 */ 2084 */
2087static int __init 2085void __init ia64_mca_irq_init(void)
2088ia64_mca_late_init(void)
2089{ 2086{
2090 if (!mca_init)
2091 return 0;
2092
2093 /* 2087 /*
2094 * Configure the CMCI/P vector and handler. Interrupts for CMC are 2088 * Configure the CMCI/P vector and handler. Interrupts for CMC are
2095 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2089 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
2108 /* Setup the CPEI/P handler */ 2102 /* Setup the CPEI/P handler */
2109 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2103 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2110#endif 2104#endif
2105}
2106
2107/*
2108 * ia64_mca_late_init
2109 *
2110 * Opportunity to setup things that require initialization later
2111 * than ia64_mca_init. Setup a timer to poll for CPEs if the
2112 * platform doesn't support an interrupt driven mechanism.
2113 *
2114 * Inputs : None
2115 * Outputs : Status
2116 */
2117static int __init
2118ia64_mca_late_init(void)
2119{
2120 if (!mca_init)
2121 return 0;
2111 2122
2112 register_hotcpu_notifier(&mca_cpu_notifier); 2123 register_hotcpu_notifier(&mca_cpu_notifier);
2113 2124
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 9392e021c93b..94f8bf777afa 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -349,7 +349,7 @@ init_record_index_pools(void)
349 349
350 /* - 3 - */ 350 /* - 3 - */
351 slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; 351 slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
352 slidx_pool.buffer = (slidx_list_t *) 352 slidx_pool.buffer =
353 kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); 353 kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
354 354
355 return slidx_pool.buffer ? 0 : -ENOMEM; 355 return slidx_pool.buffer ? 0 : -ENOMEM;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 2eda28414abb..9ea25fce06d5 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -42,6 +42,7 @@
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/tracehook.h> 43#include <linux/tracehook.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/cpu.h>
45 46
46#include <asm/errno.h> 47#include <asm/errno.h>
47#include <asm/intrinsics.h> 48#include <asm/intrinsics.h>
@@ -1322,8 +1323,6 @@ out:
1322} 1323}
1323EXPORT_SYMBOL(pfm_unregister_buffer_fmt); 1324EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1324 1325
1325extern void update_pal_halt_status(int);
1326
1327static int 1326static int
1328pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) 1327pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1329{ 1328{
@@ -1371,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1371 cpu)); 1370 cpu));
1372 1371
1373 /* 1372 /*
1374 * disable default_idle() to go to PAL_HALT 1373 * Force idle() into poll mode
1375 */ 1374 */
1376 update_pal_halt_status(0); 1375 cpu_idle_poll_ctrl(true);
1377 1376
1378 UNLOCK_PFS(flags); 1377 UNLOCK_PFS(flags);
1379 1378
@@ -1430,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1430 is_syswide, 1429 is_syswide,
1431 cpu)); 1430 cpu));
1432 1431
1433 /* 1432 /* Undo forced polling. Last session reenables pal_halt */
1434 * if possible, enable default_idle() to go into PAL_HALT 1433 cpu_idle_poll_ctrl(false);
1435 */
1436 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1437 update_pal_halt_status(1);
1438 1434
1439 UNLOCK_PFS(flags); 1435 UNLOCK_PFS(flags);
1440 1436
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6f7dc8b7b35c..55d4ba47a907 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -96,21 +96,13 @@ show_stack (struct task_struct *task, unsigned long *sp)
96} 96}
97 97
98void 98void
99dump_stack (void)
100{
101 show_stack(NULL, NULL);
102}
103
104EXPORT_SYMBOL(dump_stack);
105
106void
107show_regs (struct pt_regs *regs) 99show_regs (struct pt_regs *regs)
108{ 100{
109 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; 101 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
110 102
111 print_modules(); 103 print_modules();
112 printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), 104 printk("\n");
113 smp_processor_id(), current->comm); 105 show_regs_print_info(KERN_DEFAULT);
114 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", 106 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
115 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), 107 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
116 init_utsname()->release); 108 init_utsname()->release);
@@ -209,41 +201,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
209 local_irq_disable(); /* force interrupt disable */ 201 local_irq_disable(); /* force interrupt disable */
210} 202}
211 203
212static int pal_halt = 1;
213static int can_do_pal_halt = 1;
214
215static int __init nohalt_setup(char * str) 204static int __init nohalt_setup(char * str)
216{ 205{
217 pal_halt = can_do_pal_halt = 0; 206 cpu_idle_poll_ctrl(true);
218 return 1; 207 return 1;
219} 208}
220__setup("nohalt", nohalt_setup); 209__setup("nohalt", nohalt_setup);
221 210
222void
223update_pal_halt_status(int status)
224{
225 can_do_pal_halt = pal_halt && status;
226}
227
228/*
229 * We use this if we don't have any better idle routine..
230 */
231void
232default_idle (void)
233{
234 local_irq_enable();
235 while (!need_resched()) {
236 if (can_do_pal_halt) {
237 local_irq_disable();
238 if (!need_resched()) {
239 safe_halt();
240 }
241 local_irq_enable();
242 } else
243 cpu_relax();
244 }
245}
246
247#ifdef CONFIG_HOTPLUG_CPU 211#ifdef CONFIG_HOTPLUG_CPU
248/* We don't actually take CPU down, just spin without interrupts. */ 212/* We don't actually take CPU down, just spin without interrupts. */
249static inline void play_dead(void) 213static inline void play_dead(void)
@@ -270,47 +234,29 @@ static inline void play_dead(void)
270} 234}
271#endif /* CONFIG_HOTPLUG_CPU */ 235#endif /* CONFIG_HOTPLUG_CPU */
272 236
273void __attribute__((noreturn)) 237void arch_cpu_idle_dead(void)
274cpu_idle (void) 238{
239 play_dead();
240}
241
242void arch_cpu_idle(void)
275{ 243{
276 void (*mark_idle)(int) = ia64_mark_idle; 244 void (*mark_idle)(int) = ia64_mark_idle;
277 int cpu = smp_processor_id();
278
279 /* endless idle loop with no priority at all */
280 while (1) {
281 rcu_idle_enter();
282 if (can_do_pal_halt) {
283 current_thread_info()->status &= ~TS_POLLING;
284 /*
285 * TS_POLLING-cleared state must be visible before we
286 * test NEED_RESCHED:
287 */
288 smp_mb();
289 } else {
290 current_thread_info()->status |= TS_POLLING;
291 }
292 245
293 if (!need_resched()) {
294#ifdef CONFIG_SMP 246#ifdef CONFIG_SMP
295 min_xtp(); 247 min_xtp();
296#endif 248#endif
297 rmb(); 249 rmb();
298 if (mark_idle) 250 if (mark_idle)
299 (*mark_idle)(1); 251 (*mark_idle)(1);
300 252
301 default_idle(); 253 safe_halt();
302 if (mark_idle) 254
303 (*mark_idle)(0); 255 if (mark_idle)
256 (*mark_idle)(0);
304#ifdef CONFIG_SMP 257#ifdef CONFIG_SMP
305 normal_xtp(); 258 normal_xtp();
306#endif 259#endif
307 }
308 rcu_idle_exit();
309 schedule_preempt_disabled();
310 check_pgt_cache();
311 if (cpu_is_offline(cpu))
312 play_dead();
313 }
314} 260}
315 261
316void 262void
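A rough, user-space illustration of the control-flow change in this hunk: the architecture no longer owns the idle loop and instead supplies arch_cpu_idle()-style hooks that the generic idle loop (reached via cpu_startup_entry()) drives. The bounded loop and every name below are simplifications for demonstration only, not the kernel's generic idle implementation.

#include <stdio.h>
#include <stdbool.h>

static bool need_resched_flag;

/* stand-in for the new arch_cpu_idle() hook: halt until "work" arrives */
static void arch_cpu_idle(void)
{
	puts("halt until next interrupt");	/* plays the role of safe_halt() */
	need_resched_flag = true;		/* pretend an interrupt queued work */
}

/* minimal stand-in for the generic idle loop the arch now plugs into */
static void cpu_startup_entry(void)
{
	int i;

	for (i = 0; i < 3; i++) {		/* bounded so the demo terminates */
		while (!need_resched_flag)
			arch_cpu_idle();
		need_resched_flag = false;
		puts("schedule()");
	}
}

int main(void)
{
	cpu_startup_entry();
	return 0;
}
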
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 2029cc0d2fc6..13bfdd22afc8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1063,6 +1063,7 @@ check_bugs (void)
1063static int __init run_dmi_scan(void) 1063static int __init run_dmi_scan(void)
1064{ 1064{
1065 dmi_scan_machine(); 1065 dmi_scan_machine();
1066 dmi_set_dump_stack_arch_desc();
1066 return 0; 1067 return 0;
1067} 1068}
1068core_initcall(run_dmi_scan); 1069core_initcall(run_dmi_scan);
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 500f1e4d9f9d..8d87168d218d 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -455,7 +455,7 @@ start_secondary (void *unused)
455 preempt_disable(); 455 preempt_disable();
456 smp_callin(); 456 smp_callin();
457 457
458 cpu_idle(); 458 cpu_startup_entry(CPUHP_ONLINE);
459 return 0; 459 return 0;
460} 460}
461 461
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4332f7ee5203..a7869f8f49a6 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
256 "srlz.d;;" 256 "srlz.d;;"
257 "ssm psr.i;;" 257 "ssm psr.i;;"
258 "srlz.d;;" 258 "srlz.d;;"
259 : "=r"(ret) : "r"(iha), "r"(pte):"memory"); 259 : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
260 260
261 return ret; 261 return ret;
262} 262}
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 80dab509dfb0..67c59ebec899 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -47,6 +47,8 @@ void show_mem(unsigned int filter)
47 printk(KERN_INFO "Mem-info:\n"); 47 printk(KERN_INFO "Mem-info:\n");
48 show_free_areas(filter); 48 show_free_areas(filter);
49 printk(KERN_INFO "Node memory in pages:\n"); 49 printk(KERN_INFO "Node memory in pages:\n");
50 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
51 return;
50 for_each_online_pgdat(pgdat) { 52 for_each_online_pgdat(pgdat) {
51 unsigned long present; 53 unsigned long present;
52 unsigned long flags; 54 unsigned long flags;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c2e955ee79a8..ae4db4bd6d97 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -623,6 +623,8 @@ void show_mem(unsigned int filter)
623 623
624 printk(KERN_INFO "Mem-info:\n"); 624 printk(KERN_INFO "Mem-info:\n");
625 show_free_areas(filter); 625 show_free_areas(filter);
626 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
627 return;
626 printk(KERN_INFO "Node memory in pages:\n"); 628 printk(KERN_INFO "Node memory in pages:\n");
627 for_each_online_pgdat(pgdat) { 629 for_each_online_pgdat(pgdat) {
628 unsigned long present; 630 unsigned long present;
@@ -817,13 +819,12 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
817#endif 819#endif
818 820
819#ifdef CONFIG_SPARSEMEM_VMEMMAP 821#ifdef CONFIG_SPARSEMEM_VMEMMAP
820int __meminit vmemmap_populate(struct page *start_page, 822int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
821 unsigned long size, int node)
822{ 823{
823 return vmemmap_populate_basepages(start_page, size, node); 824 return vmemmap_populate_basepages(start, end, node);
824} 825}
825 826
826void vmemmap_free(struct page *memmap, unsigned long nr_pages) 827void vmemmap_free(unsigned long start, unsigned long end)
827{ 828{
828} 829}
829#endif 830#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 20bc967c7209..d1fe4b402601 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -154,25 +154,14 @@ ia64_init_addr_space (void)
154void 154void
155free_initmem (void) 155free_initmem (void)
156{ 156{
157 unsigned long addr, eaddr; 157 free_reserved_area((unsigned long)ia64_imva(__init_begin),
158 158 (unsigned long)ia64_imva(__init_end),
159 addr = (unsigned long) ia64_imva(__init_begin); 159 0, "unused kernel");
160 eaddr = (unsigned long) ia64_imva(__init_end);
161 while (addr < eaddr) {
162 ClearPageReserved(virt_to_page(addr));
163 init_page_count(virt_to_page(addr));
164 free_page(addr);
165 ++totalram_pages;
166 addr += PAGE_SIZE;
167 }
168 printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
169 (__init_end - __init_begin) >> 10);
170} 160}
171 161
172void __init 162void __init
173free_initrd_mem (unsigned long start, unsigned long end) 163free_initrd_mem (unsigned long start, unsigned long end)
174{ 164{
175 struct page *page;
176 /* 165 /*
177 * EFI uses 4KB pages while the kernel can use 4KB or bigger. 166 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
178 * Thus EFI and the kernel may have different page sizes. It is 167 * Thus EFI and the kernel may have different page sizes. It is
@@ -213,11 +202,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
213 for (; start < end; start += PAGE_SIZE) { 202 for (; start < end; start += PAGE_SIZE) {
214 if (!virt_addr_valid(start)) 203 if (!virt_addr_valid(start))
215 continue; 204 continue;
216 page = virt_to_page(start); 205 free_reserved_page(virt_to_page(start));
217 ClearPageReserved(page);
218 init_page_count(page);
219 free_page(start);
220 ++totalram_pages;
221 } 206 }
222} 207}
223 208
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 3dccdd8eb275..43964cde6214 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -16,7 +16,7 @@
16#include <asm/meminit.h> 16#include <asm/meminit.h>
17 17
18static inline void __iomem * 18static inline void __iomem *
19__ioremap (unsigned long phys_addr) 19__ioremap_uc(unsigned long phys_addr)
20{ 20{
21 return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); 21 return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
22} 22}
@@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr)
24void __iomem * 24void __iomem *
25early_ioremap (unsigned long phys_addr, unsigned long size) 25early_ioremap (unsigned long phys_addr, unsigned long size)
26{ 26{
27 return __ioremap(phys_addr); 27 u64 attr;
28 attr = kern_mem_attribute(phys_addr, size);
29 if (attr & EFI_MEMORY_WB)
30 return (void __iomem *) phys_to_virt(phys_addr);
31 return __ioremap_uc(phys_addr);
28} 32}
29 33
30void __iomem * 34void __iomem *
@@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
47 if (attr & EFI_MEMORY_WB) 51 if (attr & EFI_MEMORY_WB)
48 return (void __iomem *) phys_to_virt(phys_addr); 52 return (void __iomem *) phys_to_virt(phys_addr);
49 else if (attr & EFI_MEMORY_UC) 53 else if (attr & EFI_MEMORY_UC)
50 return __ioremap(phys_addr); 54 return __ioremap_uc(phys_addr);
51 55
52 /* 56 /*
53 * Some chipsets don't support UC access to memory. If 57 * Some chipsets don't support UC access to memory. If
@@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
93 return (void __iomem *) (offset + (char __iomem *)addr); 97 return (void __iomem *) (offset + (char __iomem *)addr);
94 } 98 }
95 99
96 return __ioremap(phys_addr); 100 return __ioremap_uc(phys_addr);
97} 101}
98EXPORT_SYMBOL(ioremap); 102EXPORT_SYMBOL(ioremap);
99 103
@@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
103 if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) 107 if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
104 return NULL; 108 return NULL;
105 109
106 return __ioremap(phys_addr); 110 return __ioremap_uc(phys_addr);
107} 111}
108EXPORT_SYMBOL(ioremap_nocache); 112EXPORT_SYMBOL(ioremap_nocache);
109 113
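A simplified, self-contained sketch of the dispatch the renamed helper takes part in above: the EFI memory attributes decide whether a physical range gets a cacheable (WB) identity mapping or an uncached mapping. The flag values, the address threshold and the stub below are invented stand-ins for kern_mem_attribute()/phys_to_virt()/__ioremap_uc(), not the ia64 implementations.

#include <stdio.h>

#define EFI_MEMORY_UC 0x1u	/* illustrative flag values */
#define EFI_MEMORY_WB 0x8u

/* stand-in for kern_mem_attribute(); the size argument is omitted here */
static unsigned int kern_mem_attribute_stub(unsigned long phys_addr)
{
	return (phys_addr < 0x1000000UL) ? EFI_MEMORY_WB : EFI_MEMORY_UC;
}

static const char *ioremap_choice(unsigned long phys_addr)
{
	unsigned int attr = kern_mem_attribute_stub(phys_addr);

	if (attr & EFI_MEMORY_WB)
		return "cached: phys_to_virt()";
	if (attr & EFI_MEMORY_UC)
		return "uncached: __ioremap_uc()";
	return "fall back to a PTE-based mapping";
}

int main(void)
{
	printf("%s\n%s\n", ioremap_choice(0x100000UL),
	       ioremap_choice(0x80000000UL));
	return 0;
}
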
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 3efea7d0a351..4248492b9321 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -61,18 +61,36 @@ paddr_to_nid(unsigned long paddr)
61int __meminit __early_pfn_to_nid(unsigned long pfn) 61int __meminit __early_pfn_to_nid(unsigned long pfn)
62{ 62{
63 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; 63 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
64 /*
65 * NOTE: The following SMP-unsafe globals are only used early in boot
66 * when the kernel is running single-threaded.
67 */
68 static int __meminitdata last_ssec, last_esec;
69 static int __meminitdata last_nid;
70
71 if (section >= last_ssec && section < last_esec)
72 return last_nid;
64 73
65 for (i = 0; i < num_node_memblks; i++) { 74 for (i = 0; i < num_node_memblks; i++) {
66 ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT; 75 ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
67 esec = (node_memblk[i].start_paddr + node_memblk[i].size + 76 esec = (node_memblk[i].start_paddr + node_memblk[i].size +
68 ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT; 77 ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
69 if (section >= ssec && section < esec) 78 if (section >= ssec && section < esec) {
79 last_ssec = ssec;
80 last_esec = esec;
81 last_nid = node_memblk[i].nid;
70 return node_memblk[i].nid; 82 return node_memblk[i].nid;
83 }
71 } 84 }
72 85
73 return -1; 86 return -1;
74} 87}
75 88
89void __cpuinit numa_clear_node(int cpu)
90{
91 unmap_cpu_from_node(cpu, NUMA_NO_NODE);
92}
93
76#ifdef CONFIG_MEMORY_HOTPLUG 94#ifdef CONFIG_MEMORY_HOTPLUG
77/* 95/*
78 * SRAT information is stored in node_memblk[], then we can use SRAT 96 * SRAT information is stored in node_memblk[], then we can use SRAT
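A stand-alone sketch of the single-entry cache this hunk adds to __early_pfn_to_nid(): remember the last matching section range and its node id, and short-circuit repeated lookups that land in the same range (safe only because early boot runs single-threaded, as the comment above notes). The tiny memblk table and the sample sections are invented for the demo.

#include <stdio.h>

struct memblk { int start, end, nid; };

static struct memblk blks[] = { { 0, 16, 0 }, { 16, 32, 1 } };

static int pfn_section_to_nid(int section)
{
	static int last_start = -1, last_end = -1, last_nid = -1;
	unsigned int i;

	if (section >= last_start && section < last_end)
		return last_nid;			/* cache hit */

	for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++) {
		if (section >= blks[i].start && section < blks[i].end) {
			last_start = blks[i].start;	/* remember the range */
			last_end = blks[i].end;
			last_nid = blks[i].nid;
			return last_nid;
		}
	}
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", pfn_section_to_nid(3),
	       pfn_section_to_nid(5), pfn_section_to_nid(20));
	return 0;
}
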
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 60532ab27346..de1474ff0bc5 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/pci-acpi.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/ioport.h> 20#include <linux/ioport.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
@@ -458,6 +459,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
458 platform_pci_fixup_bus(b); 459 platform_pci_fixup_bus(b);
459} 460}
460 461
462void pcibios_add_bus(struct pci_bus *bus)
463{
464 acpi_pci_add_bus(bus);
465}
466
467void pcibios_remove_bus(struct pci_bus *bus)
468{
469 acpi_pci_remove_bus(bus);
470}
471
461void pcibios_set_master (struct pci_dev *dev) 472void pcibios_set_master (struct pci_dev *dev)
462{ 473{
463 /* No special bus mastering setup handling */ 474 /* No special bus mastering setup handling */
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 14c1711238c0..e35f6485c1fd 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -490,11 +490,14 @@ static int __init tiocx_init(void)
490{ 490{
491 cnodeid_t cnodeid; 491 cnodeid_t cnodeid;
492 int found_tiocx_device = 0; 492 int found_tiocx_device = 0;
493 int err;
493 494
494 if (!ia64_platform_is("sn2")) 495 if (!ia64_platform_is("sn2"))
495 return 0; 496 return 0;
496 497
497 bus_register(&tiocx_bus_type); 498 err = bus_register(&tiocx_bus_type);
499 if (err)
500 return err;
498 501
499 for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) { 502 for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
500 nasid_t nasid; 503 nasid_t nasid;
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 555629b05267..59db80193454 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -48,14 +48,4 @@
48#define __IGNORE_getresgid 48#define __IGNORE_getresgid
49#define __IGNORE_chown 49#define __IGNORE_chown
50 50
51/*
52 * "Conditional" syscalls
53 *
54 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
55 * but it doesn't work on all toolchains, so we just do it by hand
56 */
57#ifndef cond_syscall
58#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
59#endif
60
61#endif /* _ASM_M32R_UNISTD_H */ 51#endif /* _ASM_M32R_UNISTD_H */
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index bde899e155d3..e69221d581d5 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -47,24 +47,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
47void (*pm_power_off)(void) = NULL; 47void (*pm_power_off)(void) = NULL;
48EXPORT_SYMBOL(pm_power_off); 48EXPORT_SYMBOL(pm_power_off);
49 49
50/*
51 * The idle thread. There's no useful work to be
52 * done, so just try to conserve power and have a
53 * low exit latency (ie sit in a loop waiting for
54 * somebody to say that they'd like to reschedule)
55 */
56void cpu_idle (void)
57{
58 /* endless idle loop with no priority at all */
59 while (1) {
60 rcu_idle_enter();
61 while (!need_resched())
62 cpu_relax();
63 rcu_idle_exit();
64 schedule_preempt_disabled();
65 }
66}
67
68void machine_restart(char *__unused) 50void machine_restart(char *__unused)
69{ 51{
70#if defined(CONFIG_PLAT_MAPPI3) 52#if defined(CONFIG_PLAT_MAPPI3)
@@ -91,6 +73,8 @@ void machine_power_off(void)
91void show_regs(struct pt_regs * regs) 73void show_regs(struct pt_regs * regs)
92{ 74{
93 printk("\n"); 75 printk("\n");
76 show_regs_print_info(KERN_DEFAULT);
77
94 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \ 78 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
95 regs->bpc, regs->psw, regs->lr, regs->fp); 79 regs->bpc, regs->psw, regs->lr, regs->fp);
96 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \ 80 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 13168a769f8f..0ac558adc605 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -432,7 +432,7 @@ int __init start_secondary(void *unused)
432 */ 432 */
433 local_flush_tlb_all(); 433 local_flush_tlb_all();
434 434
435 cpu_idle(); 435 cpu_startup_entry(CPUHP_ONLINE);
436 return 0; 436 return 0;
437} 437}
438 438
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 3bcb207e5b6d..a7a424f852e4 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -132,10 +132,8 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
132 printk("Call Trace: "); 132 printk("Call Trace: ");
133 while (!kstack_end(stack)) { 133 while (!kstack_end(stack)) {
134 addr = *stack++; 134 addr = *stack++;
135 if (__kernel_text_address(addr)) { 135 if (__kernel_text_address(addr))
136 printk("[<%08lx>] ", addr); 136 printk("[<%08lx>] %pSR\n", addr, (void *)addr);
137 print_symbol("%s\n", addr);
138 }
139 } 137 }
140 printk("\n"); 138 printk("\n");
141} 139}
@@ -169,15 +167,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
169 show_trace(task, sp); 167 show_trace(task, sp);
170} 168}
171 169
172void dump_stack(void)
173{
174 unsigned long stack;
175
176 show_trace(current, &stack);
177}
178
179EXPORT_SYMBOL(dump_stack);
180
181static void show_registers(struct pt_regs *regs) 170static void show_registers(struct pt_regs *regs)
182{ 171{
183 int i = 0; 172 int i = 0;
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 78b660e903da..ab4cbce91a9b 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -28,10 +28,7 @@
28#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/tlb.h> 30#include <asm/tlb.h>
31 31#include <asm/sections.h>
32/* References to section boundaries */
33extern char _text, _etext, _edata;
34extern char __init_begin, __init_end;
35 32
36pgd_t swapper_pg_dir[1024]; 33pgd_t swapper_pg_dir[1024];
37 34
@@ -184,17 +181,7 @@ void __init mem_init(void)
184 *======================================================================*/ 181 *======================================================================*/
185void free_initmem(void) 182void free_initmem(void)
186{ 183{
187 unsigned long addr; 184 free_initmem_default(0);
188
189 addr = (unsigned long)(&__init_begin);
190 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
191 ClearPageReserved(virt_to_page(addr));
192 init_page_count(virt_to_page(addr));
193 free_page(addr);
194 totalram_pages++;
195 }
196 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \
197 (int)(&__init_end - &__init_begin) >> 10);
198} 185}
199 186
200#ifdef CONFIG_BLK_DEV_INITRD 187#ifdef CONFIG_BLK_DEV_INITRD
@@ -204,13 +191,6 @@ void free_initmem(void)
204 *======================================================================*/ 191 *======================================================================*/
205void free_initrd_mem(unsigned long start, unsigned long end) 192void free_initrd_mem(unsigned long start, unsigned long end)
206{ 193{
207 unsigned long p; 194 free_reserved_area(start, end, 0, "initrd");
208 for (p = start; p < end; p += PAGE_SIZE) {
209 ClearPageReserved(virt_to_page(p));
210 init_page_count(virt_to_page(p));
211 free_page(p);
212 totalram_pages++;
213 }
214 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
215} 195}
216#endif 196#endif
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index 93ef0346b209..675b087198f6 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -45,6 +45,16 @@ config ISA
45 (MCA) or VESA. ISA is an older system, now being displaced by PCI; 45 (MCA) or VESA. ISA is an older system, now being displaced by PCI;
46 newer boards don't support it. If you have ISA, say Y, otherwise N. 46 newer boards don't support it. If you have ISA, say Y, otherwise N.
47 47
48config ATARI_ROM_ISA
49 bool "Atari ROM port ISA adapter support"
50 depends on ATARI
51 help
52 This option enables support for the ROM port ISA adapter used to
53 operate ISA cards on Atari. Only 8 bit cards are supported, and
54 no interrupt lines are connected.
55 The only driver currently using this adapter is the EtherNEC
56 driver for RTL8019AS based NE2000 compatible network cards.
57
48config GENERIC_ISA_DMA 58config GENERIC_ISA_DMA
49 def_bool ISA 59 def_bool ISA
50 60
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index 4bc945dfe467..d163991c5717 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -55,6 +55,30 @@ config NFETH
55 which will emulate a regular ethernet device while presenting an 55 which will emulate a regular ethernet device while presenting an
56 ethertap device to the host system. 56 ethertap device to the host system.
57 57
58config ATARI_ETHERNAT
59 bool "Atari EtherNAT Ethernet support"
60 depends on ATARI
61 ---help---
62 Say Y to include support for the EtherNAT network adapter for the
63 CT/60 extension port.
64
65 To compile the actual ethernet driver, choose Y or M for the SMC91X
66 option in the network device section; the module will be called smc91x.
67
68config ATARI_ETHERNEC
69 bool "Atari EtherNEC Ethernet support"
70 depends on ATARI_ROM_ISA
71 ---help---
72 Say Y to include support for the EtherNEC network adapter for the
73 ROM port. The driver works by polling instead of interrupts, so it
74 is quite slow.
75
76 This driver also supports the ethernet part of the NetUSBee ROM
77 port combined Ethernet/USB adapter.
78
79 To compile the actual ethernet driver, choose Y or M for the NE2000
80 option in the network device section; the module will be called ne.
81
58endmenu 82endmenu
59 83
60menu "Character devices" 84menu "Character devices"
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 3f41092d1b70..20cde4e9fc77 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -49,6 +49,7 @@
49#include <asm/atari_stdma.h> 49#include <asm/atari_stdma.h>
50#include <asm/irq.h> 50#include <asm/irq.h>
51#include <asm/entry.h> 51#include <asm/entry.h>
52#include <asm/io.h>
52 53
53 54
54/* 55/*
@@ -122,6 +123,136 @@ static struct irq_chip atari_irq_chip = {
122}; 123};
123 124
124/* 125/*
126 * ST-MFP timer D chained interrupts - each driver gets its own timer
127 * interrupt instance.
128 */
129
130struct mfptimerbase {
131 volatile struct MFP *mfp;
132 unsigned char mfp_mask, mfp_data;
133 unsigned short int_mask;
134 int handler_irq, mfptimer_irq, server_irq;
135 char *name;
136} stmfp_base = {
137 .mfp = &st_mfp,
138 .int_mask = 0x0,
139 .handler_irq = IRQ_MFP_TIMD,
140 .mfptimer_irq = IRQ_MFP_TIMER1,
141 .name = "MFP Timer D"
142};
143
144static irqreturn_t mfptimer_handler(int irq, void *dev_id)
145{
146 struct mfptimerbase *base = dev_id;
147 int mach_irq;
148 unsigned char ints;
149
150 mach_irq = base->mfptimer_irq;
151 ints = base->int_mask;
152 for (; ints; mach_irq++, ints >>= 1) {
153 if (ints & 1)
154 generic_handle_irq(mach_irq);
155 }
156 return IRQ_HANDLED;
157}
158
159
160static void atari_mfptimer_enable(struct irq_data *data)
161{
162 int mfp_num = data->irq - IRQ_MFP_TIMER1;
163 stmfp_base.int_mask |= 1 << mfp_num;
164 atari_enable_irq(IRQ_MFP_TIMD);
165}
166
167static void atari_mfptimer_disable(struct irq_data *data)
168{
169 int mfp_num = data->irq - IRQ_MFP_TIMER1;
170 stmfp_base.int_mask &= ~(1 << mfp_num);
171 if (!stmfp_base.int_mask)
172 atari_disable_irq(IRQ_MFP_TIMD);
173}
174
175static struct irq_chip atari_mfptimer_chip = {
176 .name = "timer_d",
177 .irq_enable = atari_mfptimer_enable,
178 .irq_disable = atari_mfptimer_disable,
179};
180
181
182/*
183 * EtherNAT CPLD interrupt handling
184 * CPLD interrupt register is at phys. 0x80000023
185 * Need this mapped in at interrupt startup time
186 * Possibly need this mapped on demand anyway -
187 * EtherNAT USB driver needs to disable IRQ before
188 * startup!
189 */
190
191static unsigned char *enat_cpld;
192
193static unsigned int atari_ethernat_startup(struct irq_data *data)
194{
195 int enat_num = 140 - data->irq + 1;
196
197 m68k_irq_startup(data);
198 /*
199 * map CPLD interrupt register
200 */
201 if (!enat_cpld)
202 enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2);
203 /*
204 * do _not_ enable the USB chip interrupt here - causes interrupt storm
205 * and triggers dead interrupt watchdog
206 * Need to reset the USB chip to a sane state in early startup before
207 * removing this hack
208 */
209 if (enat_num == 1)
210 *enat_cpld |= 1 << enat_num;
211
212 return 0;
213}
214
215static void atari_ethernat_enable(struct irq_data *data)
216{
217 int enat_num = 140 - data->irq + 1;
218 /*
219 * map CPLD interrupt register
220 */
221 if (!enat_cpld)
222 enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2);
223 *enat_cpld |= 1 << enat_num;
224}
225
226static void atari_ethernat_disable(struct irq_data *data)
227{
228 int enat_num = 140 - data->irq + 1;
229 /*
230 * map CPLD interrupt register
231 */
232 if (!enat_cpld)
233 enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2);
234 *enat_cpld &= ~(1 << enat_num);
235}
236
237static void atari_ethernat_shutdown(struct irq_data *data)
238{
239 int enat_num = 140 - data->irq + 1;
240 if (enat_cpld) {
241 *enat_cpld &= ~(1 << enat_num);
242 iounmap(enat_cpld);
243 enat_cpld = NULL;
244 }
245}
246
247static struct irq_chip atari_ethernat_chip = {
248 .name = "ethernat",
249 .irq_startup = atari_ethernat_startup,
250 .irq_shutdown = atari_ethernat_shutdown,
251 .irq_enable = atari_ethernat_enable,
252 .irq_disable = atari_ethernat_disable,
253};
254
255/*
125 * void atari_init_IRQ (void) 256 * void atari_init_IRQ (void)
126 * 257 *
127 * Parameters: None 258 * Parameters: None
@@ -198,6 +329,27 @@ void __init atari_init_IRQ(void)
198 /* Initialize the PSG: all sounds off, both ports output */ 329 /* Initialize the PSG: all sounds off, both ports output */
199 sound_ym.rd_data_reg_sel = 7; 330 sound_ym.rd_data_reg_sel = 7;
200 sound_ym.wd_data = 0xff; 331 sound_ym.wd_data = 0xff;
332
333 m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq,
334 IRQ_MFP_TIMER1, 8);
335
336 /* prepare timer D data for use as poll interrupt */
337 /* set Timer D data Register - needs to be > 0 */
338 st_mfp.tim_dt_d = 254; /* < 100 Hz */
339 /* start timer D, div = 1:100 */
340 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6;
341
342 /* request timer D dispatch handler */
343 if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED,
344 stmfp_base.name, &stmfp_base))
345 pr_err("Couldn't register %s interrupt\n", stmfp_base.name);
346
347 /*
348 * EtherNAT ethernet / USB interrupt handlers
349 */
350
351 m68k_setup_irq_controller(&atari_ethernat_chip, handle_simple_irq,
352 139, 2);
201} 353}
202 354
203 355
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 037c11c99331..fb2d0bd9b3ad 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -31,6 +31,8 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/ioport.h> 33#include <linux/ioport.h>
34#include <linux/platform_device.h>
35#include <linux/usb/isp116x.h>
34#include <linux/vt_kern.h> 36#include <linux/vt_kern.h>
35#include <linux/module.h> 37#include <linux/module.h>
36 38
@@ -655,3 +657,240 @@ static void atari_get_hardware_list(struct seq_file *m)
655 ATARIHW_ANNOUNCE(VME, "VME Bus"); 657 ATARIHW_ANNOUNCE(VME, "VME Bus");
656 ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor"); 658 ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor");
657} 659}
660
661/*
662 * MSch: initial platform device support for Atari,
663 * required for EtherNAT/EtherNEC/NetUSBee drivers
664 */
665
666#if defined(CONFIG_ATARI_ETHERNAT) || defined(CONFIG_ATARI_ETHERNEC)
667static void isp1160_delay(struct device *dev, int delay)
668{
669 ndelay(delay);
670}
671#endif
672
673#ifdef CONFIG_ATARI_ETHERNAT
674/*
675 * EtherNAT: SMC91C111 Ethernet chipset, handled by smc91x driver
676 */
677
678#define ATARI_ETHERNAT_IRQ 140
679
680static struct resource smc91x_resources[] = {
681 [0] = {
682 .name = "smc91x-regs",
683 .start = ATARI_ETHERNAT_PHYS_ADDR,
684 .end = ATARI_ETHERNAT_PHYS_ADDR + 0xfffff,
685 .flags = IORESOURCE_MEM,
686 },
687 [1] = {
688 .name = "smc91x-irq",
689 .start = ATARI_ETHERNAT_IRQ,
690 .end = ATARI_ETHERNAT_IRQ,
691 .flags = IORESOURCE_IRQ,
692 },
693};
694
695static struct platform_device smc91x_device = {
696 .name = "smc91x",
697 .id = -1,
698 .num_resources = ARRAY_SIZE(smc91x_resources),
699 .resource = smc91x_resources,
700};
701
702/*
703 * ISP 1160 - using the isp116x-hcd module
704 */
705
706#define ATARI_USB_PHYS_ADDR 0x80000012
707#define ATARI_USB_IRQ 139
708
709static struct resource isp1160_resources[] = {
710 [0] = {
711 .name = "isp1160-data",
712 .start = ATARI_USB_PHYS_ADDR,
713 .end = ATARI_USB_PHYS_ADDR + 0x1,
714 .flags = IORESOURCE_MEM,
715 },
716 [1] = {
717 .name = "isp1160-regs",
718 .start = ATARI_USB_PHYS_ADDR + 0x4,
719 .end = ATARI_USB_PHYS_ADDR + 0x5,
720 .flags = IORESOURCE_MEM,
721 },
722 [2] = {
723 .name = "isp1160-irq",
724 .start = ATARI_USB_IRQ,
725 .end = ATARI_USB_IRQ,
726 .flags = IORESOURCE_IRQ,
727 },
728};
729
730/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
731static struct isp116x_platform_data isp1160_platform_data = {
732 /* Enable internal resistors on downstream ports */
733 .sel15Kres = 1,
734 /* On-chip overcurrent protection */
735 .oc_enable = 1,
736 /* INT output polarity */
737 .int_act_high = 1,
738 /* INT edge or level triggered */
739 .int_edge_triggered = 0,
740
741 /* WAKEUP pin connected - NOT SUPPORTED */
742 /* .remote_wakeup_connected = 0, */
743 /* Wakeup by devices on usb bus enabled */
744 .remote_wakeup_enable = 0,
745 .delay = isp1160_delay,
746};
747
748static struct platform_device isp1160_device = {
749 .name = "isp116x-hcd",
750 .id = 0,
751 .num_resources = ARRAY_SIZE(isp1160_resources),
752 .resource = isp1160_resources,
753 .dev = {
754 .platform_data = &isp1160_platform_data,
755 },
756};
757
758static struct platform_device *atari_ethernat_devices[] __initdata = {
759 &smc91x_device,
760 &isp1160_device
761};
762#endif /* CONFIG_ATARI_ETHERNAT */
763
764#ifdef CONFIG_ATARI_ETHERNEC
765/*
766 * EtherNEC: RTL8019 (NE2000 compatible) Ethernet chipset,
767 * handled by ne.c driver
768 */
769
770#define ATARI_ETHERNEC_PHYS_ADDR 0xfffa0000
771#define ATARI_ETHERNEC_BASE 0x300
772#define ATARI_ETHERNEC_IRQ IRQ_MFP_TIMER1
773
774static struct resource rtl8019_resources[] = {
775 [0] = {
776 .name = "rtl8019-regs",
777 .start = ATARI_ETHERNEC_BASE,
778 .end = ATARI_ETHERNEC_BASE + 0x20 - 1,
779 .flags = IORESOURCE_IO,
780 },
781 [1] = {
782 .name = "rtl8019-irq",
783 .start = ATARI_ETHERNEC_IRQ,
784 .end = ATARI_ETHERNEC_IRQ,
785 .flags = IORESOURCE_IRQ,
786 },
787};
788
789static struct platform_device rtl8019_device = {
790 .name = "ne",
791 .id = -1,
792 .num_resources = ARRAY_SIZE(rtl8019_resources),
793 .resource = rtl8019_resources,
794};
795
796/*
797 * NetUSBee: ISP1160 USB host adapter via ROM-port adapter
798 */
799
800#define ATARI_NETUSBEE_PHYS_ADDR 0xfffa8000
801#define ATARI_NETUSBEE_BASE 0x340
802#define ATARI_NETUSBEE_IRQ IRQ_MFP_TIMER2
803
804static struct resource netusbee_resources[] = {
805 [0] = {
806 .name = "isp1160-data",
807 .start = ATARI_NETUSBEE_BASE,
808 .end = ATARI_NETUSBEE_BASE + 0x1,
809 .flags = IORESOURCE_MEM,
810 },
811 [1] = {
812 .name = "isp1160-regs",
813 .start = ATARI_NETUSBEE_BASE + 0x20,
814 .end = ATARI_NETUSBEE_BASE + 0x21,
815 .flags = IORESOURCE_MEM,
816 },
817 [2] = {
818 .name = "isp1160-irq",
819 .start = ATARI_NETUSBEE_IRQ,
820 .end = ATARI_NETUSBEE_IRQ,
821 .flags = IORESOURCE_IRQ,
822 },
823};
824
825/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
826static struct isp116x_platform_data netusbee_platform_data = {
827 /* Enable internal resistors on downstream ports */
828 .sel15Kres = 1,
829 /* On-chip overcurrent protection */
830 .oc_enable = 1,
831 /* INT output polarity */
832 .int_act_high = 1,
833 /* INT edge or level triggered */
834 .int_edge_triggered = 0,
835
836 /* WAKEUP pin connected - NOT SUPPORTED */
837 /* .remote_wakeup_connected = 0, */
838 /* Wakeup by devices on usb bus enabled */
839 .remote_wakeup_enable = 0,
840 .delay = isp1160_delay,
841};
842
843static struct platform_device netusbee_device = {
844 .name = "isp116x-hcd",
845 .id = 1,
846 .num_resources = ARRAY_SIZE(netusbee_resources),
847 .resource = netusbee_resources,
848 .dev = {
849 .platform_data = &netusbee_platform_data,
850 },
851};
852
853static struct platform_device *atari_netusbee_devices[] __initdata = {
854 &rtl8019_device,
855 &netusbee_device
856};
857#endif /* CONFIG_ATARI_ETHERNEC */
858
859int __init atari_platform_init(void)
860{
861 int rv = 0;
862
863 if (!MACH_IS_ATARI)
864 return -ENODEV;
865
866#ifdef CONFIG_ATARI_ETHERNAT
867 {
868 unsigned char *enatc_virt;
869 enatc_virt = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0xf);
870 if (hwreg_present(enatc_virt)) {
871 rv = platform_add_devices(atari_ethernat_devices,
872 ARRAY_SIZE(atari_ethernat_devices));
873 }
874 iounmap(enatc_virt);
875 }
876#endif
877
878#ifdef CONFIG_ATARI_ETHERNEC
879 {
880 int error;
881 unsigned char *enec_virt;
882 enec_virt = (unsigned char *)ioremap((ATARI_ETHERNEC_PHYS_ADDR), 0xf);
883 if (hwreg_present(enec_virt)) {
884 error = platform_add_devices(atari_netusbee_devices,
885 ARRAY_SIZE(atari_netusbee_devices));
886 if (error && !rv)
887 rv = error;
888 }
889 iounmap(enec_virt);
890 }
891#endif
892
893 return rv;
894}
895
896arch_initcall(atari_platform_init);
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index c0cb36350775..d887050e6da6 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -805,5 +805,11 @@ struct MSTE_RTC {
805 805
806#define mste_rtc ((*(volatile struct MSTE_RTC *)MSTE_RTC_BAS)) 806#define mste_rtc ((*(volatile struct MSTE_RTC *)MSTE_RTC_BAS))
807 807
808/*
809** EtherNAT add-on card for Falcon - combined ethernet and USB adapter
810*/
811
812#define ATARI_ETHERNAT_PHYS_ADDR 0x80000000
813
808#endif /* linux/atarihw.h */ 814#endif /* linux/atarihw.h */
809 815
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 5fc13bdf9044..953e0ac6855e 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -32,7 +32,7 @@
32#define VME_SOURCE_BASE 56 32#define VME_SOURCE_BASE 56
33#define VME_MAX_SOURCES 16 33#define VME_MAX_SOURCES 16
34 34
35#define NUM_ATARI_SOURCES (VME_SOURCE_BASE+VME_MAX_SOURCES-STMFP_SOURCE_BASE) 35#define NUM_ATARI_SOURCES 141
36 36
37/* convert vector number to int source number */ 37/* convert vector number to int source number */
38#define IRQ_VECTOR_TO_SOURCE(v) ((v) - ((v) < 0x20 ? 0x18 : (0x40-8))) 38#define IRQ_VECTOR_TO_SOURCE(v) ((v) - ((v) < 0x20 ? 0x18 : (0x40-8)))
@@ -94,6 +94,15 @@
94#define IRQ_SCCA_RX (52) 94#define IRQ_SCCA_RX (52)
95#define IRQ_SCCA_SPCOND (54) 95#define IRQ_SCCA_SPCOND (54)
96 96
97/* shared MFP timer D interrupts - hires timer for EtherNEC et al. */
98#define IRQ_MFP_TIMER1 (64)
99#define IRQ_MFP_TIMER2 (65)
100#define IRQ_MFP_TIMER3 (66)
101#define IRQ_MFP_TIMER4 (67)
102#define IRQ_MFP_TIMER5 (68)
103#define IRQ_MFP_TIMER6 (69)
104#define IRQ_MFP_TIMER7 (70)
105#define IRQ_MFP_TIMER8 (71)
97 106
98#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */ 107#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */
99#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */ 108#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 5c81d0eae5cf..bc755bc620ad 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -124,6 +124,9 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
124#define cmpxchg_local(ptr, o, n) \ 124#define cmpxchg_local(ptr, o, n) \
125 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ 125 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
126 (unsigned long)(n), sizeof(*(ptr)))) 126 (unsigned long)(n), sizeof(*(ptr))))
127
128#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
129
127#else 130#else
128 131
129/* 132/*
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 12d8fe4f1d30..d28fa8fe26fe 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -92,5 +92,28 @@ static inline void __udelay(unsigned long usecs)
92#define udelay(n) (__builtin_constant_p(n) ? \ 92#define udelay(n) (__builtin_constant_p(n) ? \
93 ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n)) 93 ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
94 94
95/*
96 * nanosecond delay:
97 *
98 * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of loops
99 * per microsecond
100 *
101 * 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of
102 * nanoseconds per loop
103 *
104 * So n / ( 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) ) would
105 * be the number of loops for n nanoseconds
106 */
107
108/*
109 * The simpler m68k and ColdFire processors do not have a 32*32->64
110 * multiply instruction. So we need to handle them a little differently.
111 * We use a bit of shifting and a single 32*32->32 multiply to get close.
112 * This is a macro so that the const version can factor out the first
113 * multiply and shift.
114 */
115#define HZSCALE (268435456 / (1000000 / HZ))
116
117#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
95 118
96#endif /* defined(_M68K_DELAY_H) */ 119#endif /* defined(_M68K_DELAY_H) */
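A worked example of the ndelay() arithmetic described in the comment above, with HZ = 100 and loops_per_jiffy = 100000 picked purely for illustration; the shift-based estimate lands close to the exact loops_per_jiffy * HZ / 1e6 = 10 loops per microsecond.

#include <stdio.h>

#define HZ 100
#define HZSCALE (268435456 / (1000000 / HZ))

int main(void)
{
	unsigned long loops_per_jiffy = 100000;		/* made-up calibration value */
	unsigned long loops_per_usec =
		(((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6;
	unsigned long n = 2500;				/* nanoseconds to wait */
	unsigned long loops = (n * loops_per_usec + 999) / 1000;	/* DIV_ROUND_UP(..., 1000) */

	/* exact value would be loops_per_jiffy * HZ / 1e6 = 10 loops/us; the shifts give 9 */
	printf("approx %lu loops/us, %lu loops for %lu ns\n",
	       loops_per_usec, loops, n);
	return 0;
}
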
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index a6686d26fe17..ffdf54f44bc6 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -63,6 +63,23 @@
63#endif 63#endif
64#endif /* AMIGA_PCMCIA */ 64#endif /* AMIGA_PCMCIA */
65 65
66#ifdef CONFIG_ATARI_ROM_ISA
67
68#define enec_isa_read_base 0xfffa0000
69#define enec_isa_write_base 0xfffb0000
70
71#define ENEC_ISA_IO_B(ioaddr) (enec_isa_read_base+((((unsigned long)(ioaddr))&0x7F)<<9))
72#define ENEC_ISA_IO_W(ioaddr) (enec_isa_read_base+((((unsigned long)(ioaddr))&0x7F)<<9))
73#define ENEC_ISA_MEM_B(madr) (enec_isa_read_base+((((unsigned long)(madr))&0x7F)<<9))
74#define ENEC_ISA_MEM_W(madr) (enec_isa_read_base+((((unsigned long)(madr))&0x7F)<<9))
75
76#ifndef MULTI_ISA
77#define MULTI_ISA 0
78#else
79#undef MULTI_ISA
80#define MULTI_ISA 1
81#endif
82#endif /* ATARI_ROM_ISA */
66 83
67 84
68#if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE) 85#if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE)
@@ -111,14 +128,15 @@ void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len);
111#define readw(addr) in_le16(addr) 128#define readw(addr) in_le16(addr)
112#define writew(v, addr) out_le16((addr), (v)) 129#define writew(v, addr) out_le16((addr), (v))
113 130
114#elif defined(CONFIG_ISA) 131#elif defined(CONFIG_ISA) || defined(CONFIG_ATARI_ROM_ISA)
115 132
116#if MULTI_ISA == 0 133#if MULTI_ISA == 0
117#undef MULTI_ISA 134#undef MULTI_ISA
118#endif 135#endif
119 136
120#define ISA_TYPE_Q40 (1) 137#define ISA_TYPE_Q40 (1)
121#define ISA_TYPE_AG (2) 138#define ISA_TYPE_AG (2)
139#define ISA_TYPE_ENEC (3)
122 140
123#if defined(CONFIG_Q40) && !defined(MULTI_ISA) 141#if defined(CONFIG_Q40) && !defined(MULTI_ISA)
124#define ISA_TYPE ISA_TYPE_Q40 142#define ISA_TYPE ISA_TYPE_Q40
@@ -128,6 +146,10 @@ void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len);
128#define ISA_TYPE ISA_TYPE_AG 146#define ISA_TYPE ISA_TYPE_AG
129#define ISA_SEX 1 147#define ISA_SEX 1
130#endif 148#endif
149#if defined(CONFIG_ATARI_ROM_ISA) && !defined(MULTI_ISA)
150#define ISA_TYPE ISA_TYPE_ENEC
151#define ISA_SEX 0
152#endif
131 153
132#ifdef MULTI_ISA 154#ifdef MULTI_ISA
133extern int isa_type; 155extern int isa_type;
@@ -152,6 +174,9 @@ static inline u8 __iomem *isa_itb(unsigned long addr)
152#ifdef CONFIG_AMIGA_PCMCIA 174#ifdef CONFIG_AMIGA_PCMCIA
153 case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr); 175 case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr);
154#endif 176#endif
177#ifdef CONFIG_ATARI_ROM_ISA
178 case ISA_TYPE_ENEC: return (u8 __iomem *)ENEC_ISA_IO_B(addr);
179#endif
155 default: return NULL; /* avoid warnings, just in case */ 180 default: return NULL; /* avoid warnings, just in case */
156 } 181 }
157} 182}
@@ -165,6 +190,9 @@ static inline u16 __iomem *isa_itw(unsigned long addr)
165#ifdef CONFIG_AMIGA_PCMCIA 190#ifdef CONFIG_AMIGA_PCMCIA
166 case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr); 191 case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr);
167#endif 192#endif
193#ifdef CONFIG_ATARI_ROM_ISA
194 case ISA_TYPE_ENEC: return (u16 __iomem *)ENEC_ISA_IO_W(addr);
195#endif
168 default: return NULL; /* avoid warnings, just in case */ 196 default: return NULL; /* avoid warnings, just in case */
169 } 197 }
170} 198}
@@ -188,6 +216,9 @@ static inline u8 __iomem *isa_mtb(unsigned long addr)
188#ifdef CONFIG_AMIGA_PCMCIA 216#ifdef CONFIG_AMIGA_PCMCIA
189 case ISA_TYPE_AG: return (u8 __iomem *)addr; 217 case ISA_TYPE_AG: return (u8 __iomem *)addr;
190#endif 218#endif
219#ifdef CONFIG_ATARI_ROM_ISA
220 case ISA_TYPE_ENEC: return (u8 __iomem *)ENEC_ISA_MEM_B(addr);
221#endif
191 default: return NULL; /* avoid warnings, just in case */ 222 default: return NULL; /* avoid warnings, just in case */
192 } 223 }
193} 224}
@@ -201,6 +232,9 @@ static inline u16 __iomem *isa_mtw(unsigned long addr)
201#ifdef CONFIG_AMIGA_PCMCIA 232#ifdef CONFIG_AMIGA_PCMCIA
202 case ISA_TYPE_AG: return (u16 __iomem *)addr; 233 case ISA_TYPE_AG: return (u16 __iomem *)addr;
203#endif 234#endif
235#ifdef CONFIG_ATARI_ROM_ISA
236 case ISA_TYPE_ENEC: return (u16 __iomem *)ENEC_ISA_MEM_W(addr);
237#endif
204 default: return NULL; /* avoid warnings, just in case */ 238 default: return NULL; /* avoid warnings, just in case */
205 } 239 }
206} 240}
@@ -222,6 +256,36 @@ static inline u16 __iomem *isa_mtw(unsigned long addr)
222 (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \ 256 (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \
223 : out_le16(isa_mtw((unsigned long)(p)),(val))) 257 : out_le16(isa_mtw((unsigned long)(p)),(val)))
224 258
259#ifdef CONFIG_ATARI_ROM_ISA
260#define isa_rom_inb(port) rom_in_8(isa_itb(port))
261#define isa_rom_inw(port) \
262 (ISA_SEX ? rom_in_be16(isa_itw(port)) \
263 : rom_in_le16(isa_itw(port)))
264
265#define isa_rom_outb(val, port) rom_out_8(isa_itb(port), (val))
266#define isa_rom_outw(val, port) \
267 (ISA_SEX ? rom_out_be16(isa_itw(port), (val)) \
268 : rom_out_le16(isa_itw(port), (val)))
269
270#define isa_rom_readb(p) rom_in_8(isa_mtb((unsigned long)(p)))
271#define isa_rom_readw(p) \
272 (ISA_SEX ? rom_in_be16(isa_mtw((unsigned long)(p))) \
273 : rom_in_le16(isa_mtw((unsigned long)(p))))
274#define isa_rom_readw_swap(p) \
275 (ISA_SEX ? rom_in_le16(isa_mtw((unsigned long)(p))) \
276 : rom_in_be16(isa_mtw((unsigned long)(p))))
277#define isa_rom_readw_raw(p) rom_in_be16(isa_mtw((unsigned long)(p)))
278
279#define isa_rom_writeb(val, p) rom_out_8(isa_mtb((unsigned long)(p)), (val))
280#define isa_rom_writew(val, p) \
281 (ISA_SEX ? rom_out_be16(isa_mtw((unsigned long)(p)), (val)) \
282 : rom_out_le16(isa_mtw((unsigned long)(p)), (val)))
283#define isa_rom_writew_swap(val, p) \
284 (ISA_SEX ? rom_out_le16(isa_mtw((unsigned long)(p)), (val)) \
285 : rom_out_be16(isa_mtw((unsigned long)(p)), (val)))
286#define isa_rom_writew_raw(val, p) rom_out_be16(isa_mtw((unsigned long)(p)), (val))
287#endif /* CONFIG_ATARI_ROM_ISA */
288
225static inline void isa_delay(void) 289static inline void isa_delay(void)
226{ 290{
227 switch(ISA_TYPE) 291 switch(ISA_TYPE)
@@ -232,6 +296,9 @@ static inline void isa_delay(void)
232#ifdef CONFIG_AMIGA_PCMCIA 296#ifdef CONFIG_AMIGA_PCMCIA
233 case ISA_TYPE_AG: break; 297 case ISA_TYPE_AG: break;
234#endif 298#endif
299#ifdef CONFIG_ATARI_ROM_ISA
300 case ISA_TYPE_ENEC: break;
301#endif
235 default: break; /* avoid warnings */ 302 default: break; /* avoid warnings */
236 } 303 }
237} 304}
@@ -263,6 +330,29 @@ static inline void isa_delay(void)
263 raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1)) 330 raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1))
264 331
265 332
333#ifdef CONFIG_ATARI_ROM_ISA
334#define isa_rom_inb_p(p) ({ u8 _v = isa_rom_inb(p); isa_delay(); _v; })
335#define isa_rom_inw_p(p) ({ u16 _v = isa_rom_inw(p); isa_delay(); _v; })
336#define isa_rom_outb_p(v, p) ({ isa_rom_outb((v), (p)); isa_delay(); })
337#define isa_rom_outw_p(v, p) ({ isa_rom_outw((v), (p)); isa_delay(); })
338
339#define isa_rom_insb(port, buf, nr) raw_rom_insb(isa_itb(port), (u8 *)(buf), (nr))
340
341#define isa_rom_insw(port, buf, nr) \
342 (ISA_SEX ? raw_rom_insw(isa_itw(port), (u16 *)(buf), (nr)) : \
343 raw_rom_insw_swapw(isa_itw(port), (u16 *)(buf), (nr)))
344
345#define isa_rom_outsb(port, buf, nr) raw_rom_outsb(isa_itb(port), (u8 *)(buf), (nr))
346
347#define isa_rom_outsw(port, buf, nr) \
348 (ISA_SEX ? raw_rom_outsw(isa_itw(port), (u16 *)(buf), (nr)) : \
349 raw_rom_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)))
350#endif /* CONFIG_ATARI_ROM_ISA */
351
352#endif /* CONFIG_ISA || CONFIG_ATARI_ROM_ISA */
353
354
355#if defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
266#define inb isa_inb 356#define inb isa_inb
267#define inb_p isa_inb_p 357#define inb_p isa_inb_p
268#define outb isa_outb 358#define outb isa_outb
@@ -285,9 +375,43 @@ static inline void isa_delay(void)
285#define readw isa_readw 375#define readw isa_readw
286#define writeb isa_writeb 376#define writeb isa_writeb
287#define writew isa_writew 377#define writew isa_writew
378#endif /* CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
288 379
289#else /* CONFIG_ISA */ 380#ifdef CONFIG_ATARI_ROM_ISA
290 381/*
 382 * A kernel with both ROM port ISA and IDE compiled in would have
 383 * conflicting definitions for in/out. Simply treat ports below 1024 as
 384 * ROM port ISA and everything else as regular ISA for IDE; read/write are
 385 * defined below.
386 */
387#define inb(port) ((port) < 1024 ? isa_rom_inb(port) : in_8(port))
388#define inb_p(port) ((port) < 1024 ? isa_rom_inb_p(port) : in_8(port))
389#define inw(port) ((port) < 1024 ? isa_rom_inw(port) : in_le16(port))
390#define inw_p(port) ((port) < 1024 ? isa_rom_inw_p(port) : in_le16(port))
391#define inl isa_inl
392#define inl_p isa_inl_p
393
394#define outb(val, port) ((port) < 1024 ? isa_rom_outb((val), (port)) : out_8((port), (val)))
395#define outb_p(val, port) ((port) < 1024 ? isa_rom_outb_p((val), (port)) : out_8((port), (val)))
396#define outw(val, port) ((port) < 1024 ? isa_rom_outw((val), (port)) : out_le16((port), (val)))
397#define outw_p(val, port) ((port) < 1024 ? isa_rom_outw_p((val), (port)) : out_le16((port), (val)))
398#define outl isa_outl
399#define outl_p isa_outl_p
400
401#define insb(port, buf, nr) ((port) < 1024 ? isa_rom_insb((port), (buf), (nr)) : isa_insb((port), (buf), (nr)))
402#define insw(port, buf, nr) ((port) < 1024 ? isa_rom_insw((port), (buf), (nr)) : isa_insw((port), (buf), (nr)))
403#define insl isa_insl
404#define outsb(port, buf, nr) ((port) < 1024 ? isa_rom_outsb((port), (buf), (nr)) : isa_outsb((port), (buf), (nr)))
405#define outsw(port, buf, nr) ((port) < 1024 ? isa_rom_outsw((port), (buf), (nr)) : isa_outsw((port), (buf), (nr)))
406#define outsl isa_outsl
407
408#define readb(addr) in_8(addr)
409#define writeb(val, addr) out_8((addr), (val))
410#define readw(addr) in_le16(addr)
411#define writew(val, addr) out_le16((addr), (val))
412#endif /* CONFIG_ATARI_ROM_ISA */
413
414#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
291/* 415/*
292 * We need to define dummy functions for GENERIC_IOMAP support. 416 * We need to define dummy functions for GENERIC_IOMAP support.
293 */ 417 */
@@ -319,7 +443,7 @@ static inline void isa_delay(void)
319#define readw(addr) in_le16(addr) 443#define readw(addr) in_le16(addr)
320#define writew(val,addr) out_le16((addr),(val)) 444#define writew(val,addr) out_le16((addr),(val))
321 445
322#endif /* CONFIG_ISA */ 446#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
323 447
324#define readl(addr) in_le32(addr) 448#define readl(addr) in_le32(addr)
325#define writel(val,addr) out_le32((addr),(val)) 449#define writel(val,addr) out_le32((addr),(val))
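To make the ENEC_ISA_IO_B() encoding above more concrete, the following stand-alone sketch computes the ROM port addresses for a few ISA ports (illustrative only, not part of the patch; the NE2000-style register offsets at the adapter's hardwired base of 0x300 are assumed, the base address and mask are copied from the macros above):

#include <stdio.h>

/* Mirrors ENEC_ISA_IO_B() above; writes would go through the +0x10000 window. */
#define ENEC_ISA_READ_BASE 0xfffa0000UL
#define ENEC_ISA_IO_B(ioaddr) \
	(ENEC_ISA_READ_BASE + ((((unsigned long)(ioaddr)) & 0x7F) << 9))

int main(void)
{
	/* Assumed NE2000-style ports at the adapter's hardwired base 0x300. */
	unsigned long ports[] = { 0x300, 0x310, 0x31f };

	for (int i = 0; i < 3; i++)
		printf("ISA port 0x%03lx -> ROM port read address 0x%08lx\n",
		       ports[i], ENEC_ISA_IO_B(ports[i]));

	/* A port >= 1024 bypasses the ROM port path entirely and is treated
	 * as a raw register address by the inb()/outb() definitions above. */
	return 0;
}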
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index c1155f0e22cc..81ca118d58af 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -6,12 +6,16 @@
6 * different m68k hosts compiled into the kernel. 6 * different m68k hosts compiled into the kernel.
7 * Currently the Atari has 72 and the Amiga 24, but if both are 7 * Currently the Atari has 72 and the Amiga 24, but if both are
8 * supported in the kernel it is better to make room for 72. 8 * supported in the kernel it is better to make room for 72.
 9 * With the EtherNAT add-on card on Atari, the highest interrupt
 10 * number is 140, so NR_IRQS needs to be 141.
9 */ 11 */
10#if defined(CONFIG_COLDFIRE) 12#if defined(CONFIG_COLDFIRE)
11#define NR_IRQS 256 13#define NR_IRQS 256
12#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X) 14#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
13#define NR_IRQS 200 15#define NR_IRQS 200
14#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC) 16#elif defined(CONFIG_ATARI)
17#define NR_IRQS 141
18#elif defined(CONFIG_MAC)
15#define NR_IRQS 72 19#define NR_IRQS 72
16#elif defined(CONFIG_Q40) 20#elif defined(CONFIG_Q40)
17#define NR_IRQS 43 21#define NR_IRQS 43
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
index d9eb9834ccc8..932faa35655b 100644
--- a/arch/m68k/include/asm/raw_io.h
+++ b/arch/m68k/include/asm/raw_io.h
@@ -10,7 +10,7 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#include <asm/types.h> 13#include <asm/byteorder.h>
14 14
15 15
16/* Values for nocacheflag and cmode */ 16/* Values for nocacheflag and cmode */
@@ -60,6 +60,57 @@ extern void __iounmap(void *addr, unsigned long size);
60#define __raw_writew(val,addr) out_be16((addr),(val)) 60#define __raw_writew(val,addr) out_be16((addr),(val))
61#define __raw_writel(val,addr) out_be32((addr),(val)) 61#define __raw_writel(val,addr) out_be32((addr),(val))
62 62
63/*
64 * Atari ROM port (cartridge port) ISA adapter, used for the EtherNEC NE2000
65 * network card driver.
66 * The ISA adapter connects address lines A9-A13 to ISA address lines A0-A4,
67 * and hardwires the rest of the ISA addresses for a base address of 0x300.
68 *
69 * Data lines D8-D15 are connected to ISA data lines D0-D7 for reading.
70 * For writes, address lines A1-A8 are latched to ISA data lines D0-D7
 71 * (meaning the bit pattern on A1-A8 can be read back as a byte).
72 *
73 * Read and write operations are distinguished by the base address used:
74 * reads are from the ROM A side range, writes are through the B side range
75 * addresses (A side base + 0x10000).
76 *
77 * Reads and writes are byte only.
78 *
79 * 16 bit reads and writes are necessary for the NetUSBee adapter's USB
80 * chipset - 16 bit words are read straight off the ROM port while 16 bit
 81 * writes are split into two byte transfers. The low byte is latched to the
82 * NetUSBee buffer by a read from the _read_ window (with the data pattern
83 * asserted as A1-A8 address pattern). The high byte is then written to the
84 * write range as usual, completing the write cycle.
85 */
86
87#if defined(CONFIG_ATARI_ROM_ISA)
88#define rom_in_8(addr) \
89 ({ u16 __v = (*(__force volatile u16 *) (addr)); __v >>= 8; __v; })
90#define rom_in_be16(addr) \
91 ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
92#define rom_in_le16(addr) \
93 ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
94
95#define rom_out_8(addr, b) \
96 ({u8 __w, __v = (b); u32 _addr = ((u32) (addr)); \
97 __w = ((*(__force volatile u8 *) ((_addr | 0x10000) + (__v<<1)))); })
98#define rom_out_be16(addr, w) \
99 ({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \
100 __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \
101 __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); })
102#define rom_out_le16(addr, w) \
103 ({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \
104 __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \
105 __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); })
106
107#define raw_rom_inb rom_in_8
108#define raw_rom_inw rom_in_be16
109
110#define raw_rom_outb(val, port) rom_out_8((port), (val))
111#define raw_rom_outw(val, port) rom_out_be16((port), (val))
112#endif /* CONFIG_ATARI_ROM_ISA */
113
63static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len) 114static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
64{ 115{
65 unsigned int i; 116 unsigned int i;
@@ -342,6 +393,62 @@ static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
342 : "d0", "a0", "a1", "d6"); 393 : "d0", "a0", "a1", "d6");
343} 394}
344 395
396
397#if defined(CONFIG_ATARI_ROM_ISA)
398static inline void raw_rom_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
399{
400 unsigned int i;
401
402 for (i = 0; i < len; i++)
403 *buf++ = rom_in_8(port);
404}
405
406static inline void raw_rom_outsb(volatile u8 __iomem *port, const u8 *buf,
407 unsigned int len)
408{
409 unsigned int i;
410
411 for (i = 0; i < len; i++)
412 rom_out_8(port, *buf++);
413}
414
415static inline void raw_rom_insw(volatile u16 __iomem *port, u16 *buf,
416 unsigned int nr)
417{
418 unsigned int i;
419
420 for (i = 0; i < nr; i++)
421 *buf++ = rom_in_be16(port);
422}
423
424static inline void raw_rom_outsw(volatile u16 __iomem *port, const u16 *buf,
425 unsigned int nr)
426{
427 unsigned int i;
428
429 for (i = 0; i < nr; i++)
430 rom_out_be16(port, *buf++);
431}
432
433static inline void raw_rom_insw_swapw(volatile u16 __iomem *port, u16 *buf,
434 unsigned int nr)
435{
436 unsigned int i;
437
438 for (i = 0; i < nr; i++)
439 *buf++ = rom_in_le16(port);
440}
441
442static inline void raw_rom_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
443 unsigned int nr)
444{
445 unsigned int i;
446
447 for (i = 0; i < nr; i++)
448 rom_out_le16(port, *buf++);
449}
450#endif /* CONFIG_ATARI_ROM_ISA */
451
345#endif /* __KERNEL__ */ 452#endif /* __KERNEL__ */
346 453
347#endif /* _RAW_IO_H */ 454#endif /* _RAW_IO_H */
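The write path described in the comment above is unusual: the data byte never travels on the data bus, it is encoded into address bits A1-A8 of an access to the write window. A stand-alone sketch of the address arithmetic used by rom_out_8() (illustrative only, not part of the patch; the read-window address and data byte are assumed):

#include <stdio.h>

/* Same arithmetic as rom_out_8() above, reduced to its address math. */
static unsigned long rom_write_address(unsigned long port_addr, unsigned char b)
{
	/* write window is the read window + 0x10000; the byte lands on A1-A8 */
	return (port_addr | 0x10000UL) + ((unsigned long)b << 1);
}

int main(void)
{
	unsigned long port = 0xfffa2000UL;	/* assumed read-window address */
	unsigned char val = 0xA5;

	printf("writing 0x%02x to 0x%08lx means accessing 0x%08lx\n",
	       val, port, rom_write_address(port, val));
	return 0;
}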
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 32198454da70..9aea9f11fa25 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -4,15 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6 6
7static inline size_t __kernel_strlen(const char *s)
8{
9 const char *sc;
10
11 for (sc = s; *sc++; )
12 ;
13 return sc - s - 1;
14}
15
16static inline char *__kernel_strcpy(char *dest, const char *src) 7static inline char *__kernel_strcpy(char *dest, const char *src)
17{ 8{
18 char *xdest = dest; 9 char *xdest = dest;
@@ -27,11 +18,6 @@ static inline char *__kernel_strcpy(char *dest, const char *src)
27 18
28#ifndef __IN_STRING_C 19#ifndef __IN_STRING_C
29 20
30#define __HAVE_ARCH_STRLEN
31#define strlen(s) (__builtin_constant_p(s) ? \
32 __builtin_strlen(s) : \
33 __kernel_strlen(s))
34
35#define __HAVE_ARCH_STRNLEN 21#define __HAVE_ARCH_STRNLEN
36static inline size_t strnlen(const char *s, size_t count) 22static inline size_t strnlen(const char *s, size_t count)
37{ 23{
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 6cd92671ca5e..014f288fc813 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -32,12 +32,4 @@
32#define __ARCH_WANT_SYS_FORK 32#define __ARCH_WANT_SYS_FORK
33#define __ARCH_WANT_SYS_VFORK 33#define __ARCH_WANT_SYS_VFORK
34 34
35/*
36 * "Conditional" syscalls
37 *
38 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
39 * but it doesn't work on all toolchains, so we just do it by hand
40 */
41#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
42
43#endif /* _ASM_M68K_UNISTD_H_ */ 35#endif /* _ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index d538694ad208..c55ff719fa72 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -51,40 +51,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
51 return sw->retpc; 51 return sw->retpc;
52} 52}
53 53
54/* 54void arch_cpu_idle(void)
55 * The idle loop on an m68k..
56 */
57static void default_idle(void)
58{ 55{
59 if (!need_resched())
60#if defined(MACH_ATARI_ONLY) 56#if defined(MACH_ATARI_ONLY)
61 /* block out HSYNC on the atari (falcon) */ 57 /* block out HSYNC on the atari (falcon) */
62 __asm__("stop #0x2200" : : : "cc"); 58 __asm__("stop #0x2200" : : : "cc");
63#else 59#else
64 __asm__("stop #0x2000" : : : "cc"); 60 __asm__("stop #0x2000" : : : "cc");
65#endif 61#endif
66} 62}
67 63
68void (*idle)(void) = default_idle;
69
70/*
71 * The idle thread. There's no useful work to be
72 * done, so just try to conserve power and have a
73 * low exit latency (ie sit in a loop waiting for
74 * somebody to say that they'd like to reschedule)
75 */
76void cpu_idle(void)
77{
78 /* endless idle loop with no priority at all */
79 while (1) {
80 rcu_idle_enter();
81 while (!need_resched())
82 idle();
83 rcu_idle_exit();
84 schedule_preempt_disabled();
85 }
86}
87
88void machine_restart(char * __unused) 64void machine_restart(char * __unused)
89{ 65{
90 if (mach_reset) 66 if (mach_reset)
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 80cfbe56ea32..e67e53159573 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -381,6 +381,12 @@ void __init setup_arch(char **cmdline_p)
381 isa_sex = 1; 381 isa_sex = 1;
382 } 382 }
383#endif 383#endif
384#ifdef CONFIG_ATARI_ROM_ISA
385 if (MACH_IS_ATARI) {
386 isa_type = ISA_TYPE_ENEC;
387 isa_sex = 0;
388 }
389#endif
384#endif 390#endif
385} 391}
386 392
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index f32ab22e7ed3..88fcd8c70e7b 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -992,18 +992,6 @@ void show_stack(struct task_struct *task, unsigned long *stack)
992} 992}
993 993
994/* 994/*
995 * The architecture-independent backtrace generator
996 */
997void dump_stack(void)
998{
999 unsigned long stack;
1000
1001 show_trace(&stack);
1002}
1003
1004EXPORT_SYMBOL(dump_stack);
1005
1006/*
1007 * The vector number returned in the frame pointer may also contain 995 * The vector number returned in the frame pointer may also contain
1008 * the "fs" (Fault Status) bits on ColdFire. These are in the bottom 996 * the "fs" (Fault Status) bits on ColdFire. These are in the bottom
1009 * 2 bits, and upper 2 bits. So we need to mask out the real vector 997 * 2 bits, and upper 2 bits. So we need to mask out the real vector
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index b9a57abfad08..4d61fa8a112c 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -17,6 +17,6 @@ EXPORT_SYMBOL(strcpy);
17 17
18char *strcat(char *dest, const char *src) 18char *strcat(char *dest, const char *src)
19{ 19{
20 return __kernel_strcpy(dest + __kernel_strlen(dest), src); 20 return __kernel_strcpy(dest + strlen(dest), src);
21} 21}
22EXPORT_SYMBOL(strcat); 22EXPORT_SYMBOL(strcat);
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 519aad8fa812..1af2ca3411f6 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -110,18 +110,7 @@ void __init paging_init(void)
110void free_initmem(void) 110void free_initmem(void)
111{ 111{
112#ifndef CONFIG_MMU_SUN3 112#ifndef CONFIG_MMU_SUN3
113 unsigned long addr; 113 free_initmem_default(0);
114
115 addr = (unsigned long) __init_begin;
116 for (; addr < ((unsigned long) __init_end); addr += PAGE_SIZE) {
117 ClearPageReserved(virt_to_page(addr));
118 init_page_count(virt_to_page(addr));
119 free_page(addr);
120 totalram_pages++;
121 }
122 pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
123 (addr - (unsigned long) __init_begin) >> 10,
124 (unsigned int) __init_begin, (unsigned int) __init_end);
125#endif /* CONFIG_MMU_SUN3 */ 114#endif /* CONFIG_MMU_SUN3 */
126} 115}
127 116
@@ -213,15 +202,6 @@ void __init mem_init(void)
213#ifdef CONFIG_BLK_DEV_INITRD 202#ifdef CONFIG_BLK_DEV_INITRD
214void free_initrd_mem(unsigned long start, unsigned long end) 203void free_initrd_mem(unsigned long start, unsigned long end)
215{ 204{
216 int pages = 0; 205 free_reserved_area(start, end, 0, "initrd");
217 for (; start < end; start += PAGE_SIZE) {
218 ClearPageReserved(virt_to_page(start));
219 init_page_count(virt_to_page(start));
220 free_page(start);
221 totalram_pages++;
222 pages++;
223 }
224 pr_notice("Freeing initrd memory: %dk freed\n",
225 pages << (PAGE_SHIFT - 10));
226} 206}
227#endif 207#endif
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index afc8973d1488..b06b41861aac 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -25,6 +25,7 @@ config METAG
25 select HAVE_MEMBLOCK 25 select HAVE_MEMBLOCK
26 select HAVE_MEMBLOCK_NODE_MAP 26 select HAVE_MEMBLOCK_NODE_MAP
27 select HAVE_MOD_ARCH_SPECIFIC 27 select HAVE_MOD_ARCH_SPECIFIC
28 select HAVE_OPROFILE
28 select HAVE_PERF_EVENTS 29 select HAVE_PERF_EVENTS
29 select HAVE_SYSCALL_TRACEPOINTS 30 select HAVE_SYSCALL_TRACEPOINTS
30 select IRQ_DOMAIN 31 select IRQ_DOMAIN
@@ -209,6 +210,9 @@ config METAG_PERFCOUNTER_IRQS
209 When disabled, Performance Counters information will be collected 210 When disabled, Performance Counters information will be collected
210 based on Timer Interrupt. 211 based on Timer Interrupt.
211 212
213config HW_PERF_EVENTS
214 def_bool METAG_PERFCOUNTER_IRQS && PERF_EVENTS
215
212config METAG_DA 216config METAG_DA
213 bool "DA support" 217 bool "DA support"
214 help 218 help
diff --git a/arch/metag/Makefile b/arch/metag/Makefile
index 81bd6a1c7483..b566116b171b 100644
--- a/arch/metag/Makefile
+++ b/arch/metag/Makefile
@@ -49,6 +49,8 @@ core-y += arch/metag/mm/
49libs-y += arch/metag/lib/ 49libs-y += arch/metag/lib/
50libs-y += arch/metag/tbx/ 50libs-y += arch/metag/tbx/
51 51
52drivers-$(CONFIG_OPROFILE) += arch/metag/oprofile/
53
52boot := arch/metag/boot 54boot := arch/metag/boot
53 55
54boot_targets += uImage 56boot_targets += uImage
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile
index e0b5afd8bde8..dbd95217733a 100644
--- a/arch/metag/boot/dts/Makefile
+++ b/arch/metag/boot/dts/Makefile
@@ -4,13 +4,17 @@ dtb-y += skeleton.dtb
4builtindtb-y := skeleton 4builtindtb-y := skeleton
5 5
6ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"") 6ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"")
7 builtindtb-y := $(CONFIG_METAG_BUILTIN_DTB_NAME) 7 builtindtb-y := $(patsubst "%",%,$(CONFIG_METAG_BUILTIN_DTB_NAME))
8endif 8endif
9obj-$(CONFIG_METAG_BUILTIN_DTB) += $(patsubst "%",%,$(builtindtb-y)).dtb.o 9
10dtb-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb
11obj-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb.o
10 12
11targets += dtbs 13targets += dtbs
12targets += $(dtb-y) 14targets += $(dtb-y)
13 15
16.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
17
14dtbs: $(addprefix $(obj)/, $(dtb-y)) 18dtbs: $(addprefix $(obj)/, $(dtb-y))
15 19
16clean-files += *.dtb 20clean-files += *.dtb *.dtb.S
diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig
index c35a75e8ecfe..01cd67e4403d 100644
--- a/arch/metag/configs/meta1_defconfig
+++ b/arch/metag/configs/meta1_defconfig
@@ -1,6 +1,5 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
3CONFIG_LOG_BUF_SHIFT=13
4CONFIG_SYSFS_DEPRECATED=y 3CONFIG_SYSFS_DEPRECATED=y
5CONFIG_SYSFS_DEPRECATED_V2=y 4CONFIG_SYSFS_DEPRECATED_V2=y
6CONFIG_KALLSYMS_ALL=y 5CONFIG_KALLSYMS_ALL=y
diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig
index fb3148410183..643392ba7ed5 100644
--- a/arch/metag/configs/meta2_defconfig
+++ b/arch/metag/configs/meta2_defconfig
@@ -1,7 +1,6 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
4CONFIG_LOG_BUF_SHIFT=13
5CONFIG_SYSFS_DEPRECATED=y 4CONFIG_SYSFS_DEPRECATED=y
6CONFIG_SYSFS_DEPRECATED_V2=y 5CONFIG_SYSFS_DEPRECATED_V2=y
7CONFIG_KALLSYMS_ALL=y 6CONFIG_KALLSYMS_ALL=y
diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig
index 6c7b777ac276..f3306737da20 100644
--- a/arch/metag/configs/meta2_smp_defconfig
+++ b/arch/metag/configs/meta2_smp_defconfig
@@ -1,7 +1,6 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
4CONFIG_LOG_BUF_SHIFT=13
5CONFIG_SYSFS_DEPRECATED=y 4CONFIG_SYSFS_DEPRECATED=y
6CONFIG_SYSFS_DEPRECATED_V2=y 5CONFIG_SYSFS_DEPRECATED_V2=y
7CONFIG_KALLSYMS_ALL=y 6CONFIG_KALLSYMS_ALL=y
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h
index 3f7b54d8ccac..aa5a076df439 100644
--- a/arch/metag/include/asm/metag_mem.h
+++ b/arch/metag/include/asm/metag_mem.h
@@ -700,6 +700,9 @@
700#define SYSC_xCPARTG_AND_S 8 700#define SYSC_xCPARTG_AND_S 8
701#define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */ 701#define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */
702#define SYSC_xCPARTL_OR_S 16 702#define SYSC_xCPARTL_OR_S 16
703#ifdef METAC_2_1
704#define SYSC_DCPART_GCON_BIT 0x00100000 /* Coherent shared local */
705#endif /* METAC_2_1 */
703#define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */ 706#define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */
704#define SYSC_xCPARTG_OR_S 24 707#define SYSC_xCPARTG_OR_S 24
705#define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */ 708#define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 0ecd34d8b5f6..7c4a33006142 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -150,6 +150,4 @@ static inline int kstack_end(void *addr)
150#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ 150#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
151 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) 151 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
152 152
153#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
154
155#endif /* _ASM_THREAD_INFO_H */ 153#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index 876c71f866de..84e09feb4d54 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += byteorder.h 4header-y += byteorder.h
5header-y += ech.h
5header-y += ptrace.h 6header-y += ptrace.h
6header-y += resource.h 7header-y += resource.h
7header-y += sigcontext.h 8header-y += sigcontext.h
diff --git a/arch/metag/include/uapi/asm/ech.h b/arch/metag/include/uapi/asm/ech.h
new file mode 100644
index 000000000000..ac94d1cf9be4
--- /dev/null
+++ b/arch/metag/include/uapi/asm/ech.h
@@ -0,0 +1,15 @@
1#ifndef _UAPI_METAG_ECH_H
2#define _UAPI_METAG_ECH_H
3
4/*
5 * These bits can be set in the top half of the D0.8 register when DSP context
6 * switching is enabled, in order to support partial DSP context save/restore.
7 */
8
9#define TBICTX_XEXT_BIT 0x1000 /* Enable extended context save */
10#define TBICTX_XTDP_BIT 0x0800 /* DSP accumulators/RAM/templates */
11#define TBICTX_XHL2_BIT 0x0400 /* Hardware loops */
12#define TBICTX_XAXX_BIT 0x0200 /* Extended AX registers (A*.4-7) */
13#define TBICTX_XDX8_BIT 0x0100 /* Extended DX registers (D*.8-15) */
14
15#endif /* _UAPI_METAG_ECH_H */
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
index 3a589dfb966b..954548b1bea8 100644
--- a/arch/metag/kernel/cachepart.c
+++ b/arch/metag/kernel/cachepart.c
@@ -24,15 +24,21 @@
24unsigned int get_dcache_size(void) 24unsigned int get_dcache_size(void)
25{ 25{
26 unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); 26 unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
27 return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS) 27 unsigned int sz = 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
28 >> METAC_CORECFG2_DCSZ_S); 28 >> METAC_CORECFG2_DCSZ_S);
29 if (config2 & METAC_CORECFG2_DCSMALL_BIT)
30 sz >>= 6;
31 return sz;
29} 32}
30 33
31unsigned int get_icache_size(void) 34unsigned int get_icache_size(void)
32{ 35{
33 unsigned int config2 = metag_in32(METAC_CORE_CONFIG2); 36 unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
34 return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS) 37 unsigned int sz = 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
35 >> METAC_CORE_C2ICSZ_S); 38 >> METAC_CORE_C2ICSZ_S);
39 if (config2 & METAC_CORECFG2_ICSMALL_BIT)
40 sz >>= 6;
41 return sz;
36} 42}
37 43
38unsigned int get_global_dcache_size(void) 44unsigned int get_global_dcache_size(void)
@@ -61,7 +67,7 @@ static unsigned int get_thread_cache_size(unsigned int cache, int thread_id)
61 return 0; 67 return 0;
62#if PAGE_OFFSET >= LINGLOBAL_BASE 68#if PAGE_OFFSET >= LINGLOBAL_BASE
63 /* Checking for global cache */ 69 /* Checking for global cache */
64 cache_size = (cache == DCACHE ? get_global_dache_size() : 70 cache_size = (cache == DCACHE ? get_global_dcache_size() :
65 get_global_icache_size()); 71 get_global_icache_size());
66 offset = 8; 72 offset = 8;
67#else 73#else
diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c
index 52aabb658fde..a35dbed6fffa 100644
--- a/arch/metag/kernel/da.c
+++ b/arch/metag/kernel/da.c
@@ -5,12 +5,14 @@
5 */ 5 */
6 6
7 7
8#include <linux/export.h>
8#include <linux/io.h> 9#include <linux/io.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <asm/da.h> 11#include <asm/da.h>
11#include <asm/metag_mem.h> 12#include <asm/metag_mem.h>
12 13
13bool _metag_da_present; 14bool _metag_da_present;
15EXPORT_SYMBOL_GPL(_metag_da_present);
14 16
15int __init metag_da_probe(void) 17int __init metag_da_probe(void)
16{ 18{
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S
index 969dffabc03a..713f71d1bdfe 100644
--- a/arch/metag/kernel/head.S
+++ b/arch/metag/kernel/head.S
@@ -1,6 +1,7 @@
1 ! Copyright 2005,2006,2007,2009 Imagination Technologies 1 ! Copyright 2005,2006,2007,2009 Imagination Technologies
2 2
3#include <linux/init.h> 3#include <linux/init.h>
4#include <asm/metag_mem.h>
4#include <generated/asm-offsets.h> 5#include <generated/asm-offsets.h>
5#undef __exit 6#undef __exit
6 7
@@ -48,6 +49,13 @@ __exit:
48 .global _secondary_startup 49 .global _secondary_startup
49 .type _secondary_startup,function 50 .type _secondary_startup,function
50_secondary_startup: 51_secondary_startup:
52#if CONFIG_PAGE_OFFSET < LINGLOBAL_BASE
53 ! In case GCOn has just been turned on we need to fence any writes that
54 ! the boot thread might have performed prior to coherency taking effect.
55 MOVT D0Re0,#HI(LINSYSEVENT_WR_ATOMIC_UNLOCK)
56 MOV D1Re0,#0
57 SETD [D0Re0], D1Re0
58#endif
51 MOVT A0StP,#HI(_secondary_data_stack) 59 MOVT A0StP,#HI(_secondary_data_stack)
52 ADD A0StP,A0StP,#LO(_secondary_data_stack) 60 ADD A0StP,A0StP,#LO(_secondary_data_stack)
53 GETD A0StP,[A0StP] 61 GETD A0StP,[A0StP]
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index a876d5ff3897..366569425c52 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -22,9 +22,9 @@
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#include <asm/core_reg.h> 24#include <asm/core_reg.h>
25#include <asm/hwthread.h>
26#include <asm/io.h> 25#include <asm/io.h>
27#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/processor.h>
28 28
29#include "perf_event.h" 29#include "perf_event.h"
30 30
@@ -40,10 +40,10 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
40/* PMU admin */ 40/* PMU admin */
41const char *perf_pmu_name(void) 41const char *perf_pmu_name(void)
42{ 42{
43 if (metag_pmu) 43 if (!metag_pmu)
44 return metag_pmu->pmu.name; 44 return NULL;
45 45
46 return NULL; 46 return metag_pmu->name;
47} 47}
48EXPORT_SYMBOL_GPL(perf_pmu_name); 48EXPORT_SYMBOL_GPL(perf_pmu_name);
49 49
@@ -171,6 +171,7 @@ static int metag_pmu_event_init(struct perf_event *event)
171 switch (event->attr.type) { 171 switch (event->attr.type) {
172 case PERF_TYPE_HARDWARE: 172 case PERF_TYPE_HARDWARE:
173 case PERF_TYPE_HW_CACHE: 173 case PERF_TYPE_HW_CACHE:
174 case PERF_TYPE_RAW:
174 err = _hw_perf_event_init(event); 175 err = _hw_perf_event_init(event);
175 break; 176 break;
176 177
@@ -211,9 +212,10 @@ again:
211 /* 212 /*
212 * Calculate the delta and add it to the counter. 213 * Calculate the delta and add it to the counter.
213 */ 214 */
214 delta = new_raw_count - prev_raw_count; 215 delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;
215 216
216 local64_add(delta, &event->count); 217 local64_add(delta, &event->count);
218 local64_sub(delta, &hwc->period_left);
217} 219}
218 220
219int metag_pmu_event_set_period(struct perf_event *event, 221int metag_pmu_event_set_period(struct perf_event *event,
@@ -223,6 +225,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
223 s64 period = hwc->sample_period; 225 s64 period = hwc->sample_period;
224 int ret = 0; 226 int ret = 0;
225 227
228 /* The period may have been changed */
229 if (unlikely(period != hwc->last_period))
230 left += period - hwc->last_period;
231
226 if (unlikely(left <= -period)) { 232 if (unlikely(left <= -period)) {
227 left = period; 233 left = period;
228 local64_set(&hwc->period_left, left); 234 local64_set(&hwc->period_left, left);
@@ -240,8 +246,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
240 if (left > (s64)metag_pmu->max_period) 246 if (left > (s64)metag_pmu->max_period)
241 left = metag_pmu->max_period; 247 left = metag_pmu->max_period;
242 248
243 if (metag_pmu->write) 249 if (metag_pmu->write) {
244 metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD); 250 local64_set(&hwc->prev_count, -(s32)left);
251 metag_pmu->write(idx, -left & MAX_PERIOD);
252 }
245 253
246 perf_event_update_userpage(event); 254 perf_event_update_userpage(event);
247 255
@@ -549,6 +557,10 @@ static int _hw_perf_event_init(struct perf_event *event)
549 if (err) 557 if (err)
550 return err; 558 return err;
551 break; 559 break;
560
561 case PERF_TYPE_RAW:
562 mapping = attr->config;
563 break;
552 } 564 }
553 565
554 /* Return early if the event is unsupported */ 566 /* Return early if the event is unsupported */
@@ -610,15 +622,13 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
610 WARN_ONCE((config != 0x100), 622 WARN_ONCE((config != 0x100),
611 "invalid configuration (%d) for counter (%d)\n", 623 "invalid configuration (%d) for counter (%d)\n",
612 config, idx); 624 config, idx);
613 625 local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
614 /* Reset the cycle count */
615 __core_reg_set(TXTACTCYC, 0);
616 goto unlock; 626 goto unlock;
617 } 627 }
618 628
619 /* Check for a core internal or performance channel event. */ 629 /* Check for a core internal or performance channel event. */
620 if (tmp) { 630 if (tmp) {
621 void *perf_addr = (void *)PERF_COUNT(idx); 631 void *perf_addr;
622 632
623 /* 633 /*
624 * Anything other than a cycle count will write the low- 634 * Anything other than a cycle count will write the low-
@@ -632,9 +642,14 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
632 case 0xf0: 642 case 0xf0:
633 perf_addr = (void *)PERF_CHAN(idx); 643 perf_addr = (void *)PERF_CHAN(idx);
634 break; 644 break;
645
646 default:
647 perf_addr = NULL;
648 break;
635 } 649 }
636 650
637 metag_out32((tmp & 0x0f), perf_addr); 651 if (perf_addr)
652 metag_out32((config & 0x0f), perf_addr);
638 653
639 /* 654 /*
640 * Now we use the high nibble as the performance event to 655 * Now we use the high nibble as the performance event to
@@ -643,13 +658,21 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
643 config = tmp >> 4; 658 config = tmp >> 4;
644 } 659 }
645 660
646 /*
647 * Enabled counters start from 0. Early cores clear the count on
648 * write but newer cores don't, so we make sure that the count is
649 * set to 0.
650 */
651 tmp = ((config & 0xf) << 28) | 661 tmp = ((config & 0xf) << 28) |
652 ((1 << 24) << cpu_2_hwthread_id[get_cpu()]); 662 ((1 << 24) << hard_processor_id());
663 if (metag_pmu->max_period)
664 /*
665 * Cores supporting overflow interrupts may have had the counter
666 * set to a specific value that needs preserving.
667 */
668 tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
669 else
670 /*
671 * Older cores reset the counter on write, so prev_count needs
672 * resetting too so we can calculate a correct delta.
673 */
674 local64_set(&event->prev_count, 0);
675
653 metag_out32(tmp, PERF_COUNT(idx)); 676 metag_out32(tmp, PERF_COUNT(idx));
654unlock: 677unlock:
655 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 678 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
@@ -693,9 +716,8 @@ static u64 metag_pmu_read_counter(int idx)
693{ 716{
694 u32 tmp = 0; 717 u32 tmp = 0;
695 718
696 /* The act of reading the cycle counter also clears it */
697 if (METAG_INST_COUNTER == idx) { 719 if (METAG_INST_COUNTER == idx) {
698 __core_reg_swap(TXTACTCYC, tmp); 720 tmp = __core_reg_get(TXTACTCYC);
699 goto out; 721 goto out;
700 } 722 }
701 723
@@ -764,10 +786,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
764 786
765 /* 787 /*
766 * Enable the counter again once core overflow processing has 788 * Enable the counter again once core overflow processing has
767 * completed. 789 * completed. Note the counter value may have been modified while it was
790 * inactive to set it up ready for the next interrupt.
768 */ 791 */
769 if (!perf_event_overflow(event, &sampledata, regs)) 792 if (!perf_event_overflow(event, &sampledata, regs)) {
793 __global_lock2(flags);
794 counter = (counter & 0xff000000) |
795 (metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
770 metag_out32(counter, PERF_COUNT(idx)); 796 metag_out32(counter, PERF_COUNT(idx));
797 __global_unlock2(flags);
798 }
771 799
772 return IRQ_HANDLED; 800 return IRQ_HANDLED;
773} 801}
@@ -830,7 +858,7 @@ static int __init init_hw_perf_events(void)
830 metag_pmu->max_period = 0; 858 metag_pmu->max_period = 0;
831 } 859 }
832 860
833 metag_pmu->name = "Meta 2"; 861 metag_pmu->name = "meta2";
834 metag_pmu->version = version; 862 metag_pmu->version = version;
835 metag_pmu->pmu = pmu; 863 metag_pmu->pmu = pmu;
836 } 864 }
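The masked delta introduced above ((new_raw_count - prev_raw_count) & MAX_PERIOD) is the standard trick for counters narrower than 64 bits: unsigned subtraction followed by masking to the counter width gives the correct delta even after the hardware counter has wrapped. A stand-alone sketch (the 24-bit width is assumed purely for illustration; the kernel uses the architecture's MAX_PERIOD mask):

#include <stdio.h>

/* Assumed 24-bit counter width; the real mask comes from the Meta perf headers. */
#define COUNTER_MASK 0x00ffffffUL

static unsigned long counter_delta(unsigned long prev, unsigned long now)
{
	/* Unsigned subtraction modulo the counter width handles wrap-around. */
	return (now - prev) & COUNTER_MASK;
}

int main(void)
{
	/* Counter wrapped from 0xfffff0 past zero to 0x000010: delta is 32. */
	printf("delta = %lu\n", counter_delta(0xfffff0UL, 0x000010UL));
	return 0;
}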
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index c6efe62e5b76..483dff986a23 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -22,6 +22,7 @@
22#include <linux/pm.h> 22#include <linux/pm.h>
23#include <linux/syscalls.h> 23#include <linux/syscalls.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/smp.h>
25#include <asm/core_reg.h> 26#include <asm/core_reg.h>
26#include <asm/user_gateway.h> 27#include <asm/user_gateway.h>
27#include <asm/tcm.h> 28#include <asm/tcm.h>
@@ -31,7 +32,7 @@
31/* 32/*
32 * Wait for the next interrupt and enable local interrupts 33 * Wait for the next interrupt and enable local interrupts
33 */ 34 */
34static inline void arch_idle(void) 35void arch_cpu_idle(void)
35{ 36{
36 int tmp; 37 int tmp;
37 38
@@ -59,36 +60,12 @@ static inline void arch_idle(void)
59 : "r" (get_trigger_mask())); 60 : "r" (get_trigger_mask()));
60} 61}
61 62
62void cpu_idle(void)
63{
64 set_thread_flag(TIF_POLLING_NRFLAG);
65
66 while (1) {
67 tick_nohz_idle_enter();
68 rcu_idle_enter();
69
70 while (!need_resched()) {
71 /*
72 * We need to disable interrupts here to ensure we don't
73 * miss a wakeup call.
74 */
75 local_irq_disable();
76 if (!need_resched()) {
77#ifdef CONFIG_HOTPLUG_CPU 63#ifdef CONFIG_HOTPLUG_CPU
78 if (cpu_is_offline(smp_processor_id())) 64void arch_cpu_idle_dead(void)
79 cpu_die(); 65{
80#endif 66 cpu_die();
81 arch_idle();
82 } else {
83 local_irq_enable();
84 }
85 }
86
87 rcu_idle_exit();
88 tick_nohz_idle_exit();
89 schedule_preempt_disabled();
90 }
91} 67}
68#endif
92 69
93void (*pm_power_off)(void); 70void (*pm_power_off)(void);
94EXPORT_SYMBOL(pm_power_off); 71EXPORT_SYMBOL(pm_power_off);
@@ -152,6 +129,8 @@ void show_regs(struct pt_regs *regs)
152 "D1.7 " 129 "D1.7 "
153 }; 130 };
154 131
132 show_regs_print_info(KERN_INFO);
133
155 pr_info(" pt_regs @ %p\n", regs); 134 pr_info(" pt_regs @ %p\n", regs);
156 pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); 135 pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
157 pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, 136 pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 47a8828615a5..7563628822bd 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -288,10 +288,36 @@ static int metag_rp_state_set(struct task_struct *target,
288 return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf); 288 return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf);
289} 289}
290 290
291static int metag_tls_get(struct task_struct *target,
292 const struct user_regset *regset,
293 unsigned int pos, unsigned int count,
294 void *kbuf, void __user *ubuf)
295{
296 void __user *tls = target->thread.tls_ptr;
297 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
298}
299
300static int metag_tls_set(struct task_struct *target,
301 const struct user_regset *regset,
302 unsigned int pos, unsigned int count,
303 const void *kbuf, const void __user *ubuf)
304{
305 int ret;
306 void __user *tls;
307
308 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
309 if (ret)
310 return ret;
311
312 target->thread.tls_ptr = tls;
313 return ret;
314}
315
291enum metag_regset { 316enum metag_regset {
292 REGSET_GENERAL, 317 REGSET_GENERAL,
293 REGSET_CBUF, 318 REGSET_CBUF,
294 REGSET_READPIPE, 319 REGSET_READPIPE,
320 REGSET_TLS,
295}; 321};
296 322
297static const struct user_regset metag_regsets[] = { 323static const struct user_regset metag_regsets[] = {
@@ -319,6 +345,14 @@ static const struct user_regset metag_regsets[] = {
319 .get = metag_rp_state_get, 345 .get = metag_rp_state_get,
320 .set = metag_rp_state_set, 346 .set = metag_rp_state_set,
321 }, 347 },
348 [REGSET_TLS] = {
349 .core_note_type = NT_METAG_TLS,
350 .n = 1,
351 .size = sizeof(void *),
352 .align = sizeof(void *),
353 .get = metag_tls_get,
354 .set = metag_tls_set,
355 },
322}; 356};
323 357
324static const struct user_regset_view user_metag_view = { 358static const struct user_regset_view user_metag_view = {
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index 879246170aec..4f5726f1a55b 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -124,6 +124,7 @@ struct machine_desc *machine_desc __initdata;
124u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = { 124u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
125 [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID 125 [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
126}; 126};
127EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);
127 128
128/* 129/*
129 * Map a hardware thread ID to a Linux CPU number 130 * Map a hardware thread ID to a Linux CPU number
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 4b6d1f14df32..f443ec9a7cbe 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -28,6 +28,8 @@
28#include <asm/cachepart.h> 28#include <asm/cachepart.h>
29#include <asm/core_reg.h> 29#include <asm/core_reg.h>
30#include <asm/cpu.h> 30#include <asm/cpu.h>
31#include <asm/global_lock.h>
32#include <asm/metag_mem.h>
31#include <asm/mmu_context.h> 33#include <asm/mmu_context.h>
32#include <asm/pgtable.h> 34#include <asm/pgtable.h>
33#include <asm/pgalloc.h> 35#include <asm/pgalloc.h>
@@ -37,6 +39,9 @@
37#include <asm/hwthread.h> 39#include <asm/hwthread.h>
38#include <asm/traps.h> 40#include <asm/traps.h>
39 41
42#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
43#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
44
40DECLARE_PER_CPU(PTBI, pTBI); 45DECLARE_PER_CPU(PTBI, pTBI);
41 46
42void *secondary_data_stack; 47void *secondary_data_stack;
@@ -99,6 +104,114 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
99 return 0; 104 return 0;
100} 105}
101 106
107/**
108 * describe_cachepart_change: describe a change to cache partitions.
109 * @thread: Hardware thread number.
110 * @label: Label of cache type, e.g. "dcache" or "icache".
111 * @sz: Total size of the cache.
112 * @old: Old cache partition configuration (*CPART* register).
113 * @new: New cache partition configuration (*CPART* register).
114 *
115 * If the cache partition has changed, prints a message to the log describing
116 * those changes.
117 */
118static __cpuinit void describe_cachepart_change(unsigned int thread,
119 const char *label,
120 unsigned int sz,
121 unsigned int old,
122 unsigned int new)
123{
124 unsigned int lor1, land1, gor1, gand1;
125 unsigned int lor2, land2, gor2, gand2;
126 unsigned int diff = old ^ new;
127
128 if (!diff)
129 return;
130
131 pr_info("Thread %d: %s partition changed:", thread, label);
132 if (diff & (SYSC_xCPARTL_OR_BITS | SYSC_xCPARTL_AND_BITS)) {
133 lor1 = (old & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
134 lor2 = (new & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
135 land1 = (old & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
136 land2 = (new & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
137 pr_cont(" L:%#x+%#x->%#x+%#x",
138 (lor1 * sz) >> 4,
139 ((land1 + 1) * sz) >> 4,
140 (lor2 * sz) >> 4,
141 ((land2 + 1) * sz) >> 4);
142 }
143 if (diff & (SYSC_xCPARTG_OR_BITS | SYSC_xCPARTG_AND_BITS)) {
144 gor1 = (old & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
145 gor2 = (new & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
146 gand1 = (old & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
147 gand2 = (new & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
148 pr_cont(" G:%#x+%#x->%#x+%#x",
149 (gor1 * sz) >> 4,
150 ((gand1 + 1) * sz) >> 4,
151 (gor2 * sz) >> 4,
152 ((gand2 + 1) * sz) >> 4);
153 }
154 if (diff & SYSC_CWRMODE_BIT)
155 pr_cont(" %sWR",
156 (new & SYSC_CWRMODE_BIT) ? "+" : "-");
157 if (diff & SYSC_DCPART_GCON_BIT)
158 pr_cont(" %sGCOn",
159 (new & SYSC_DCPART_GCON_BIT) ? "+" : "-");
160 pr_cont("\n");
161}
162
163/**
164 * setup_smp_cache: ensure cache coherency for new SMP thread.
165 * @thread: New hardware thread number.
166 *
167 * Ensures that coherency is enabled and that the threads share the same cache
168 * partitions.
169 */
170static __cpuinit void setup_smp_cache(unsigned int thread)
171{
172 unsigned int this_thread, lflags;
173 unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
174 unsigned int icsz, icpart_old, icpart_new;
175
176 /*
177 * Copy over the current thread's cache partition configuration to the
178 * new thread so that they share cache partitions.
179 */
180 __global_lock2(lflags);
181 this_thread = hard_processor_id();
182 /* Share dcache partition */
183 dcpart_this = metag_in32(SYSC_DCPART(this_thread));
184 dcpart_old = metag_in32(SYSC_DCPART(thread));
185 dcpart_new = dcpart_this;
186#if PAGE_OFFSET < LINGLOBAL_BASE
187 /*
188 * For the local data cache to be coherent the threads must also have
189 * GCOn enabled.
190 */
191 dcpart_new |= SYSC_DCPART_GCON_BIT;
192 metag_out32(dcpart_new, SYSC_DCPART(this_thread));
193#endif
194 metag_out32(dcpart_new, SYSC_DCPART(thread));
195 /* Share icache partition too */
196 icpart_new = metag_in32(SYSC_ICPART(this_thread));
197 icpart_old = metag_in32(SYSC_ICPART(thread));
198 metag_out32(icpart_new, SYSC_ICPART(thread));
199 __global_unlock2(lflags);
200
201 /*
202 * Log if the cache partitions were altered so the user is aware of any
203 * potential unintentional cache wastage.
204 */
205 dcsz = get_dcache_size();
 206	icsz = get_icache_size();
207 describe_cachepart_change(this_thread, "dcache", dcsz,
208 dcpart_this, dcpart_new);
209 describe_cachepart_change(thread, "dcache", dcsz,
210 dcpart_old, dcpart_new);
211 describe_cachepart_change(thread, "icache", icsz,
212 icpart_old, icpart_new);
213}
214
102int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 215int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
103{ 216{
104 unsigned int thread = cpu_2_hwthread_id[cpu]; 217 unsigned int thread = cpu_2_hwthread_id[cpu];
@@ -108,6 +221,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
108 221
109 flush_tlb_all(); 222 flush_tlb_all();
110 223
224 setup_smp_cache(thread);
225
111 /* 226 /*
112 * Tell the secondary CPU where to find its idle thread's stack. 227 * Tell the secondary CPU where to find its idle thread's stack.
113 */ 228 */
@@ -297,7 +412,7 @@ asmlinkage void secondary_start_kernel(void)
297 /* 412 /*
298 * OK, it's off to the idle thread for us 413 * OK, it's off to the idle thread for us
299 */ 414 */
300 cpu_idle(); 415 cpu_startup_entry(CPUHP_ONLINE);
301} 416}
302 417
303void __init smp_cpus_done(unsigned int max_cpus) 418void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 8961f247b500..2ceeaae5b199 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -987,9 +987,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
987 987
988 show_trace(tsk, sp, NULL); 988 show_trace(tsk, sp, NULL);
989} 989}
990
991void dump_stack(void)
992{
993 show_stack(NULL, NULL);
994}
995EXPORT_SYMBOL(dump_stack);
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
index 975f2f4e3ecf..794f26a187f9 100644
--- a/arch/metag/mm/Kconfig
+++ b/arch/metag/mm/Kconfig
@@ -98,9 +98,6 @@ config MAX_ACTIVE_REGIONS
98 default "2" if SPARSEMEM 98 default "2" if SPARSEMEM
99 default "1" 99 default "1"
100 100
101config ARCH_POPULATES_NODE_MAP
102 def_bool y
103
104config ARCH_SELECT_MEMORY_MODEL 101config ARCH_SELECT_MEMORY_MODEL
105 def_bool y 102 def_bool y
106 103
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 504a398d5f8b..d05b8455c44c 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -380,14 +380,8 @@ void __init mem_init(void)
380 380
381#ifdef CONFIG_HIGHMEM 381#ifdef CONFIG_HIGHMEM
382 unsigned long tmp; 382 unsigned long tmp;
383 for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { 383 for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
384 struct page *page = pfn_to_page(tmp); 384 free_highmem_page(pfn_to_page(tmp));
385 ClearPageReserved(page);
386 init_page_count(page);
387 __free_page(page);
388 totalhigh_pages++;
389 }
390 totalram_pages += totalhigh_pages;
391 num_physpages += totalhigh_pages; 385 num_physpages += totalhigh_pages;
392#endif /* CONFIG_HIGHMEM */ 386#endif /* CONFIG_HIGHMEM */
393 387
@@ -412,32 +406,15 @@ void __init mem_init(void)
412 return; 406 return;
413} 407}
414 408
415static void free_init_pages(char *what, unsigned long begin, unsigned long end)
416{
417 unsigned long addr;
418
419 for (addr = begin; addr < end; addr += PAGE_SIZE) {
420 ClearPageReserved(virt_to_page(addr));
421 init_page_count(virt_to_page(addr));
422 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
423 free_page(addr);
424 totalram_pages++;
425 }
426 pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
427}
428
429void free_initmem(void) 409void free_initmem(void)
430{ 410{
431 free_init_pages("unused kernel memory", 411 free_initmem_default(POISON_FREE_INITMEM);
432 (unsigned long)(&__init_begin),
433 (unsigned long)(&__init_end));
434} 412}
435 413
436#ifdef CONFIG_BLK_DEV_INITRD 414#ifdef CONFIG_BLK_DEV_INITRD
437void free_initrd_mem(unsigned long start, unsigned long end) 415void free_initrd_mem(unsigned long start, unsigned long end)
438{ 416{
439 end = end & PAGE_MASK; 417 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
440 free_init_pages("initrd memory", start, end);
441} 418}
442#endif 419#endif
443 420
diff --git a/arch/metag/oprofile/Makefile b/arch/metag/oprofile/Makefile
new file mode 100644
index 000000000000..c9639d4734d6
--- /dev/null
+++ b/arch/metag/oprofile/Makefile
@@ -0,0 +1,17 @@
1obj-$(CONFIG_OPROFILE) += oprofile.o
2
3oprofile-core-y += buffer_sync.o
4oprofile-core-y += cpu_buffer.o
5oprofile-core-y += event_buffer.o
6oprofile-core-y += oprof.o
7oprofile-core-y += oprofile_files.o
8oprofile-core-y += oprofile_stats.o
9oprofile-core-y += oprofilefs.o
10oprofile-core-y += timer_int.o
11oprofile-core-$(CONFIG_HW_PERF_EVENTS) += oprofile_perf.o
12
13oprofile-y += backtrace.o
14oprofile-y += common.o
15oprofile-y += $(addprefix ../../../drivers/oprofile/,$(oprofile-core-y))
16
17ccflags-y += -Werror
diff --git a/arch/metag/oprofile/backtrace.c b/arch/metag/oprofile/backtrace.c
new file mode 100644
index 000000000000..7cc3f37cb40e
--- /dev/null
+++ b/arch/metag/oprofile/backtrace.c
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2010-2013 Imagination Technologies Ltd.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/oprofile.h>
10#include <linux/uaccess.h>
11#include <asm/processor.h>
12#include <asm/stacktrace.h>
13
14#include "backtrace.h"
15
16static void user_backtrace_fp(unsigned long __user *fp, unsigned int depth)
17{
18 while (depth-- && access_ok(VERIFY_READ, fp, 8)) {
19 unsigned long addr;
20 unsigned long __user *fpnew;
21 if (__copy_from_user_inatomic(&addr, fp + 1, sizeof(addr)))
22 break;
23 addr -= 4;
24
25 oprofile_add_trace(addr);
26
27 /* stack grows up, so frame pointers must decrease */
28 if (__copy_from_user_inatomic(&fpnew, fp + 0, sizeof(fpnew)))
29 break;
30 if (fpnew >= fp)
31 break;
32 fp = fpnew;
33 }
34}
35
36static int kernel_backtrace_frame(struct stackframe *frame, void *data)
37{
38 unsigned int *depth = data;
39
40 oprofile_add_trace(frame->pc);
41
42 /* decrement depth and stop if we reach 0 */
43 if ((*depth)-- == 0)
44 return 1;
45
46 /* otherwise onto the next frame */
47 return 0;
48}
49
50void metag_backtrace(struct pt_regs * const regs, unsigned int depth)
51{
52 if (user_mode(regs)) {
53 unsigned long *fp = (unsigned long *)regs->ctx.AX[1].U0;
54 user_backtrace_fp((unsigned long __user __force *)fp, depth);
55 } else {
56 struct stackframe frame;
57 frame.fp = regs->ctx.AX[1].U0; /* A0FrP */
58 frame.sp = user_stack_pointer(regs); /* A0StP */
59 frame.lr = 0; /* from stack */
60 frame.pc = regs->ctx.CurrPC; /* PC */
61 walk_stackframe(&frame, &kernel_backtrace_frame, &depth);
62 }
63}
diff --git a/arch/metag/oprofile/backtrace.h b/arch/metag/oprofile/backtrace.h
new file mode 100644
index 000000000000..c0fcc4265abb
--- /dev/null
+++ b/arch/metag/oprofile/backtrace.h
@@ -0,0 +1,6 @@
1#ifndef _METAG_OPROFILE_BACKTRACE_H
2#define _METAG_OPROFILE_BACKTRACE_H
3
4void metag_backtrace(struct pt_regs * const regs, unsigned int depth);
5
6#endif
diff --git a/arch/metag/oprofile/common.c b/arch/metag/oprofile/common.c
new file mode 100644
index 000000000000..ba26152b3c00
--- /dev/null
+++ b/arch/metag/oprofile/common.c
@@ -0,0 +1,66 @@
1/*
2 * arch/metag/oprofile/common.c
3 *
4 * Copyright (C) 2013 Imagination Technologies Ltd.
5 *
6 * Based on arch/sh/oprofile/common.c:
7 *
8 * Copyright (C) 2003 - 2010 Paul Mundt
9 *
10 * Based on arch/mips/oprofile/common.c:
11 *
12 * Copyright (C) 2004, 2005 Ralf Baechle
13 * Copyright (C) 2005 MIPS Technologies, Inc.
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/oprofile.h>
22#include <linux/perf_event.h>
23#include <linux/slab.h>
24
25#include "backtrace.h"
26
27#ifdef CONFIG_HW_PERF_EVENTS
28/*
29 * This will need to be reworked when multiple PMUs are supported.
30 */
31static char *metag_pmu_op_name;
32
33char *op_name_from_perf_id(void)
34{
35 return metag_pmu_op_name;
36}
37
38int __init oprofile_arch_init(struct oprofile_operations *ops)
39{
40 ops->backtrace = metag_backtrace;
41
42 if (perf_num_counters() == 0)
43 return -ENODEV;
44
45 metag_pmu_op_name = kasprintf(GFP_KERNEL, "metag/%s",
46 perf_pmu_name());
47 if (unlikely(!metag_pmu_op_name))
48 return -ENOMEM;
49
50 return oprofile_perf_init(ops);
51}
52
53void oprofile_arch_exit(void)
54{
55 oprofile_perf_exit();
56 kfree(metag_pmu_op_name);
57}
58#else
59int __init oprofile_arch_init(struct oprofile_operations *ops)
60{
61 ops->backtrace = metag_backtrace;
62 /* fall back to timer interrupt PC sampling */
63 return -ENODEV;
64}
65void oprofile_arch_exit(void) {}
66#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 1323fa2530eb..54237af0b07c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -26,6 +26,7 @@ config MICROBLAZE
26 select GENERIC_CPU_DEVICES 26 select GENERIC_CPU_DEVICES
27 select GENERIC_ATOMIC64 27 select GENERIC_ATOMIC64
28 select GENERIC_CLOCKEVENTS 28 select GENERIC_CLOCKEVENTS
29 select GENERIC_IDLE_POLL_SETUP
29 select MODULES_USE_ELF_RELA 30 select MODULES_USE_ELF_RELA
30 select CLONE_BACKWARDS 31 select CLONE_BACKWARDS
31 32
@@ -38,9 +39,6 @@ config RWSEM_GENERIC_SPINLOCK
38config ZONE_DMA 39config ZONE_DMA
39 def_bool y 40 def_bool y
40 41
41config ARCH_POPULATES_NODE_MAP
42 def_bool y
43
44config RWSEM_XCHGADD_ALGORITHM 42config RWSEM_XCHGADD_ALGORITHM
45 bool 43 bool
46 44
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 0759153e8117..d6e0ffea28b6 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -22,7 +22,6 @@
22extern const struct seq_operations cpuinfo_op; 22extern const struct seq_operations cpuinfo_op;
23 23
24# define cpu_relax() barrier() 24# define cpu_relax() barrier()
25# define cpu_sleep() do {} while (0)
26 25
27#define task_pt_regs(tsk) \ 26#define task_pt_regs(tsk) \
28 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) 27 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
@@ -160,10 +159,6 @@ unsigned long get_wchan(struct task_struct *p);
160# define STACK_TOP TASK_SIZE 159# define STACK_TOP TASK_SIZE
161# define STACK_TOP_MAX STACK_TOP 160# define STACK_TOP_MAX STACK_TOP
162 161
163void disable_hlt(void);
164void enable_hlt(void);
165void default_idle(void);
166
167#ifdef CONFIG_DEBUG_FS 162#ifdef CONFIG_DEBUG_FS
168extern struct dentry *of_debugfs_root; 163extern struct dentry *of_debugfs_root;
169#endif 164#endif
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 0e0b0a5ec756..f05df5630c84 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -46,7 +46,6 @@ void machine_shutdown(void);
46void machine_halt(void); 46void machine_halt(void);
47void machine_power_off(void); 47void machine_power_off(void);
48 48
49void free_init_pages(char *what, unsigned long begin, unsigned long end);
50extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); 49extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
51extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 50extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
52 51
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 008f30433d22..de26ea6373de 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -182,7 +182,6 @@ static inline bool test_and_clear_restore_sigmask(void)
182 ti->status &= ~TS_RESTORE_SIGMASK; 182 ti->status &= ~TS_RESTORE_SIGMASK;
183 return true; 183 return true;
184} 184}
185#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
186#endif 185#endif
187 186
188#endif /* __KERNEL__ */ 187#endif /* __KERNEL__ */
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index b3778391d9cc..6dece2d002dc 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -37,13 +37,5 @@
37#define __ARCH_WANT_SYS_VFORK 37#define __ARCH_WANT_SYS_VFORK
38#define __ARCH_WANT_SYS_FORK 38#define __ARCH_WANT_SYS_FORK
39 39
40/*
41 * "Conditional" syscalls
42 *
43 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
44 * but it doesn't work on all toolchains, so we just do it by hand
45 */
46#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
47
48#endif /* __ASSEMBLY__ */ 40#endif /* __ASSEMBLY__ */
49#endif /* _ASM_MICROBLAZE_UNISTD_H */ 41#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 60dcacc68038..365f2d53f1b2 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -21,7 +21,6 @@
21#include <asm/setup.h> 21#include <asm/setup.h>
22#include <asm/prom.h> 22#include <asm/prom.h>
23 23
24static u32 early_console_initialized;
25static u32 base_addr; 24static u32 base_addr;
26 25
27#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE 26#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -109,27 +108,11 @@ static struct console early_serial_uart16550_console = {
109}; 108};
110#endif /* CONFIG_SERIAL_8250_CONSOLE */ 109#endif /* CONFIG_SERIAL_8250_CONSOLE */
111 110
112static struct console *early_console;
113
114void early_printk(const char *fmt, ...)
115{
116 char buf[512];
117 int n;
118 va_list ap;
119
120 if (early_console_initialized) {
121 va_start(ap, fmt);
122 n = vscnprintf(buf, 512, fmt, ap);
123 early_console->write(early_console, buf, n);
124 va_end(ap);
125 }
126}
127
128int __init setup_early_printk(char *opt) 111int __init setup_early_printk(char *opt)
129{ 112{
130 int version = 0; 113 int version = 0;
131 114
132 if (early_console_initialized) 115 if (early_console)
133 return 1; 116 return 1;
134 117
135 base_addr = of_early_console(&version); 118 base_addr = of_early_console(&version);
@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt)
159 } 142 }
160 143
161 register_console(early_console); 144 register_console(early_console);
162 early_console_initialized = 1;
163 return 0; 145 return 0;
164 } 146 }
165 return 1; 147 return 1;
@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt)
 169 * only for early console because of performance degradation */ 151 * only for early console because of performance degradation */
170void __init remap_early_printk(void) 152void __init remap_early_printk(void)
171{ 153{
172 if (!early_console_initialized || !early_console) 154 if (!early_console)
173 return; 155 return;
174 pr_info("early_printk_console remapping from 0x%x to ", base_addr); 156 pr_info("early_printk_console remapping from 0x%x to ", base_addr);
175 base_addr = (u32) ioremap(base_addr, PAGE_SIZE); 157 base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
@@ -194,9 +176,9 @@ void __init remap_early_printk(void)
194 176
195void __init disable_early_printk(void) 177void __init disable_early_printk(void)
196{ 178{
197 if (!early_console_initialized || !early_console) 179 if (!early_console)
198 return; 180 return;
199 pr_warn("disabling early console\n"); 181 pr_warn("disabling early console\n");
200 unregister_console(early_console); 182 unregister_console(early_console);
201 early_console_initialized = 0; 183 early_console = NULL;
202} 184}
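The early_console_initialized flag can go away because kernel/printk.c in this series provides a shared early_console pointer and a generic early_printk() built on it; the architecture only has to point early_console at its console and register it. Roughly (buffer size and prototype details simplified, not the exact kernel text):

#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Rough sketch of the generic early_printk() these arch changes rely on. */
struct console *early_console;

void early_printk(const char *fmt, ...)
{
	char buf[512];
	va_list ap;
	int n;

	if (!early_console)
		return;

	va_start(ap, fmt);
	n = vscnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	early_console->write(early_console, buf, n);
}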
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index fa0ea609137c..a55893807274 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -20,6 +20,8 @@
20 20
21void show_regs(struct pt_regs *regs) 21void show_regs(struct pt_regs *regs)
22{ 22{
23 show_regs_print_info(KERN_INFO);
24
23 pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode); 25 pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
24 pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n", 26 pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
25 regs->r1, regs->r2, regs->r3, regs->r4); 27 regs->r1, regs->r2, regs->r3, regs->r4);
@@ -44,71 +46,6 @@ void show_regs(struct pt_regs *regs)
44void (*pm_power_off)(void) = NULL; 46void (*pm_power_off)(void) = NULL;
45EXPORT_SYMBOL(pm_power_off); 47EXPORT_SYMBOL(pm_power_off);
46 48
47static int hlt_counter = 1;
48
49void disable_hlt(void)
50{
51 hlt_counter++;
52}
53EXPORT_SYMBOL(disable_hlt);
54
55void enable_hlt(void)
56{
57 hlt_counter--;
58}
59EXPORT_SYMBOL(enable_hlt);
60
61static int __init nohlt_setup(char *__unused)
62{
63 hlt_counter = 1;
64 return 1;
65}
66__setup("nohlt", nohlt_setup);
67
68static int __init hlt_setup(char *__unused)
69{
70 hlt_counter = 0;
71 return 1;
72}
73__setup("hlt", hlt_setup);
74
75void default_idle(void)
76{
77 if (likely(hlt_counter)) {
78 local_irq_disable();
79 stop_critical_timings();
80 cpu_relax();
81 start_critical_timings();
82 local_irq_enable();
83 } else {
84 clear_thread_flag(TIF_POLLING_NRFLAG);
85 smp_mb__after_clear_bit();
86 local_irq_disable();
87 while (!need_resched())
88 cpu_sleep();
89 local_irq_enable();
90 set_thread_flag(TIF_POLLING_NRFLAG);
91 }
92}
93
94void cpu_idle(void)
95{
96 set_thread_flag(TIF_POLLING_NRFLAG);
97
98 /* endless idle loop with no priority at all */
99 while (1) {
100 tick_nohz_idle_enter();
101 rcu_idle_enter();
102 while (!need_resched())
103 default_idle();
104 rcu_idle_exit();
105 tick_nohz_idle_exit();
106
107 schedule_preempt_disabled();
108 check_pgt_cache();
109 }
110}
111
112void flush_thread(void) 49void flush_thread(void)
113{ 50{
114} 51}
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 30e6b5004a6a..cb619533a192 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -75,9 +75,3 @@ void show_stack(struct task_struct *task, unsigned long *sp)
75 75
76 debug_show_held_locks(task); 76 debug_show_held_locks(task);
77} 77}
78
79void dump_stack(void)
80{
81 show_stack(NULL, NULL);
82}
83EXPORT_SYMBOL(dump_stack);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 8f8b367c079e..4ec137d13ad7 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -82,13 +82,9 @@ static unsigned long highmem_setup(void)
82 /* FIXME not sure about */ 82 /* FIXME not sure about */
83 if (memblock_is_reserved(pfn << PAGE_SHIFT)) 83 if (memblock_is_reserved(pfn << PAGE_SHIFT))
84 continue; 84 continue;
85 ClearPageReserved(page); 85 free_highmem_page(page);
86 init_page_count(page);
87 __free_page(page);
88 totalhigh_pages++;
89 reservedpages++; 86 reservedpages++;
90 } 87 }
91 totalram_pages += totalhigh_pages;
92 pr_info("High memory: %luk\n", 88 pr_info("High memory: %luk\n",
93 totalhigh_pages << (PAGE_SHIFT-10)); 89 totalhigh_pages << (PAGE_SHIFT-10));
94 90
@@ -236,40 +232,16 @@ void __init setup_memory(void)
236 paging_init(); 232 paging_init();
237} 233}
238 234
239void free_init_pages(char *what, unsigned long begin, unsigned long end)
240{
241 unsigned long addr;
242
243 for (addr = begin; addr < end; addr += PAGE_SIZE) {
244 ClearPageReserved(virt_to_page(addr));
245 init_page_count(virt_to_page(addr));
246 free_page(addr);
247 totalram_pages++;
248 }
249 pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
250}
251
252#ifdef CONFIG_BLK_DEV_INITRD 235#ifdef CONFIG_BLK_DEV_INITRD
253void free_initrd_mem(unsigned long start, unsigned long end) 236void free_initrd_mem(unsigned long start, unsigned long end)
254{ 237{
255 int pages = 0; 238 free_reserved_area(start, end, 0, "initrd");
256 for (; start < end; start += PAGE_SIZE) {
257 ClearPageReserved(virt_to_page(start));
258 init_page_count(virt_to_page(start));
259 free_page(start);
260 totalram_pages++;
261 pages++;
262 }
263 pr_notice("Freeing initrd memory: %dk freed\n",
264 (int)(pages * (PAGE_SIZE / 1024)));
265} 239}
266#endif 240#endif
267 241
268void free_initmem(void) 242void free_initmem(void)
269{ 243{
270 free_init_pages("unused kernel memory", 244 free_initmem_default(0);
271 (unsigned long)(&__init_begin),
272 (unsigned long)(&__init_end));
273} 245}
274 246
275void __init mem_init(void) 247void __init mem_init(void)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 51244bf97271..e5f3794744f1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -404,6 +404,8 @@ config PMC_MSP
404 select IRQ_CPU 404 select IRQ_CPU
405 select SERIAL_8250 405 select SERIAL_8250
406 select SERIAL_8250_CONSOLE 406 select SERIAL_8250_CONSOLE
407 select USB_EHCI_BIG_ENDIAN_MMIO
408 select USB_EHCI_BIG_ENDIAN_DESC
407 help 409 help
408 This adds support for the PMC-Sierra family of Multi-Service 410 This adds support for the PMC-Sierra family of Multi-Service
409 Processor System-On-A-Chips. These parts include a number 411 Processor System-On-A-Chips. These parts include a number
@@ -1433,6 +1435,7 @@ config CPU_CAVIUM_OCTEON
1433 select CPU_SUPPORTS_HUGEPAGES 1435 select CPU_SUPPORTS_HUGEPAGES
1434 select LIBFDT 1436 select LIBFDT
1435 select USE_OF 1437 select USE_OF
1438 select USB_EHCI_BIG_ENDIAN_MMIO
1436 help 1439 help
1437 The Cavium Octeon processor is a highly integrated chip containing 1440 The Cavium Octeon processor is a highly integrated chip containing
1438 many ethernet hardware widgets for networking tasks. The processor 1441 many ethernet hardware widgets for networking tasks. The processor
@@ -1736,7 +1739,6 @@ config 32BIT
1736config 64BIT 1739config 64BIT
1737 bool "64-bit kernel" 1740 bool "64-bit kernel"
1738 depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL 1741 depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
1739 select HAVE_SYSCALL_WRAPPERS
1740 help 1742 help
1741 Select this option if you want to build a 64-bit kernel. 1743 Select this option if you want to build a 64-bit kernel.
1742 1744
@@ -2538,7 +2540,14 @@ source "kernel/power/Kconfig"
2538 2540
2539endmenu 2541endmenu
2540 2542
2541source "arch/mips/kernel/cpufreq/Kconfig" 2543config MIPS_EXTERNAL_TIMER
2544 bool
2545
2546if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
2547menu "CPU Power Management"
2548source "drivers/cpufreq/Kconfig"
2549endmenu
2550endif
2542 2551
2543source "net/Kconfig" 2552source "net/Kconfig"
2544 2553
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index f1c9c3e2f678..e97fd60e92ef 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -85,20 +85,9 @@ static struct platform_device bcm63xx_spi_device = {
85 85
86int __init bcm63xx_spi_register(void) 86int __init bcm63xx_spi_register(void)
87{ 87{
88 struct clk *periph_clk;
89
90 if (BCMCPU_IS_6328() || BCMCPU_IS_6345()) 88 if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
91 return -ENODEV; 89 return -ENODEV;
92 90
93 periph_clk = clk_get(NULL, "periph");
94 if (IS_ERR(periph_clk)) {
95 pr_err("unable to get periph clock\n");
96 return -ENODEV;
97 }
98
99 /* Set bus frequency */
100 spi_pdata.speed_hz = clk_get_rate(periph_clk);
101
102 spi_resources[0].start = bcm63xx_regset_address(RSET_SPI); 91 spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
103 spi_resources[0].end = spi_resources[0].start; 92 spi_resources[0].end = spi_resources[0].start;
104 spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); 93 spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
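With speed_hz dropped from bcm63xx_spi_pdata (see the header change below), the bus-frequency lookup presumably moves into the SPI driver's probe path. A hypothetical sketch of that driver-side lookup; the clock name and error handling here are illustrative assumptions, not taken from this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical driver-side lookup; "periph" is an assumed clock name. */
static int bcm63xx_spi_get_bus_hz(struct platform_device *pdev,
				  unsigned int *speed_hz)
{
	struct clk *clk;

	clk = clk_get(&pdev->dev, "periph");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*speed_hz = clk_get_rate(clk);
	clk_put(clk);
	return 0;
}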
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index ef99db994c2f..fe0d15d32660 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -10,6 +10,7 @@
10#define __ASM_HUGETLB_H 10#define __ASM_HUGETLB_H
11 11
12#include <asm/page.h> 12#include <asm/page.h>
13#include <asm-generic/hugetlb.h>
13 14
14 15
15static inline int is_hugepage_only_range(struct mm_struct *mm, 16static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/mips/include/asm/linkage.h b/arch/mips/include/asm/linkage.h
index e9a940d1b0c6..2767dda9e309 100644
--- a/arch/mips/include/asm/linkage.h
+++ b/arch/mips/include/asm/linkage.h
@@ -6,5 +6,8 @@
6#endif 6#endif
7 7
8#define __weak __attribute__((weak)) 8#define __weak __attribute__((weak))
9#define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall")
10#define SYSCALL_ALIAS(alias, name) \
11 asm ( #alias " = " #name "\n\t.globl " #alias)
9 12
10#endif 13#endif
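cond_syscall() is consumed by kernel/sys_ni.c, which stubs out optional syscalls as weak aliases of sys_ni_syscall unless a real implementation is linked in; moving the macro into asm/linkage.h keeps that working once the per-arch unistd.h copies are removed (as in the mips unistd.h hunk below). Typical usage, unchanged by this patch:

#include <linux/linkage.h>

/* From kernel/sys_ni.c: resolves to sys_ni_syscall when quota/acct are disabled. */
cond_syscall(sys_quotactl);
cond_syscall(sys_acct);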
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index c9bae1362606..b0184cf02575 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -13,7 +13,6 @@ struct bcm63xx_spi_pdata {
13 unsigned int msg_ctl_width; 13 unsigned int msg_ctl_width;
14 int bus_num; 14 int bus_num;
15 int num_chipselect; 15 int num_chipselect;
16 u32 speed_hz;
17}; 16};
18 17
19enum bcm63xx_regs_spi { 18enum bcm63xx_regs_spi {
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 64f661e32879..63c9c886173a 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -63,12 +63,4 @@
63 63
64#endif /* !__ASSEMBLY__ */ 64#endif /* !__ASSEMBLY__ */
65 65
66/*
67 * "Conditional" syscalls
68 *
69 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
70 * but it doesn't work on all toolchains, so we just do it by hand
71 */
72#define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall")
73
74#endif /* _ASM_UNISTD_H */ 66#endif /* _ASM_UNISTD_H */
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index de75fb50562b..520a908d45d6 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -92,8 +92,6 @@ CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/n
92 92
93obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o 93obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
94 94
95obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/
96
97obj-$(CONFIG_PERF_EVENTS) += perf_event.o 95obj-$(CONFIG_PERF_EVENTS) += perf_event.o
98obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o 96obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
99 97
diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig
deleted file mode 100644
index 58c601eee6fd..000000000000
--- a/arch/mips/kernel/cpufreq/Kconfig
+++ /dev/null
@@ -1,41 +0,0 @@
1#
2# CPU Frequency scaling
3#
4
5config MIPS_EXTERNAL_TIMER
6 bool
7
8config MIPS_CPUFREQ
9 bool
10 default y
11 depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
12
13if MIPS_CPUFREQ
14
15menu "CPU Frequency scaling"
16
17source "drivers/cpufreq/Kconfig"
18
19if CPU_FREQ
20
21comment "CPUFreq processor drivers"
22
23config LOONGSON2_CPUFREQ
24 tristate "Loongson2 CPUFreq Driver"
25 select CPU_FREQ_TABLE
26 depends on MIPS_CPUFREQ
27 help
28 This option adds a CPUFreq driver for loongson processors which
29 support software configurable cpu frequency.
30
 31 Loongson2F and its successors support this feature.
32
33 For details, take a look at <file:Documentation/cpu-freq/>.
34
35 If in doubt, say N.
36
37endif # CPU_FREQ
38
39endmenu
40
41endif # MIPS_CPUFREQ
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile
deleted file mode 100644
index 05a5715ee38c..000000000000
--- a/arch/mips/kernel/cpufreq/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1#
2# Makefile for the Linux/MIPS cpufreq.
3#
4
5obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
deleted file mode 100644
index 3237c5235f9c..000000000000
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ /dev/null
@@ -1,255 +0,0 @@
1/*
2 * Cpufreq driver for the loongson-2 processors
3 *
 4 * The 2E revision of the Loongson processor does not support this feature.
5 *
6 * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
7 * Author: Yanhua, yanh@lemote.com
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#include <linux/cpufreq.h>
14#include <linux/module.h>
15#include <linux/err.h>
16#include <linux/sched.h> /* set_cpus_allowed() */
17#include <linux/delay.h>
18#include <linux/platform_device.h>
19
20#include <asm/clock.h>
21
22#include <asm/mach-loongson/loongson.h>
23
24static uint nowait;
25
26static struct clk *cpuclk;
27
28static void (*saved_cpu_wait) (void);
29
30static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
31 unsigned long val, void *data);
32
33static struct notifier_block loongson2_cpufreq_notifier_block = {
34 .notifier_call = loongson2_cpu_freq_notifier
35};
36
37static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
38 unsigned long val, void *data)
39{
40 if (val == CPUFREQ_POSTCHANGE)
41 current_cpu_data.udelay_val = loops_per_jiffy;
42
43 return 0;
44}
45
46static unsigned int loongson2_cpufreq_get(unsigned int cpu)
47{
48 return clk_get_rate(cpuclk);
49}
50
51/*
52 * Here we notify other drivers of the proposed change and the final change.
53 */
54static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
55 unsigned int target_freq,
56 unsigned int relation)
57{
58 unsigned int cpu = policy->cpu;
59 unsigned int newstate = 0;
60 cpumask_t cpus_allowed;
61 struct cpufreq_freqs freqs;
62 unsigned int freq;
63
64 if (!cpu_online(cpu))
65 return -ENODEV;
66
67 cpus_allowed = current->cpus_allowed;
68 set_cpus_allowed_ptr(current, cpumask_of(cpu));
69
70 if (cpufreq_frequency_table_target
71 (policy, &loongson2_clockmod_table[0], target_freq, relation,
72 &newstate))
73 return -EINVAL;
74
75 freq =
76 ((cpu_clock_freq / 1000) *
77 loongson2_clockmod_table[newstate].index) / 8;
78 if (freq < policy->min || freq > policy->max)
79 return -EINVAL;
80
81 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
82
83 freqs.cpu = cpu;
84 freqs.old = loongson2_cpufreq_get(cpu);
85 freqs.new = freq;
86 freqs.flags = 0;
87
88 if (freqs.new == freqs.old)
89 return 0;
90
91 /* notifiers */
92 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
93
94 set_cpus_allowed_ptr(current, &cpus_allowed);
95
96 /* setting the cpu frequency */
97 clk_set_rate(cpuclk, freq);
98
99 /* notifiers */
100 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
101
102 pr_debug("cpufreq: set frequency %u kHz\n", freq);
103
104 return 0;
105}
106
107static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
108{
109 int i;
110 unsigned long rate;
111 int ret;
112
113 if (!cpu_online(policy->cpu))
114 return -ENODEV;
115
116 cpuclk = clk_get(NULL, "cpu_clk");
117 if (IS_ERR(cpuclk)) {
118 printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
119 return PTR_ERR(cpuclk);
120 }
121
122 rate = cpu_clock_freq / 1000;
123 if (!rate) {
124 clk_put(cpuclk);
125 return -EINVAL;
126 }
127 ret = clk_set_rate(cpuclk, rate);
128 if (ret) {
129 clk_put(cpuclk);
130 return ret;
131 }
132
133 /* clock table init */
134 for (i = 2;
135 (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
136 i++)
137 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
138
139 policy->cur = loongson2_cpufreq_get(policy->cpu);
140
141 cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
142 policy->cpu);
143
144 return cpufreq_frequency_table_cpuinfo(policy,
145 &loongson2_clockmod_table[0]);
146}
147
148static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
149{
150 return cpufreq_frequency_table_verify(policy,
151 &loongson2_clockmod_table[0]);
152}
153
154static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
155{
156 clk_put(cpuclk);
157 return 0;
158}
159
160static struct freq_attr *loongson2_table_attr[] = {
161 &cpufreq_freq_attr_scaling_available_freqs,
162 NULL,
163};
164
165static struct cpufreq_driver loongson2_cpufreq_driver = {
166 .owner = THIS_MODULE,
167 .name = "loongson2",
168 .init = loongson2_cpufreq_cpu_init,
169 .verify = loongson2_cpufreq_verify,
170 .target = loongson2_cpufreq_target,
171 .get = loongson2_cpufreq_get,
172 .exit = loongson2_cpufreq_exit,
173 .attr = loongson2_table_attr,
174};
175
176static struct platform_device_id platform_device_ids[] = {
177 {
178 .name = "loongson2_cpufreq",
179 },
180 {}
181};
182
183MODULE_DEVICE_TABLE(platform, platform_device_ids);
184
185static struct platform_driver platform_driver = {
186 .driver = {
187 .name = "loongson2_cpufreq",
188 .owner = THIS_MODULE,
189 },
190 .id_table = platform_device_ids,
191};
192
193/*
194 * This is the simple version of Loongson-2 wait, Maybe we need do this in
195 * interrupt disabled context.
196 */
197
198static DEFINE_SPINLOCK(loongson2_wait_lock);
199
200static void loongson2_cpu_wait(void)
201{
202 unsigned long flags;
203 u32 cpu_freq;
204
205 spin_lock_irqsave(&loongson2_wait_lock, flags);
206 cpu_freq = LOONGSON_CHIPCFG0;
207 LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
208 LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
209 spin_unlock_irqrestore(&loongson2_wait_lock, flags);
210}
211
212static int __init cpufreq_init(void)
213{
214 int ret;
215
216 /* Register platform stuff */
217 ret = platform_driver_register(&platform_driver);
218 if (ret)
219 return ret;
220
221 pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
222
223 cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
224 CPUFREQ_TRANSITION_NOTIFIER);
225
226 ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
227
228 if (!ret && !nowait) {
229 saved_cpu_wait = cpu_wait;
230 cpu_wait = loongson2_cpu_wait;
231 }
232
233 return ret;
234}
235
236static void __exit cpufreq_exit(void)
237{
238 if (!nowait && saved_cpu_wait)
239 cpu_wait = saved_cpu_wait;
240 cpufreq_unregister_driver(&loongson2_cpufreq_driver);
241 cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
242 CPUFREQ_TRANSITION_NOTIFIER);
243
244 platform_driver_unregister(&platform_driver);
245}
246
247module_init(cpufreq_init);
248module_exit(cpufreq_exit);
249
250module_param(nowait, uint, 0644);
251MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
252
253MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
254MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
255MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9e6440eaa455..505cb77d1280 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -7,7 +7,9 @@
7 * Copyright (C) 2007 MIPS Technologies, Inc. 7 * Copyright (C) 2007 MIPS Technologies, Inc.
8 * written by Ralf Baechle (ralf@linux-mips.org) 8 * written by Ralf Baechle (ralf@linux-mips.org)
9 */ 9 */
10#include <linux/kernel.h>
10#include <linux/console.h> 11#include <linux/console.h>
12#include <linux/printk.h>
11#include <linux/init.h> 13#include <linux/init.h>
12 14
13#include <asm/setup.h> 15#include <asm/setup.h>
@@ -24,20 +26,18 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
24 } 26 }
25} 27}
26 28
27static struct console early_console = { 29static struct console early_console_prom = {
28 .name = "early", 30 .name = "early",
29 .write = early_console_write, 31 .write = early_console_write,
30 .flags = CON_PRINTBUFFER | CON_BOOT, 32 .flags = CON_PRINTBUFFER | CON_BOOT,
31 .index = -1 33 .index = -1
32}; 34};
33 35
34static int early_console_initialized __initdata;
35
36void __init setup_early_printk(void) 36void __init setup_early_printk(void)
37{ 37{
38 if (early_console_initialized) 38 if (early_console)
39 return; 39 return;
40 early_console_initialized = 1; 40 early_console = &early_console_prom;
41 41
42 register_console(&early_console); 42 register_console(&early_console_prom);
43} 43}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index db9655f08892..d1d576b765f5 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -119,99 +119,6 @@ SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
119 return sys_pwrite64(fd, buf, count, merge_64(a4, a5)); 119 return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
120} 120}
121 121
122#ifdef CONFIG_SYSVIPC
123
124SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
125 unsigned long, ptr, unsigned long, fifth)
126{
127 int version, err;
128
129 version = call >> 16; /* hack for backward compatibility */
130 call &= 0xffff;
131
132 switch (call) {
133 case SEMOP:
134 /* struct sembuf is the same on 32 and 64bit :)) */
135 err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
136 break;
137 case SEMTIMEDOP:
138 err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
139 compat_ptr(fifth));
140 break;
141 case SEMGET:
142 err = sys_semget(first, second, third);
143 break;
144 case SEMCTL:
145 err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
146 break;
147 case MSGSND:
148 err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
149 break;
150 case MSGRCV:
151 err = compat_sys_msgrcv(first, second, fifth, third,
152 version, compat_ptr(ptr));
153 break;
154 case MSGGET:
155 err = sys_msgget((key_t) first, second);
156 break;
157 case MSGCTL:
158 err = compat_sys_msgctl(first, second, compat_ptr(ptr));
159 break;
160 case SHMAT:
161 err = compat_sys_shmat(first, second, third, version,
162 compat_ptr(ptr));
163 break;
164 case SHMDT:
165 err = sys_shmdt(compat_ptr(ptr));
166 break;
167 case SHMGET:
168 err = sys_shmget(first, (unsigned)second, third);
169 break;
170 case SHMCTL:
171 err = compat_sys_shmctl(first, second, compat_ptr(ptr));
172 break;
173 default:
174 err = -ENOSYS;
175 break;
176 }
177
178 return err;
179}
180
181#else
182
183SYSCALL_DEFINE6(32_ipc, u32, call, int, first, int, second, int, third,
184 u32, ptr, u32, fifth)
185{
186 return -ENOSYS;
187}
188
189#endif /* CONFIG_SYSVIPC */
190
191#ifdef CONFIG_MIPS32_N32
192SYSCALL_DEFINE4(n32_semctl, int, semid, int, semnum, int, cmd, u32, arg)
193{
194 /* compat_sys_semctl expects a pointer to union semun */
195 u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
196 if (put_user(arg, uptr))
197 return -EFAULT;
198 return compat_sys_semctl(semid, semnum, cmd, uptr);
199}
200
201SYSCALL_DEFINE4(n32_msgsnd, int, msqid, u32, msgp, unsigned int, msgsz,
202 int, msgflg)
203{
204 return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp));
205}
206
207SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
208 int, msgtyp, int, msgflg)
209{
210 return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64,
211 compat_ptr(msgp));
212}
213#endif
214
215SYSCALL_DEFINE1(32_personality, unsigned long, personality) 122SYSCALL_DEFINE1(32_personality, unsigned long, personality)
216{ 123{
217 unsigned int p = personality & 0xffffffff; 124 unsigned int p = personality & 0xffffffff;
@@ -226,26 +133,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality)
226 return ret; 133 return ret;
227} 134}
228 135
229SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
230 compat_off_t __user *, offset, s32, count)
231{
232 mm_segment_t old_fs = get_fs();
233 int ret;
234 off_t of;
235
236 if (offset && get_user(of, offset))
237 return -EFAULT;
238
239 set_fs(KERNEL_DS);
240 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
241 set_fs(old_fs);
242
243 if (offset && put_user(of, offset))
244 return -EFAULT;
245
246 return ret;
247}
248
249asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3, 136asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
250 size_t count) 137 size_t count)
251{ 138{
@@ -279,12 +166,6 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
279 merge_64(len_a4, len_a5)); 166 merge_64(len_a4, len_a5));
280} 167}
281 168
282asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
283 size_t len)
284{
285 return sys_lookup_dcookie(merge_64(a0, a1), buf, len);
286}
287
288SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, 169SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
289 u64, a3, u64, a4, int, dfd, const char __user *, pathname) 170 u64, a3, u64, a4, int, dfd, const char __user *, pathname)
290{ 171{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3be4405c2d14..cfc742d75b7f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,44 +41,26 @@
41#include <asm/inst.h> 41#include <asm/inst.h>
42#include <asm/stacktrace.h> 42#include <asm/stacktrace.h>
43 43
44/* 44#ifdef CONFIG_HOTPLUG_CPU
45 * The idle thread. There's no useful work to be done, so just try to conserve 45void arch_cpu_idle_dead(void)
46 * power and have a low exit latency (ie sit in a loop waiting for somebody to
47 * say that they'd like to reschedule)
48 */
49void __noreturn cpu_idle(void)
50{ 46{
51 int cpu; 47 /* What the heck is this check doing ? */
52 48 if (!cpu_isset(smp_processor_id(), cpu_callin_map))
53 /* CPU is going idle. */ 49 play_dead();
54 cpu = smp_processor_id(); 50}
51#endif
55 52
56 /* endless idle loop with no priority at all */ 53void arch_cpu_idle(void)
57 while (1) { 54{
58 tick_nohz_idle_enter();
59 rcu_idle_enter();
60 while (!need_resched() && cpu_online(cpu)) {
61#ifdef CONFIG_MIPS_MT_SMTC 55#ifdef CONFIG_MIPS_MT_SMTC
62 extern void smtc_idle_loop_hook(void); 56 extern void smtc_idle_loop_hook(void);
63 57
64 smtc_idle_loop_hook(); 58 smtc_idle_loop_hook();
65#endif 59#endif
66 60 if (cpu_wait)
67 if (cpu_wait) { 61 (*cpu_wait)();
68 /* Don't trace irqs off for idle */ 62 else
69 stop_critical_timings(); 63 local_irq_enable();
70 (*cpu_wait)();
71 start_critical_timings();
72 }
73 }
74#ifdef CONFIG_HOTPLUG_CPU
75 if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
76 play_dead();
77#endif
78 rcu_idle_exit();
79 tick_nohz_idle_exit();
80 schedule_preempt_disabled();
81 }
82} 64}
83 65
84asmlinkage void ret_from_fork(void); 66asmlinkage void ret_from_fork(void);
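The per-arch cpu_idle() loops disappear throughout this series because the core now owns the idle loop (kernel/cpu/idle.c): secondary CPUs enter it via cpu_startup_entry(CPUHP_ONLINE), and the architecture only supplies arch_cpu_idle() plus, for hotplug, arch_cpu_idle_dead(). A much-simplified sketch of the generic loop, with RCU, tracing and polling details elided (not the exact kernel text):

#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/tick.h>

/* Simplified sketch of the generic idle loop provided by the core. */
static void cpu_idle_loop_sketch(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			local_irq_disable();
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();
			arch_cpu_idle();	/* expected to re-enable IRQs */
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}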
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 693d60b0855f..edcb6594e7b5 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -143,7 +143,7 @@ EXPORT(sysn32_call_table)
143 PTR compat_sys_setitimer 143 PTR compat_sys_setitimer
144 PTR sys_alarm 144 PTR sys_alarm
145 PTR sys_getpid 145 PTR sys_getpid
146 PTR sys_32_sendfile 146 PTR compat_sys_sendfile
147 PTR sys_socket /* 6040 */ 147 PTR sys_socket /* 6040 */
148 PTR sys_connect 148 PTR sys_connect
149 PTR sys_accept 149 PTR sys_accept
@@ -168,11 +168,11 @@ EXPORT(sysn32_call_table)
168 PTR sys_newuname 168 PTR sys_newuname
169 PTR sys_semget 169 PTR sys_semget
170 PTR sys_semop 170 PTR sys_semop
171 PTR sys_n32_semctl 171 PTR compat_sys_semctl
172 PTR sys_shmdt /* 6065 */ 172 PTR sys_shmdt /* 6065 */
173 PTR sys_msgget 173 PTR sys_msgget
174 PTR sys_n32_msgsnd 174 PTR compat_sys_msgsnd
175 PTR sys_n32_msgrcv 175 PTR compat_sys_msgrcv
176 PTR compat_sys_msgctl 176 PTR compat_sys_msgctl
177 PTR compat_sys_fcntl /* 6070 */ 177 PTR compat_sys_fcntl /* 6070 */
178 PTR sys_flock 178 PTR sys_flock
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index af8887f779f1..103bfe570fe8 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -309,7 +309,7 @@ sys_call_table:
309 PTR compat_sys_wait4 309 PTR compat_sys_wait4
310 PTR sys_swapoff /* 4115 */ 310 PTR sys_swapoff /* 4115 */
311 PTR compat_sys_sysinfo 311 PTR compat_sys_sysinfo
312 PTR sys_32_ipc 312 PTR compat_sys_ipc
313 PTR sys_fsync 313 PTR sys_fsync
314 PTR sys32_sigreturn 314 PTR sys32_sigreturn
315 PTR __sys_clone /* 4120 */ 315 PTR __sys_clone /* 4120 */
@@ -399,7 +399,7 @@ sys_call_table:
399 PTR sys_capget 399 PTR sys_capget
400 PTR sys_capset /* 4205 */ 400 PTR sys_capset /* 4205 */
401 PTR compat_sys_sigaltstack 401 PTR compat_sys_sigaltstack
402 PTR sys_32_sendfile 402 PTR compat_sys_sendfile
403 PTR sys_ni_syscall 403 PTR sys_ni_syscall
404 PTR sys_ni_syscall 404 PTR sys_ni_syscall
405 PTR sys_mips_mmap2 /* 4210 */ 405 PTR sys_mips_mmap2 /* 4210 */
@@ -439,7 +439,7 @@ sys_call_table:
439 PTR compat_sys_io_submit 439 PTR compat_sys_io_submit
440 PTR sys_io_cancel /* 4245 */ 440 PTR sys_io_cancel /* 4245 */
441 PTR sys_exit_group 441 PTR sys_exit_group
442 PTR sys32_lookup_dcookie 442 PTR compat_sys_lookup_dcookie
443 PTR sys_epoll_create 443 PTR sys_epoll_create
444 PTR sys_epoll_ctl 444 PTR sys_epoll_ctl
445 PTR sys_epoll_wait /* 4250 */ 445 PTR sys_epoll_wait /* 4250 */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 66bf4e22d9b9..aee04af213c5 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -139,7 +139,7 @@ asmlinkage __cpuinit void start_secondary(void)
139 WARN_ON_ONCE(!irqs_disabled()); 139 WARN_ON_ONCE(!irqs_disabled());
140 mp_ops->smp_finish(); 140 mp_ops->smp_finish();
141 141
142 cpu_idle(); 142 cpu_startup_entry(CPUHP_ONLINE);
143} 143}
144 144
145/* 145/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c3abb88170fc..25225515451f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -206,19 +206,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
206 show_stacktrace(task, &regs); 206 show_stacktrace(task, &regs);
207} 207}
208 208
209/*
210 * The architecture-independent dump_stack generator
211 */
212void dump_stack(void)
213{
214 struct pt_regs regs;
215
216 prepare_frametrace(&regs);
217 show_backtrace(current, &regs);
218}
219
220EXPORT_SYMBOL(dump_stack);
221
222static void show_code(unsigned int __user *pc) 209static void show_code(unsigned int __user *pc)
223{ 210{
224 long i; 211 long i;
@@ -244,7 +231,7 @@ static void __show_regs(const struct pt_regs *regs)
244 unsigned int cause = regs->cp0_cause; 231 unsigned int cause = regs->cp0_cause;
245 int i; 232 int i;
246 233
247 printk("Cpu %d\n", smp_processor_id()); 234 show_regs_print_info(KERN_DEFAULT);
248 235
249 /* 236 /*
250 * Saved main processor registers 237 * Saved main processor registers
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 67929251286c..3d0346dbccf4 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -77,10 +77,9 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
77/* 77/*
78 * Not static inline because used by IP27 special magic initialization code 78 * Not static inline because used by IP27 special magic initialization code
79 */ 79 */
80unsigned long setup_zero_pages(void) 80void setup_zero_pages(void)
81{ 81{
82 unsigned int order; 82 unsigned int order, i;
83 unsigned long size;
84 struct page *page; 83 struct page *page;
85 84
86 if (cpu_has_vce) 85 if (cpu_has_vce)
@@ -94,15 +93,10 @@ unsigned long setup_zero_pages(void)
94 93
95 page = virt_to_page((void *)empty_zero_page); 94 page = virt_to_page((void *)empty_zero_page);
96 split_page(page, order); 95 split_page(page, order);
97 while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) { 96 for (i = 0; i < (1 << order); i++, page++)
98 SetPageReserved(page); 97 mark_page_reserved(page);
99 page++;
100 }
101
102 size = PAGE_SIZE << order;
103 zero_page_mask = (size - 1) & PAGE_MASK;
104 98
105 return 1UL << order; 99 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
106} 100}
107 101
108#ifdef CONFIG_MIPS_MT_SMTC 102#ifdef CONFIG_MIPS_MT_SMTC
@@ -380,7 +374,7 @@ void __init mem_init(void)
380 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 374 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
381 375
382 totalram_pages += free_all_bootmem(); 376 totalram_pages += free_all_bootmem();
383 totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ 377 setup_zero_pages(); /* Setup zeroed pages. */
384 378
385 reservedpages = ram = 0; 379 reservedpages = ram = 0;
386 for (tmp = 0; tmp < max_low_pfn; tmp++) 380 for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -399,12 +393,8 @@ void __init mem_init(void)
399 SetPageReserved(page); 393 SetPageReserved(page);
400 continue; 394 continue;
401 } 395 }
402 ClearPageReserved(page); 396 free_highmem_page(page);
403 init_page_count(page);
404 __free_page(page);
405 totalhigh_pages++;
406 } 397 }
407 totalram_pages += totalhigh_pages;
408 num_physpages += totalhigh_pages; 398 num_physpages += totalhigh_pages;
409#endif 399#endif
410 400
@@ -440,11 +430,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
440 struct page *page = pfn_to_page(pfn); 430 struct page *page = pfn_to_page(pfn);
441 void *addr = phys_to_virt(PFN_PHYS(pfn)); 431 void *addr = phys_to_virt(PFN_PHYS(pfn));
442 432
443 ClearPageReserved(page);
444 init_page_count(page);
445 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); 433 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
446 __free_page(page); 434 free_reserved_page(page);
447 totalram_pages++;
448 } 435 }
449 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); 436 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
450} 437}
@@ -452,18 +439,14 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
452#ifdef CONFIG_BLK_DEV_INITRD 439#ifdef CONFIG_BLK_DEV_INITRD
453void free_initrd_mem(unsigned long start, unsigned long end) 440void free_initrd_mem(unsigned long start, unsigned long end)
454{ 441{
455 free_init_pages("initrd memory", 442 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
456 virt_to_phys((void *)start),
457 virt_to_phys((void *)end));
458} 443}
459#endif 444#endif
460 445
461void __init_refok free_initmem(void) 446void __init_refok free_initmem(void)
462{ 447{
463 prom_free_prom_memory(); 448 prom_free_prom_memory();
464 free_init_pages("unused kernel memory", 449 free_initmem_default(POISON_FREE_INITMEM);
465 __pa_symbol(&__init_begin),
466 __pa_symbol(&__init_end));
467} 450}
468 451
469#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 452#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
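free_reserved_page() and free_highmem_page() bundle the ClearPageReserved/init_page_count/__free_page sequence together with the totalram_pages and totalhigh_pages accounting that the deleted lines did by hand, which is why the explicit "totalram_pages += totalhigh_pages" also goes away. Approximately (simplified from include/linux/mm.h in this series; not the exact text):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Approximate behaviour of the helpers used above; accounting simplified. */
static void free_reserved_page_sketch(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
}

static void free_highmem_page_sketch(struct page *page)
{
	free_reserved_page_sketch(page);
	totalhigh_pages++;
}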
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0872f12f268d..594e60d6a43b 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -115,7 +115,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
115 pci_bus_assign_resources(bus); 115 pci_bus_assign_resources(bus);
116 pci_enable_bridges(bus); 116 pci_enable_bridges(bus);
117 } 117 }
118 bus->dev.of_node = hose->of_node;
119 } 118 }
120} 119}
121 120
@@ -169,6 +168,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
169 } 168 }
170 } 169 }
171} 170}
171
172struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
173{
174 struct pci_controller *hose = bus->sysdata;
175
176 return of_node_get(hose->of_node);
177}
172#endif 178#endif
173 179
174static DEFINE_MUTEX(pci_scan_mutex); 180static DEFINE_MUTEX(pci_scan_mutex);
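Implementing pcibios_get_phb_of_node() lets the PCI core attach the device-tree node to the root bus while the bus is being created, rather than patching bus->dev.of_node after the scan as the removed line did. A rough sketch of how the core consumes the hook (simplified from drivers/pci/of.c in this kernel era; not the exact text):

#include <linux/of.h>
#include <linux/pci.h>

/* Simplified sketch of the core-side consumer of the arch hook. */
void pci_set_bus_of_node(struct pci_bus *bus)
{
	if (bus->self == NULL)			/* root bus: ask the arch */
		bus->dev.of_node = pcibios_get_phb_of_node(bus);
	else					/* child bus: inherit from bridge */
		bus->dev.of_node = of_node_get(bus->self->dev.of_node);
}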
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 3505d08ff2fd..5f2bddb1860e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -457,7 +457,7 @@ void __init prom_free_prom_memory(void)
457 /* We got nothing to free here ... */ 457 /* We got nothing to free here ... */
458} 458}
459 459
460extern unsigned long setup_zero_pages(void); 460extern void setup_zero_pages(void);
461 461
462void __init paging_init(void) 462void __init paging_init(void)
463{ 463{
@@ -492,7 +492,7 @@ void __init mem_init(void)
492 totalram_pages += free_all_bootmem_node(NODE_DATA(node)); 492 totalram_pages += free_all_bootmem_node(NODE_DATA(node));
493 } 493 }
494 494
495 totalram_pages -= setup_zero_pages(); /* This comes from node 0 */ 495 setup_zero_pages(); /* This comes from node 0 */
496 496
497 codesize = (unsigned long) &_etext - (unsigned long) &_text; 497 codesize = (unsigned long) &_etext - (unsigned long) &_text;
498 datasize = (unsigned long) &_edata - (unsigned long) &_etext; 498 datasize = (unsigned long) &_edata - (unsigned long) &_etext;
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index f90062b0622d..224b4262486d 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,8 +165,6 @@ void arch_release_thread_info(struct thread_info *ti);
165#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 165#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
166#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 166#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
167 167
168#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
169
170#endif /* __KERNEL__ */ 168#endif /* __KERNEL__ */
171 169
172#endif /* _ASM_THREAD_INFO_H */ 170#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 7f9d9adfa51e..9d4e2d1ef90e 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -45,14 +45,4 @@
45#define __ARCH_WANT_SYS_VFORK 45#define __ARCH_WANT_SYS_VFORK
46#define __ARCH_WANT_SYS_CLONE 46#define __ARCH_WANT_SYS_CLONE
47 47
48/*
49 * "Conditional" syscalls
50 *
51 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
52 * but it doesn't work on all toolchains, so we just do it by hand
53 */
54#ifndef cond_syscall
55#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
56#endif
57
58#endif /* _ASM_UNISTD_H */ 48#endif /* _ASM_UNISTD_H */
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 84f4e97e3074..3707da583d05 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -50,77 +50,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
50void (*pm_power_off)(void); 50void (*pm_power_off)(void);
51EXPORT_SYMBOL(pm_power_off); 51EXPORT_SYMBOL(pm_power_off);
52 52
53#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
54/*
55 * we use this if we don't have any better idle routine
56 */
57static void default_idle(void)
58{
59 local_irq_disable();
60 if (!need_resched())
61 safe_halt();
62 else
63 local_irq_enable();
64}
65
66#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
67/* 53/*
68 * On SMP it's slightly faster (but much more power-consuming!) 54 * On SMP it's slightly faster (but much more power-consuming!)
69 * to poll the ->work.need_resched flag instead of waiting for the 55 * to poll the ->work.need_resched flag instead of waiting for the
70 * cross-CPU IPI to arrive. Use this option with caution. 56 * cross-CPU IPI to arrive. Use this option with caution.
57 *
58 * tglx: No idea why this depends on HOTPLUG_CPU !?!
71 */ 59 */
72static inline void poll_idle(void) 60#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
73{ 61void arch_cpu_idle(void)
74 int oldval;
75
76 local_irq_enable();
77
78 /*
79 * Deal with another CPU just having chosen a thread to
80 * run here:
81 */
82 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
83
84 if (!oldval) {
85 set_thread_flag(TIF_POLLING_NRFLAG);
86 while (!need_resched())
87 cpu_relax();
88 clear_thread_flag(TIF_POLLING_NRFLAG);
89 } else {
90 set_need_resched();
91 }
92}
93#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
94
95/*
96 * the idle thread
97 * - there's no useful work to be done, so just try to conserve power and have
98 * a low exit latency (ie sit in a loop waiting for somebody to say that
99 * they'd like to reschedule)
100 */
101void cpu_idle(void)
102{ 62{
103 /* endless idle loop with no priority at all */ 63 safe_halt();
104 for (;;) {
105 rcu_idle_enter();
106 while (!need_resched()) {
107 void (*idle)(void);
108
109 smp_rmb();
110 if (!idle) {
111#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
112 idle = poll_idle;
113#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
114 idle = default_idle;
115#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
116 }
117 idle();
118 }
119 rcu_idle_exit();
120
121 schedule_preempt_disabled();
122 }
123} 64}
65#endif
124 66
125void release_segments(struct mm_struct *mm) 67void release_segments(struct mm_struct *mm)
126{ 68{
@@ -155,6 +97,7 @@ void machine_power_off(void)
155 97
156void show_regs(struct pt_regs *regs) 98void show_regs(struct pt_regs *regs)
157{ 99{
100 show_regs_print_info(KERN_DEFAULT);
158} 101}
159 102
160/* 103/*
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 5d7e152a23b7..a17f9c9c14c9 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -675,7 +675,7 @@ int __init start_secondary(void *unused)
675#ifdef CONFIG_GENERIC_CLOCKEVENTS 675#ifdef CONFIG_GENERIC_CLOCKEVENTS
676 init_clockevents(); 676 init_clockevents();
677#endif 677#endif
678 cpu_idle(); 678 cpu_startup_entry(CPUHP_ONLINE);
679 return 0; 679 return 0;
680} 680}
681 681
@@ -935,8 +935,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
935 int timeout; 935 int timeout;
936 936
937#ifdef CONFIG_HOTPLUG_CPU 937#ifdef CONFIG_HOTPLUG_CPU
938 if (num_online_cpus() == 1)
939 disable_hlt();
940 if (sleep_mode[cpu]) 938 if (sleep_mode[cpu])
941 run_wakeup_cpu(cpu); 939 run_wakeup_cpu(cpu);
942#endif /* CONFIG_HOTPLUG_CPU */ 940#endif /* CONFIG_HOTPLUG_CPU */
@@ -1003,9 +1001,6 @@ int __cpu_disable(void)
1003void __cpu_die(unsigned int cpu) 1001void __cpu_die(unsigned int cpu)
1004{ 1002{
1005 run_sleep_cpu(cpu); 1003 run_sleep_cpu(cpu);
1006
1007 if (num_online_cpus() == 1)
1008 enable_hlt();
1009} 1004}
1010 1005
1011#ifdef CONFIG_MN10300_CACHE_ENABLED 1006#ifdef CONFIG_MN10300_CACHE_ENABLED
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index b900e5afa0ae..a7a987c7954f 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -294,17 +294,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
294} 294}
295 295
296/* 296/*
297 * the architecture-independent dump_stack generator
298 */
299void dump_stack(void)
300{
301 unsigned long stack;
302
303 show_stack(current, &stack);
304}
305EXPORT_SYMBOL(dump_stack);
306
307/*
308 * dump the register file in the specified exception frame 297 * dump the register file in the specified exception frame
309 */ 298 */
310void show_registers_only(struct pt_regs *regs) 299void show_registers_only(struct pt_regs *regs)
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index e57e5bc23562..5a8ace63a6b4 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -139,30 +139,11 @@ void __init mem_init(void)
139} 139}
140 140
141/* 141/*
142 *
143 */
144void free_init_pages(char *what, unsigned long begin, unsigned long end)
145{
146 unsigned long addr;
147
148 for (addr = begin; addr < end; addr += PAGE_SIZE) {
149 ClearPageReserved(virt_to_page(addr));
150 init_page_count(virt_to_page(addr));
151 memset((void *) addr, 0xcc, PAGE_SIZE);
152 free_page(addr);
153 totalram_pages++;
154 }
155 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
156}
157
158/*
159 * recycle memory containing stuff only required for initialisation 142 * recycle memory containing stuff only required for initialisation
160 */ 143 */
161void free_initmem(void) 144void free_initmem(void)
162{ 145{
163 free_init_pages("unused kernel memory", 146 free_initmem_default(POISON_FREE_INITMEM);
164 (unsigned long) &__init_begin,
165 (unsigned long) &__init_end);
166} 147}
167 148
168/* 149/*
@@ -171,6 +152,6 @@ void free_initmem(void)
171#ifdef CONFIG_BLK_DEV_INITRD 152#ifdef CONFIG_BLK_DEV_INITRD
172void free_initrd_mem(unsigned long start, unsigned long end) 153void free_initrd_mem(unsigned long start, unsigned long end)
173{ 154{
174 free_init_pages("initrd memory", start, end); 155 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
175} 156}
176#endif 157#endif
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index 07f3212422ad..d797acc901e4 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -128,8 +128,6 @@ register struct thread_info *current_thread_info_reg asm("r10");
128/* For OpenRISC, this is anything in the LSW other than syscall trace */ 128/* For OpenRISC, this is anything in the LSW other than syscall trace */
129#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP)) 129#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
130 130
131#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
132
133#endif /* __KERNEL__ */ 131#endif /* __KERNEL__ */
134 132
135#endif /* _ASM_THREAD_INFO_H */ 133#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index 35f92ce51c24..ec6d9d37cefd 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -4,7 +4,7 @@
4 4
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6 6
7obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ 7obj-y := setup.o or32_ksyms.o process.o dma.o \
8 traps.o time.o irq.o entry.o ptrace.o signal.o \ 8 traps.o time.o irq.o entry.o ptrace.o signal.o \
9 sys_call_table.o 9 sys_call_table.o
10 10
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
deleted file mode 100644
index 5e8a3b6d6bc6..000000000000
--- a/arch/openrisc/kernel/idle.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * OpenRISC idle.c
3 *
4 * Linux architectural port borrowing liberally from similar works of
5 * others. All original copyrights apply as per the original source
6 * declaration.
7 *
8 * Modifications for the OpenRISC architecture:
9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * Idle daemon for or32. Idle daemon will handle any action
18 * that needs to be taken when the system becomes idle.
19 */
20
21#include <linux/errno.h>
22#include <linux/sched.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/stddef.h>
27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h>
30#include <linux/tick.h>
31
32#include <asm/pgtable.h>
33#include <asm/uaccess.h>
34#include <asm/io.h>
35#include <asm/processor.h>
36#include <asm/mmu.h>
37#include <asm/cache.h>
38#include <asm/pgalloc.h>
39
40void (*powersave) (void) = NULL;
41
42void cpu_idle(void)
43{
44 set_thread_flag(TIF_POLLING_NRFLAG);
45
46 /* endless idle loop with no priority at all */
47 while (1) {
48 tick_nohz_idle_enter();
49 rcu_idle_enter();
50
51 while (!need_resched()) {
52 check_pgt_cache();
53 rmb();
54
55 clear_thread_flag(TIF_POLLING_NRFLAG);
56
57 local_irq_disable();
58 /* Don't trace irqs off for idle */
59 stop_critical_timings();
60 if (!need_resched() && powersave != NULL)
61 powersave();
62 start_critical_timings();
63 local_irq_enable();
64 set_thread_flag(TIF_POLLING_NRFLAG);
65 }
66
67 rcu_idle_exit();
68 tick_nohz_idle_exit();
69 preempt_enable_no_resched();
70 schedule();
71 preempt_disable();
72 }
73}
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 00c233bf0d06..386af258591d 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -90,6 +90,7 @@ void show_regs(struct pt_regs *regs)
90{ 90{
91 extern void show_registers(struct pt_regs *regs); 91 extern void show_registers(struct pt_regs *regs);
92 92
93 show_regs_print_info(KERN_DEFAULT);
93 /* __PHX__ cleanup this mess */ 94 /* __PHX__ cleanup this mess */
94 show_registers(regs); 95 show_registers(regs);
95} 96}
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 5cce396016d0..3d3f6062f49c 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -105,17 +105,6 @@ void show_trace_task(struct task_struct *tsk)
105 */ 105 */
106} 106}
107 107
108/*
109 * The architecture-independent backtrace generator
110 */
111void dump_stack(void)
112{
113 unsigned long stack;
114
115 show_stack(current, &stack);
116}
117EXPORT_SYMBOL(dump_stack);
118
119void show_registers(struct pt_regs *regs) 108void show_registers(struct pt_regs *regs)
120{ 109{
121 int i; 110 int i;
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index e7fdc50c4bf0..b3cbc6703837 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -43,6 +43,7 @@
43#include <asm/kmap_types.h> 43#include <asm/kmap_types.h>
44#include <asm/fixmap.h> 44#include <asm/fixmap.h>
45#include <asm/tlbflush.h> 45#include <asm/tlbflush.h>
46#include <asm/sections.h>
46 47
47int mem_init_done; 48int mem_init_done;
48 49
@@ -201,9 +202,6 @@ void __init paging_init(void)
201 202
202/* References to section boundaries */ 203/* References to section boundaries */
203 204
204extern char _stext, _etext, _edata, __bss_start, _end;
205extern char __init_begin, __init_end;
206
207static int __init free_pages_init(void) 205static int __init free_pages_init(void)
208{ 206{
209 int reservedpages, pfn; 207 int reservedpages, pfn;
@@ -263,30 +261,11 @@ void __init mem_init(void)
263#ifdef CONFIG_BLK_DEV_INITRD 261#ifdef CONFIG_BLK_DEV_INITRD
264void free_initrd_mem(unsigned long start, unsigned long end) 262void free_initrd_mem(unsigned long start, unsigned long end)
265{ 263{
266 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", 264 free_reserved_area(start, end, 0, "initrd");
267 (end - start) >> 10);
268
269 for (; start < end; start += PAGE_SIZE) {
270 ClearPageReserved(virt_to_page(start));
271 init_page_count(virt_to_page(start));
272 free_page(start);
273 totalram_pages++;
274 }
275} 265}
276#endif 266#endif
277 267
278void free_initmem(void) 268void free_initmem(void)
279{ 269{
280 unsigned long addr; 270 free_initmem_default(0);
281
282 addr = (unsigned long)(&__init_begin);
283 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
284 ClearPageReserved(virt_to_page(addr));
285 init_page_count(virt_to_page(addr));
286 free_page(addr);
287 totalram_pages++;
288 }
289 printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
290 ((unsigned long)&__init_end -
291 (unsigned long)&__init_begin) >> 10);
292} 271}
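
The loops deleted above -- ClearPageReserved(), init_page_count(), free_page(), totalram_pages++ plus a "Freeing ... memory" printk -- are exactly what the new mm helpers centralize. free_initmem_default(poison) walks __init_begin..__init_end, and free_reserved_area() walks an arbitrary range; a simplified sketch of the latter follows (the real helper in mm/page_alloc.c also aligns the range and logs the freed size):

/* Simplified sketch of the helper this patch switches to. */
static unsigned long free_reserved_area_sketch(unsigned long start,
					       unsigned long end, int poison)
{
	unsigned long pages = 0;

	for (; start < end; start += PAGE_SIZE, pages++) {
		if (poison)
			memset((void *)start, poison, PAGE_SIZE);
		free_reserved_page(virt_to_page(start));	/* un-reserve and free */
	}
	return pages;
}
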
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 0339181bf3ac..433e75a2ee9a 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,5 +1,6 @@
1config PARISC 1config PARISC
2 def_bool y 2 def_bool y
3 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
3 select HAVE_IDE 4 select HAVE_IDE
4 select HAVE_OPROFILE 5 select HAVE_OPROFILE
5 select HAVE_FUNCTION_TRACER if 64BIT 6 select HAVE_FUNCTION_TRACER if 64BIT
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index 7305ac8f7f5b..bc989e522a04 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -12,18 +12,4 @@ config DEBUG_RODATA
12 portion of the kernel code won't be covered by a TLB anymore. 12 portion of the kernel code won't be covered by a TLB anymore.
13 If in doubt, say "N". 13 If in doubt, say "N".
14 14
15config DEBUG_STRICT_USER_COPY_CHECKS
16 bool "Strict copy size checks"
17 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
18 ---help---
19 Enabling this option turns a certain set of sanity checks for user
20 copy operations into compile time failures.
21
22 The copy_from_user() etc checks are there to help test if there
23 are sufficient security checks on the length argument of
24 the copy operation, by having gcc prove that the argument is
25 within bounds.
26
27 If unsure, or if you run an older (pre 4.4) gcc, say N.
28
29endmenu 15endmenu
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index d1fb79a36f3d..6182832e5b6c 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -77,8 +77,6 @@ struct thread_info {
77#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ 77#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
78 _TIF_BLOCKSTEP) 78 _TIF_BLOCKSTEP)
79 79
80#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
81
82#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
83 81
84#endif /* _ASM_PARISC_THREAD_INFO_H */ 82#endif /* _ASM_PARISC_THREAD_INFO_H */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index ae9a46cbfd92..74d835820ee7 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -170,12 +170,4 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
170 170
171#undef STR 171#undef STR
172 172
173/*
174 * "Conditional" syscalls
175 *
176 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
177 * but it doesn't work on all toolchains, so we just do it by hand
178 */
179#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
180
181#endif /* _ASM_PARISC_UNISTD_H_ */ 173#endif /* _ASM_PARISC_UNISTD_H_ */
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d13507246c5d..55f92b614182 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -59,28 +59,6 @@
59#include <asm/unwind.h> 59#include <asm/unwind.h>
60#include <asm/sections.h> 60#include <asm/sections.h>
61 61
62/*
63 * The idle thread. There's no useful work to be
64 * done, so just try to conserve power and have a
65 * low exit latency (ie sit in a loop waiting for
66 * somebody to say that they'd like to reschedule)
67 */
68void cpu_idle(void)
69{
70 set_thread_flag(TIF_POLLING_NRFLAG);
71
72 /* endless idle loop with no priority at all */
73 while (1) {
74 rcu_idle_enter();
75 while (!need_resched())
76 barrier();
77 rcu_idle_exit();
78 schedule_preempt_disabled();
79 check_pgt_cache();
80 }
81}
82
83
84#define COMMAND_GLOBAL F_EXTEND(0xfffe0030) 62#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
85#define CMD_RESET 5 /* reset any module */ 63#define CMD_RESET 5 /* reset any module */
86 64
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 6266730efd61..fd1bb1519c2b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -329,7 +329,7 @@ void __init smp_callin(void)
329 329
330 local_irq_enable(); /* Interrupts have been off until now */ 330 local_irq_enable(); /* Interrupts have been off until now */
331 331
332 cpu_idle(); /* Wait for timer to schedule some work */ 332 cpu_startup_entry(CPUHP_ONLINE);
333 333
334 /* NOTREACHED */ 334 /* NOTREACHED */
335 panic("smp_callin() AAAAaaaaahhhh....\n"); 335 panic("smp_callin() AAAAaaaaahhhh....\n");
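
Every architecture's secondary-CPU bring-up gets the same one-line conversion: the final tail-call into the arch's cpu_idle() becomes cpu_startup_entry(CPUHP_ONLINE), the entry point of the generic idle loop. An illustrative bring-up path (the function name is made up, not parisc code) now ends like this:

/* Illustrative pattern only -- names are generic, not from this patch. */
void secondary_cpu_entry(void)
{
	/* ... per-arch MMU, timer and IPI setup ... */
	set_cpu_online(smp_processor_id(), true);
	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);	/* hands the CPU to the idle loop; never returns */
}
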
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 051c8b90231f..f517e08e7f0d 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -60,47 +60,6 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
60 return -ENOSYS; 60 return -ENOSYS;
61} 61}
62 62
63/* Note: it is necessary to treat out_fd and in_fd as unsigned ints, with the
64 * corresponding cast to a signed int to insure that the proper conversion
65 * (sign extension) between the register representation of a signed int (msr in
66 * 32-bit mode) and the register representation of a signed int (msr in 64-bit
67 * mode) is performed.
68 */
69asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd,
70 compat_off_t __user *offset, compat_size_t count)
71{
72 return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
73}
74
75asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd,
76 compat_loff_t __user *offset, compat_size_t count)
77{
78 return sys_sendfile64((int)out_fd, (int)in_fd,
79 (loff_t __user *)offset, count);
80}
81
82asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
83{
84 union semun u;
85
86 if (cmd == SETVAL) {
87 /* Ugh. arg is a union of int,ptr,ptr,ptr, so is 8 bytes.
88 * The int should be in the first 4, but our argument
89 * frobbing has left it in the last 4.
90 */
91 u.val = *((int *)&arg + 1);
92 return sys_semctl (semid, semnum, cmd, u);
93 }
94 return sys_semctl (semid, semnum, cmd, arg);
95}
96
97long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
98 size_t len)
99{
100 return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
101 buf, len);
102}
103
104asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi, 63asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
105 u32 mask_lo, int fd, 64 u32 mask_lo, int fd,
106 const char __user *pathname) 65 const char __user *pathname)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index f57dc137b8dd..0c9107285e66 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -198,7 +198,7 @@
198 ENTRY_SAME(madvise) 198 ENTRY_SAME(madvise)
199 ENTRY_SAME(clone_wrapper) /* 120 */ 199 ENTRY_SAME(clone_wrapper) /* 120 */
200 ENTRY_SAME(setdomainname) 200 ENTRY_SAME(setdomainname)
201 ENTRY_DIFF(sendfile) 201 ENTRY_COMP(sendfile)
202 /* struct sockaddr... */ 202 /* struct sockaddr... */
203 ENTRY_SAME(recvfrom) 203 ENTRY_SAME(recvfrom)
204 /* struct timex contains longs */ 204 /* struct timex contains longs */
@@ -282,7 +282,7 @@
282 ENTRY_COMP(recvmsg) 282 ENTRY_COMP(recvmsg)
283 ENTRY_SAME(semop) /* 185 */ 283 ENTRY_SAME(semop) /* 185 */
284 ENTRY_SAME(semget) 284 ENTRY_SAME(semget)
285 ENTRY_DIFF(semctl) 285 ENTRY_COMP(semctl)
286 ENTRY_COMP(msgsnd) 286 ENTRY_COMP(msgsnd)
287 ENTRY_COMP(msgrcv) 287 ENTRY_COMP(msgrcv)
288 ENTRY_SAME(msgget) /* 190 */ 288 ENTRY_SAME(msgget) /* 190 */
@@ -304,7 +304,7 @@
304 ENTRY_SAME(gettid) 304 ENTRY_SAME(gettid)
305 ENTRY_OURS(readahead) 305 ENTRY_OURS(readahead)
306 ENTRY_SAME(tkill) 306 ENTRY_SAME(tkill)
307 ENTRY_DIFF(sendfile64) 307 ENTRY_COMP(sendfile64)
308 ENTRY_COMP(futex) /* 210 */ 308 ENTRY_COMP(futex) /* 210 */
309 ENTRY_COMP(sched_setaffinity) 309 ENTRY_COMP(sched_setaffinity)
310 ENTRY_COMP(sched_getaffinity) 310 ENTRY_COMP(sched_getaffinity)
@@ -318,7 +318,7 @@
318 ENTRY_SAME(alloc_hugepages) /* 220 */ 318 ENTRY_SAME(alloc_hugepages) /* 220 */
319 ENTRY_SAME(free_hugepages) 319 ENTRY_SAME(free_hugepages)
320 ENTRY_SAME(exit_group) 320 ENTRY_SAME(exit_group)
321 ENTRY_DIFF(lookup_dcookie) 321 ENTRY_COMP(lookup_dcookie)
322 ENTRY_SAME(epoll_create) 322 ENTRY_SAME(epoll_create)
323 ENTRY_SAME(epoll_ctl) /* 225 */ 323 ENTRY_SAME(epoll_ctl) /* 225 */
324 ENTRY_SAME(epoll_wait) 324 ENTRY_SAME(epoll_wait)
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index aeb8f8f2c07a..f702bff0bed9 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -126,6 +126,8 @@ void show_regs(struct pt_regs *regs)
126 user = user_mode(regs); 126 user = user_mode(regs);
127 level = user ? KERN_DEBUG : KERN_CRIT; 127 level = user ? KERN_DEBUG : KERN_CRIT;
128 128
129 show_regs_print_info(level);
130
129 print_gr(level, regs); 131 print_gr(level, regs);
130 132
131 for (i = 0; i < 8; i += 4) 133 for (i = 0; i < 8; i += 4)
@@ -158,14 +160,6 @@ void show_regs(struct pt_regs *regs)
158 } 160 }
159} 161}
160 162
161
162void dump_stack(void)
163{
164 show_stack(NULL, NULL);
165}
166
167EXPORT_SYMBOL(dump_stack);
168
169static void do_show_stack(struct unwind_frame_info *info) 163static void do_show_stack(struct unwind_frame_info *info)
170{ 164{
171 int i = 1; 165 int i = 1;
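
This is the second architecture-private dump_stack() dropped in this series (OpenRISC above, parisc here); both now fall back to the shared copy in lib/dump_stack.c, which pairs the common task/CPU banner with show_stack(). Its approximate shape (a sketch, not the verbatim library code):

/* Approximate shape of the shared lib/dump_stack.c implementation. */
void dump_stack(void)
{
	dump_stack_print_info(KERN_DEFAULT);	/* task, pid, CPU, kernel version */
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
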
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3ac462de53a4..157b931e7b09 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -505,7 +505,6 @@ static void __init map_pages(unsigned long start_vaddr,
505 505
506void free_initmem(void) 506void free_initmem(void)
507{ 507{
508 unsigned long addr;
509 unsigned long init_begin = (unsigned long)__init_begin; 508 unsigned long init_begin = (unsigned long)__init_begin;
510 unsigned long init_end = (unsigned long)__init_end; 509 unsigned long init_end = (unsigned long)__init_end;
511 510
@@ -533,19 +532,10 @@ void free_initmem(void)
533 * pages are no-longer executable */ 532 * pages are no-longer executable */
534 flush_icache_range(init_begin, init_end); 533 flush_icache_range(init_begin, init_end);
535 534
536 for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { 535 num_physpages += free_initmem_default(0);
537 ClearPageReserved(virt_to_page(addr));
538 init_page_count(virt_to_page(addr));
539 free_page(addr);
540 num_physpages++;
541 totalram_pages++;
542 }
543 536
544 /* set up a new led state on systems shipped LED State panel */ 537 /* set up a new led state on systems shipped LED State panel */
545 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 538 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
546
547 printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
548 (init_end - init_begin) >> 10);
549} 539}
550 540
551 541
@@ -697,6 +687,8 @@ void show_mem(unsigned int filter)
697 687
698 printk(KERN_INFO "Mem-info:\n"); 688 printk(KERN_INFO "Mem-info:\n");
699 show_free_areas(filter); 689 show_free_areas(filter);
690 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
691 return;
700#ifndef CONFIG_DISCONTIGMEM 692#ifndef CONFIG_DISCONTIGMEM
701 i = max_mapnr; 693 i = max_mapnr;
702 while (i-- > 0) { 694 while (i-- > 0) {
@@ -1107,15 +1099,6 @@ void flush_tlb_all(void)
1107#ifdef CONFIG_BLK_DEV_INITRD 1099#ifdef CONFIG_BLK_DEV_INITRD
1108void free_initrd_mem(unsigned long start, unsigned long end) 1100void free_initrd_mem(unsigned long start, unsigned long end)
1109{ 1101{
1110 if (start >= end) 1102 num_physpages += free_reserved_area(start, end, 0, "initrd");
1111 return;
1112 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1113 for (; start < end; start += PAGE_SIZE) {
1114 ClearPageReserved(virt_to_page(start));
1115 init_page_count(virt_to_page(start));
1116 free_page(start);
1117 num_physpages++;
1118 totalram_pages++;
1119 }
1120} 1103}
1121#endif 1104#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ea5bb045983a..a0259edae5c9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -114,7 +114,6 @@ config PPC
114 select USE_GENERIC_SMP_HELPERS if SMP 114 select USE_GENERIC_SMP_HELPERS if SMP
115 select HAVE_OPROFILE 115 select HAVE_OPROFILE
116 select HAVE_DEBUG_KMEMLEAK 116 select HAVE_DEBUG_KMEMLEAK
117 select HAVE_SYSCALL_WRAPPERS if PPC64
118 select GENERIC_ATOMIC64 if PPC32 117 select GENERIC_ATOMIC64 if PPC32
119 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 118 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
120 select HAVE_PERF_EVENTS 119 select HAVE_PERF_EVENTS
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a32c4c2..4fcbd6b14a3a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
3 3
4#ifdef CONFIG_HUGETLB_PAGE 4#ifdef CONFIG_HUGETLB_PAGE
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm-generic/hugetlb.h>
6 7
7extern struct kmem_cache *hugepte_cache; 8extern struct kmem_cache *hugepte_cache;
8 9
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
new file mode 100644
index 000000000000..b36f650a13ff
--- /dev/null
+++ b/arch/powerpc/include/asm/linkage.h
@@ -0,0 +1,13 @@
1#ifndef _ASM_POWERPC_LINKAGE_H
2#define _ASM_POWERPC_LINKAGE_H
3
4#ifdef CONFIG_PPC64
5#define cond_syscall(x) \
6 asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
7 "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
8#define SYSCALL_ALIAS(alias, name) \
9 asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
10 "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
11#endif
12
13#endif /* _ASM_POWERPC_LINKAGE_H */
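
The new asm/linkage.h keeps cond_syscall() working on ppc64, where every function has both a descriptor symbol and a dot-prefixed text entry, so both names must become weak aliases of sys_ni_syscall. With the macro above, cond_syscall(sys_foo) -- sys_foo being a made-up name for illustration -- expands to roughly:

/* Hypothetical expansion of cond_syscall(sys_foo) on ppc64. */
asm("\t.weak sys_foo\n\t.set sys_foo, sys_ni_syscall\n"
    "\t.weak .sys_foo\n\t.set .sys_foo, .sys_ni_syscall\n");
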
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ebbec52d21bd..43523fe0d8b4 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -190,7 +190,7 @@ SYSCALL_SPU(getcwd)
190SYSCALL_SPU(capget) 190SYSCALL_SPU(capget)
191SYSCALL_SPU(capset) 191SYSCALL_SPU(capset)
192COMPAT_SYS(sigaltstack) 192COMPAT_SYS(sigaltstack)
193SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile) 193COMPAT_SYS_SPU(sendfile)
194SYSCALL(ni_syscall) 194SYSCALL(ni_syscall)
195SYSCALL(ni_syscall) 195SYSCALL(ni_syscall)
196PPC_SYS(vfork) 196PPC_SYS(vfork)
@@ -230,7 +230,7 @@ COMPAT_SYS_SPU(sched_setaffinity)
230COMPAT_SYS_SPU(sched_getaffinity) 230COMPAT_SYS_SPU(sched_getaffinity)
231SYSCALL(ni_syscall) 231SYSCALL(ni_syscall)
232SYSCALL(ni_syscall) 232SYSCALL(ni_syscall)
233SYSX(sys_ni_syscall,compat_sys_sendfile64_wrapper,sys_sendfile64) 233SYS32ONLY(sendfile64)
234COMPAT_SYS_SPU(io_setup) 234COMPAT_SYS_SPU(io_setup)
235SYSCALL_SPU(io_destroy) 235SYSCALL_SPU(io_destroy)
236COMPAT_SYS_SPU(io_getevents) 236COMPAT_SYS_SPU(io_getevents)
@@ -239,7 +239,7 @@ SYSCALL_SPU(io_cancel)
239SYSCALL(set_tid_address) 239SYSCALL(set_tid_address)
240SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64) 240SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
241SYSCALL(exit_group) 241SYSCALL(exit_group)
242SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie) 242COMPAT_SYS(lookup_dcookie)
243SYSCALL_SPU(epoll_create) 243SYSCALL_SPU(epoll_create)
244SYSCALL_SPU(epoll_ctl) 244SYSCALL_SPU(epoll_ctl)
245SYSCALL_SPU(epoll_wait) 245SYSCALL_SPU(epoll_wait)
@@ -273,8 +273,8 @@ COMPAT_SYS(mq_timedreceive)
273COMPAT_SYS(mq_notify) 273COMPAT_SYS(mq_notify)
274COMPAT_SYS(mq_getsetattr) 274COMPAT_SYS(mq_getsetattr)
275COMPAT_SYS(kexec_load) 275COMPAT_SYS(kexec_load)
276COMPAT_SYS(add_key) 276SYSCALL(add_key)
277COMPAT_SYS(request_key) 277SYSCALL(request_key)
278COMPAT_SYS(keyctl) 278COMPAT_SYS(keyctl)
279COMPAT_SYS(waitid) 279COMPAT_SYS(waitid)
280SYSCALL(ioprio_set) 280SYSCALL(ioprio_set)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 406b7b9a1341..8ceea14d6fe4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags)
182#define is_32bit_task() (1) 182#define is_32bit_task() (1)
183#endif 183#endif
184 184
185#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
186
187#endif /* !__ASSEMBLY__ */ 185#endif /* !__ASSEMBLY__ */
188 186
189#endif /* __KERNEL__ */ 187#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1487f0f12293..3ca819f541bf 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -56,11 +56,5 @@
56#define __ARCH_WANT_SYS_VFORK 56#define __ARCH_WANT_SYS_VFORK
57#define __ARCH_WANT_SYS_CLONE 57#define __ARCH_WANT_SYS_CLONE
58 58
59/*
60 * "Conditional" syscalls
61 */
62#define cond_syscall(x) \
63 asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
64
65#endif /* __ASSEMBLY__ */ 59#endif /* __ASSEMBLY__ */
66#endif /* _ASM_POWERPC_UNISTD_H_ */ 60#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index b532060d0916..23016020915e 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
51extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); 51extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
52extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 52extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
53extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 53extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
54extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
54#endif /* _ASM_UPROBES_H */ 55#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/uapi/asm/linkage.h b/arch/powerpc/include/uapi/asm/linkage.h
deleted file mode 100644
index e1c4ac1cc4ba..000000000000
--- a/arch/powerpc/include/uapi/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_POWERPC_LINKAGE_H
2#define _ASM_POWERPC_LINKAGE_H
3
4/* Nothing to see here... */
5
6#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index b3ba5163eae2..9ec3fe174cba 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
150 if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) 150 if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
151 continue; 151 continue;
152 152
153 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); 153 free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
154 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
155 free_page((unsigned long)__va(addr));
156 totalram_pages++;
157 } 154 }
158} 155}
159#endif 156#endif
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 06c8202a69cf..2230fd0ca3e4 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end)
1045 if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start)) 1045 if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
1046 continue; 1046 continue;
1047 1047
1048 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); 1048 free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
1049 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1050 free_page((unsigned long)__va(addr));
1051 totalram_pages++;
1052 } 1049 }
1053} 1050}
1054 1051
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index ea78761aa169..939ea7ef0dc8 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -33,11 +33,6 @@
33#include <asm/runlatch.h> 33#include <asm/runlatch.h>
34#include <asm/smp.h> 34#include <asm/smp.h>
35 35
36#ifdef CONFIG_HOTPLUG_CPU
37#define cpu_should_die() cpu_is_offline(smp_processor_id())
38#else
39#define cpu_should_die() 0
40#endif
41 36
42unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; 37unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
43EXPORT_SYMBOL(cpuidle_disable); 38EXPORT_SYMBOL(cpuidle_disable);
@@ -50,64 +45,38 @@ static int __init powersave_off(char *arg)
50} 45}
51__setup("powersave=off", powersave_off); 46__setup("powersave=off", powersave_off);
52 47
53/* 48#ifdef CONFIG_HOTPLUG_CPU
54 * The body of the idle task. 49void arch_cpu_idle_dead(void)
55 */
56void cpu_idle(void)
57{ 50{
58 set_thread_flag(TIF_POLLING_NRFLAG); 51 sched_preempt_enable_no_resched();
59 while (1) { 52 cpu_die();
60 tick_nohz_idle_enter(); 53}
61 rcu_idle_enter(); 54#endif
62
63 while (!need_resched() && !cpu_should_die()) {
64 ppc64_runlatch_off();
65
66 if (ppc_md.power_save) {
67 clear_thread_flag(TIF_POLLING_NRFLAG);
68 /*
69 * smp_mb is so clearing of TIF_POLLING_NRFLAG
70 * is ordered w.r.t. need_resched() test.
71 */
72 smp_mb();
73 local_irq_disable();
74
75 /* Don't trace irqs off for idle */
76 stop_critical_timings();
77
78 /* check again after disabling irqs */
79 if (!need_resched() && !cpu_should_die())
80 ppc_md.power_save();
81
82 start_critical_timings();
83
84 /* Some power_save functions return with
85 * interrupts enabled, some don't.
86 */
87 if (irqs_disabled())
88 local_irq_enable();
89 set_thread_flag(TIF_POLLING_NRFLAG);
90
91 } else {
92 /*
93 * Go into low thread priority and possibly
94 * low power mode.
95 */
96 HMT_low();
97 HMT_very_low();
98 }
99 }
100 55
101 HMT_medium(); 56void arch_cpu_idle(void)
102 ppc64_runlatch_on(); 57{
103 rcu_idle_exit(); 58 ppc64_runlatch_off();
104 tick_nohz_idle_exit(); 59
105 if (cpu_should_die()) { 60 if (ppc_md.power_save) {
106 sched_preempt_enable_no_resched(); 61 ppc_md.power_save();
107 cpu_die(); 62 /*
108 } 63 * Some power_save functions return with
109 schedule_preempt_disabled(); 64 * interrupts enabled, some don't.
65 */
66 if (irqs_disabled())
67 local_irq_enable();
68 } else {
69 local_irq_enable();
70 /*
71 * Go into low thread priority and possibly
72 * low power mode.
73 */
74 HMT_low();
75 HMT_very_low();
110 } 76 }
77
78 HMT_medium();
79 ppc64_runlatch_on();
111} 80}
112 81
113int powersave_nap; 82int powersave_nap;
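
The cpu_should_die() plumbing disappears because the generic idle loop already checks for an offlined CPU on every pass and calls the arch_cpu_idle_dead() hook added above to park it. A stripped-down view of that slice of the generic loop (simplified sketch; tick, RCU and polling handling omitted):

/* Simplified slice of the generic idle loop, for orientation only. */
static void idle_loop_sketch(void)
{
	while (!need_resched()) {
		local_irq_disable();

		if (cpu_is_offline(smp_processor_id()))
			arch_cpu_idle_dead();	/* powerpc: enable preemption, cpu_die() */

		arch_cpu_idle();		/* must return with interrupts enabled */
	}
}
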
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index a61b133c4f99..6782221d49bd 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void)
756 end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; 756 end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
757 757
758 /* Free the tmp space we don't need */ 758 /* Free the tmp space we don't need */
759 for (; start < end; start += PAGE_SIZE) { 759 free_reserved_area(start, end, 0, NULL);
760 ClearPageReserved(virt_to_page(start));
761 init_page_count(virt_to_page(start));
762 free_page(start);
763 totalram_pages++;
764 }
765} 760}
766 761
767static int __init kvm_guest_init(void) 762static int __init kvm_guest_init(void)
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bec1e930ed73..48fbc2b97e95 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -511,8 +511,7 @@ int __init nvram_scan_partitions(void)
511 "detected: 0-length partition\n"); 511 "detected: 0-length partition\n");
512 goto out; 512 goto out;
513 } 513 }
514 tmp_part = (struct nvram_partition *) 514 tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
515 kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
516 err = -ENOMEM; 515 err = -ENOMEM;
517 if (!tmp_part) { 516 if (!tmp_part) {
518 printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n"); 517 printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n");
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 16e77a81ab4f..13a8d9d0b5cb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -831,6 +831,8 @@ void show_regs(struct pt_regs * regs)
831{ 831{
832 int i, trap; 832 int i, trap;
833 833
834 show_regs_print_info(KERN_DEFAULT);
835
834 printk("NIP: "REG" LR: "REG" CTR: "REG"\n", 836 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
835 regs->nip, regs->link, regs->ctr); 837 regs->nip, regs->link, regs->ctr);
836 printk("REGS: %p TRAP: %04lx %s (%s)\n", 838 printk("REGS: %p TRAP: %04lx %s (%s)\n",
@@ -850,12 +852,6 @@ void show_regs(struct pt_regs * regs)
850#else 852#else
851 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); 853 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
852#endif 854#endif
853 printk("TASK = %p[%d] '%s' THREAD: %p",
854 current, task_pid_nr(current), current->comm, task_thread_info(current));
855
856#ifdef CONFIG_SMP
857 printk(" CPU: %d", raw_smp_processor_id());
858#endif /* CONFIG_SMP */
859 855
860 for (i = 0; i < 32; i++) { 856 for (i = 0; i < 32; i++) {
861 if ((i % REGS_PER_LINE) == 0) 857 if ((i % REGS_PER_LINE) == 0)
@@ -1362,12 +1358,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1362 } while (count++ < kstack_depth_to_print); 1358 } while (count++ < kstack_depth_to_print);
1363} 1359}
1364 1360
1365void dump_stack(void)
1366{
1367 show_stack(current, NULL);
1368}
1369EXPORT_SYMBOL(dump_stack);
1370
1371#ifdef CONFIG_PPC64 1361#ifdef CONFIG_PPC64
1372/* Called with hard IRQs off */ 1362/* Called with hard IRQs off */
1373void __ppc64_runlatch_on(void) 1363void __ppc64_runlatch_on(void)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 76bd9da8cb71..ee7ac5e6e28a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -669,7 +669,7 @@ __cpuinit void start_secondary(void *unused)
669 669
670 local_irq_enable(); 670 local_irq_enable();
671 671
672 cpu_idle(); 672 cpu_startup_entry(CPUHP_ONLINE);
673 673
674 BUG(); 674 BUG();
675} 675}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d0bafc0cdf06..cd6e19d263b3 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -61,91 +61,6 @@ asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
61 return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x)); 61 return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
62} 62}
63 63
64#ifdef CONFIG_SYSVIPC
65long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
66 u32 fifth)
67{
68 int version;
69
70 version = call >> 16; /* hack for backward compatibility */
71 call &= 0xffff;
72
73 switch (call) {
74
75 case SEMTIMEDOP:
76 if (fifth)
77 /* sign extend semid */
78 return compat_sys_semtimedop((int)first,
79 compat_ptr(ptr), second,
80 compat_ptr(fifth));
81 /* else fall through for normal semop() */
82 case SEMOP:
83 /* struct sembuf is the same on 32 and 64bit :)) */
84 /* sign extend semid */
85 return sys_semtimedop((int)first, compat_ptr(ptr), second,
86 NULL);
87 case SEMGET:
88 /* sign extend key, nsems */
89 return sys_semget((int)first, (int)second, third);
90 case SEMCTL:
91 /* sign extend semid, semnum */
92 return compat_sys_semctl((int)first, (int)second, third,
93 compat_ptr(ptr));
94
95 case MSGSND:
96 /* sign extend msqid */
97 return compat_sys_msgsnd((int)first, (int)second, third,
98 compat_ptr(ptr));
99 case MSGRCV:
100 /* sign extend msqid, msgtyp */
101 return compat_sys_msgrcv((int)first, second, (int)fifth,
102 third, version, compat_ptr(ptr));
103 case MSGGET:
104 /* sign extend key */
105 return sys_msgget((int)first, second);
106 case MSGCTL:
107 /* sign extend msqid */
108 return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
109
110 case SHMAT:
111 /* sign extend shmid */
112 return compat_sys_shmat((int)first, second, third, version,
113 compat_ptr(ptr));
114 case SHMDT:
115 return sys_shmdt(compat_ptr(ptr));
116 case SHMGET:
117 /* sign extend key_t */
118 return sys_shmget((int)first, second, third);
119 case SHMCTL:
120 /* sign extend shmid */
121 return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
122
123 default:
124 return -ENOSYS;
125 }
126
127 return -ENOSYS;
128}
129#endif
130
131/* Note: it is necessary to treat out_fd and in_fd as unsigned ints,
132 * with the corresponding cast to a signed int to insure that the
133 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
134 * and the register representation of a signed int (msr in 64-bit mode) is performed.
135 */
136asmlinkage long compat_sys_sendfile_wrapper(u32 out_fd, u32 in_fd,
137 compat_off_t __user *offset, u32 count)
138{
139 return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
140}
141
142asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
143 compat_loff_t __user *offset, u32 count)
144{
145 return sys_sendfile((int)out_fd, (int)in_fd,
146 (off_t __user *)offset, count);
147}
148
149unsigned long compat_sys_mmap2(unsigned long addr, size_t len, 64unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
150 unsigned long prot, unsigned long flags, 65 unsigned long prot, unsigned long flags,
151 unsigned long fd, unsigned long pgoff) 66 unsigned long fd, unsigned long pgoff)
@@ -195,13 +110,6 @@ asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long h
195 return sys_ftruncate(fd, (high << 32) | low); 110 return sys_ftruncate(fd, (high << 32) | low);
196} 111}
197 112
198long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
199 size_t len)
200{
201 return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
202 buf, len);
203}
204
205long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low, 113long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
206 size_t len, int advice) 114 size_t len, int advice)
207{ 115{
@@ -209,23 +117,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
209 advice); 117 advice);
210} 118}
211 119
212asmlinkage long compat_sys_add_key(const char __user *_type,
213 const char __user *_description,
214 const void __user *_payload,
215 u32 plen,
216 u32 ringid)
217{
218 return sys_add_key(_type, _description, _payload, plen, ringid);
219}
220
221asmlinkage long compat_sys_request_key(const char __user *_type,
222 const char __user *_description,
223 const char __user *_callout_info,
224 u32 destringid)
225{
226 return sys_request_key(_type, _description, _callout_info, destringid);
227}
228
229asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, 120asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
230 unsigned offset_hi, unsigned offset_lo, 121 unsigned offset_hi, unsigned offset_lo,
231 unsigned nbytes_hi, unsigned nbytes_lo) 122 unsigned nbytes_hi, unsigned nbytes_lo)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index f9748498fe58..13b867093499 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -156,15 +156,13 @@ static struct console udbg_console = {
156 .index = 0, 156 .index = 0,
157}; 157};
158 158
159static int early_console_initialized;
160
161/* 159/*
162 * Called by setup_system after ppc_md->probe and ppc_md->early_init. 160 * Called by setup_system after ppc_md->probe and ppc_md->early_init.
163 * Call it again after setting udbg_putc in ppc_md->setup_arch. 161 * Call it again after setting udbg_putc in ppc_md->setup_arch.
164 */ 162 */
165void __init register_early_udbg_console(void) 163void __init register_early_udbg_console(void)
166{ 164{
167 if (early_console_initialized) 165 if (early_console)
168 return; 166 return;
169 167
170 if (!udbg_putc) 168 if (!udbg_putc)
@@ -174,7 +172,7 @@ void __init register_early_udbg_console(void)
174 printk(KERN_INFO "early console immortal !\n"); 172 printk(KERN_INFO "early console immortal !\n");
175 udbg_console.flags &= ~CON_BOOT; 173 udbg_console.flags &= ~CON_BOOT;
176 } 174 }
177 early_console_initialized = 1; 175 early_console = &udbg_console;
178 register_console(&udbg_console); 176 register_console(&udbg_console);
179} 177}
180 178
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index bc77834dbf43..59f419b935f2 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -31,6 +31,16 @@
31#define UPROBE_TRAP_NR UINT_MAX 31#define UPROBE_TRAP_NR UINT_MAX
32 32
33/** 33/**
34 * is_trap_insn - check if the instruction is a trap variant
35 * @insn: instruction to be checked.
36 * Returns true if @insn is a trap variant.
37 */
38bool is_trap_insn(uprobe_opcode_t *insn)
39{
40 return (is_trap(*insn));
41}
42
43/**
34 * arch_uprobe_analyze_insn 44 * arch_uprobe_analyze_insn
35 * @mm: the probed address space. 45 * @mm: the probed address space.
36 * @arch_uprobe: the probepoint information. 46 * @arch_uprobe: the probepoint information.
@@ -43,12 +53,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
43 if (addr & 0x03) 53 if (addr & 0x03)
44 return -EINVAL; 54 return -EINVAL;
45 55
46 /*
47 * We currently don't support a uprobe on an already
48 * existing breakpoint instruction underneath
49 */
50 if (is_trap(auprobe->ainsn))
51 return -ENOTSUPP;
52 return 0; 56 return 0;
53} 57}
54 58
@@ -188,3 +192,16 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
188 192
189 return false; 193 return false;
190} 194}
195
196unsigned long
197arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
198{
199 unsigned long orig_ret_vaddr;
200
201 orig_ret_vaddr = regs->link;
202
203 /* Replace the return addr with trampoline addr */
204 regs->link = trampoline_vaddr;
205
206 return orig_ret_vaddr;
207}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5e93438afb06..dbdc15aa8127 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1039,7 +1039,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1039 if (!vcpu_book3s) 1039 if (!vcpu_book3s)
1040 goto out; 1040 goto out;
1041 1041
1042 vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) 1042 vcpu_book3s->shadow_vcpu =
1043 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); 1043 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
1044 if (!vcpu_book3s->shadow_vcpu) 1044 if (!vcpu_book3s->shadow_vcpu)
1045 goto free_vcpu; 1045 goto free_vcpu;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7e2246fb2f31..5a535b73ea18 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
263 vmemmap_list = vmem_back; 263 vmemmap_list = vmem_back;
264} 264}
265 265
266int __meminit vmemmap_populate(struct page *start_page, 266int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
267 unsigned long nr_pages, int node)
268{ 267{
269 unsigned long start = (unsigned long)start_page;
270 unsigned long end = (unsigned long)(start_page + nr_pages);
271 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; 268 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
272 269
273 /* Align to the page size of the linear mapping. */ 270 /* Align to the page size of the linear mapping. */
274 start = _ALIGN_DOWN(start, page_size); 271 start = _ALIGN_DOWN(start, page_size);
275 272
276 pr_debug("vmemmap_populate page %p, %ld pages, node %d\n", 273 pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
277 start_page, nr_pages, node);
278 pr_debug(" -> map %lx..%lx\n", start, end);
279 274
280 for (; start < end; start += page_size) { 275 for (; start < end; start += page_size) {
281 void *p; 276 void *p;
@@ -298,7 +293,7 @@ int __meminit vmemmap_populate(struct page *start_page,
298 return 0; 293 return 0;
299} 294}
300 295
301void vmemmap_free(struct page *memmap, unsigned long nr_pages) 296void vmemmap_free(unsigned long start, unsigned long end)
302{ 297{
303} 298}
304 299
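
The sparse-vmemmap hooks now take virtual address ranges instead of a struct page pointer plus a page count; the powerpc body barely changes because it already computed addresses internally. For reference, the new prototypes (the same change is applied to every architecture providing vmemmap):

int  vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_free(unsigned long start, unsigned long end);
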
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f1f7409a4183..cd76c454942f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -352,13 +352,9 @@ void __init mem_init(void)
352 struct page *page = pfn_to_page(pfn); 352 struct page *page = pfn_to_page(pfn);
353 if (memblock_is_reserved(paddr)) 353 if (memblock_is_reserved(paddr))
354 continue; 354 continue;
355 ClearPageReserved(page); 355 free_highmem_page(page);
356 init_page_count(page);
357 __free_page(page);
358 totalhigh_pages++;
359 reservedpages--; 356 reservedpages--;
360 } 357 }
361 totalram_pages += totalhigh_pages;
362 printk(KERN_DEBUG "High memory: %luk\n", 358 printk(KERN_DEBUG "High memory: %luk\n",
363 totalhigh_pages << (PAGE_SHIFT-10)); 359 totalhigh_pages << (PAGE_SHIFT-10));
364 } 360 }
@@ -405,39 +401,14 @@ void __init mem_init(void)
405 401
406void free_initmem(void) 402void free_initmem(void)
407{ 403{
408 unsigned long addr;
409
410 ppc_md.progress = ppc_printk_progress; 404 ppc_md.progress = ppc_printk_progress;
411 405 free_initmem_default(POISON_FREE_INITMEM);
412 addr = (unsigned long)__init_begin;
413 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
414 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
415 ClearPageReserved(virt_to_page(addr));
416 init_page_count(virt_to_page(addr));
417 free_page(addr);
418 totalram_pages++;
419 }
420 pr_info("Freeing unused kernel memory: %luk freed\n",
421 ((unsigned long)__init_end -
422 (unsigned long)__init_begin) >> 10);
423} 406}
424 407
425#ifdef CONFIG_BLK_DEV_INITRD 408#ifdef CONFIG_BLK_DEV_INITRD
426void __init free_initrd_mem(unsigned long start, unsigned long end) 409void __init free_initrd_mem(unsigned long start, unsigned long end)
427{ 410{
428 if (start >= end) 411 free_reserved_area(start, end, 0, "initrd");
429 return;
430
431 start = _ALIGN_DOWN(start, PAGE_SIZE);
432 end = _ALIGN_UP(end, PAGE_SIZE);
433 pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
434
435 for (; start < end; start += PAGE_SIZE) {
436 ClearPageReserved(virt_to_page(start));
437 init_page_count(virt_to_page(start));
438 free_page(start);
439 totalram_pages++;
440 }
441} 412}
442#endif 413#endif
443 414
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca2b4d7..fa33c546e778 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -22,6 +22,7 @@
22#include <linux/pfn.h> 22#include <linux/pfn.h>
23#include <linux/cpuset.h> 23#include <linux/cpuset.h>
24#include <linux/node.h> 24#include <linux/node.h>
25#include <linux/slab.h>
25#include <asm/sparsemem.h> 26#include <asm/sparsemem.h>
26#include <asm/prom.h> 27#include <asm/prom.h>
27#include <asm/smp.h> 28#include <asm/smp.h>
@@ -62,14 +63,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
62 */ 63 */
63static void __init setup_node_to_cpumask_map(void) 64static void __init setup_node_to_cpumask_map(void)
64{ 65{
65 unsigned int node, num = 0; 66 unsigned int node;
66 67
67 /* setup nr_node_ids if not done yet */ 68 /* setup nr_node_ids if not done yet */
68 if (nr_node_ids == MAX_NUMNODES) { 69 if (nr_node_ids == MAX_NUMNODES)
69 for_each_node_mask(node, node_possible_map) 70 setup_nr_node_ids();
70 num = node;
71 nr_node_ids = num + 1;
72 }
73 71
74 /* allocate the map */ 72 /* allocate the map */
75 for (node = 0; node < nr_node_ids; node++) 73 for (node = 0; node < nr_node_ids; node++)
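
The removed scan of node_possible_map is another bit of duplication that several architectures carried; the generic setup_nr_node_ids() performs the same computation once. A rough equivalent, mirroring the loop being deleted here:

/* Rough equivalent of the generic helper this hunk switches to. */
static void setup_nr_node_ids_sketch(void)
{
	unsigned int node, highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}
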
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0effe9f5a1ea..7be93367d92f 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -274,6 +274,8 @@ config 440EPX
274 select IBM_EMAC_EMAC4 274 select IBM_EMAC_EMAC4
275 select IBM_EMAC_RGMII 275 select IBM_EMAC_RGMII
276 select IBM_EMAC_ZMII 276 select IBM_EMAC_ZMII
277 select USB_EHCI_BIG_ENDIAN_MMIO
278 select USB_EHCI_BIG_ENDIAN_DESC
277 279
278config 440GRX 280config 440GRX
279 bool 281 bool
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ecf..381a592826a2 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,6 +7,8 @@ config PPC_MPC512x
7 select PPC_PCI_CHOICE 7 select PPC_PCI_CHOICE
8 select FSL_PCI if PCI 8 select FSL_PCI if PCI
9 select ARCH_WANT_OPTIONAL_GPIOLIB 9 select ARCH_WANT_OPTIONAL_GPIOLIB
10 select USB_EHCI_BIG_ENDIAN_MMIO
11 select USB_EHCI_BIG_ENDIAN_DESC
10 12
11config MPC5121_ADS 13config MPC5121_ADS
12 bool "Freescale MPC5121E ADS" 14 bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index d30235b7e3f7..db6ac389ef8c 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
172 172
173static inline void mpc512x_free_bootmem(struct page *page) 173static inline void mpc512x_free_bootmem(struct page *page)
174{ 174{
175 __ClearPageReserved(page);
176 BUG_ON(PageTail(page)); 175 BUG_ON(PageTail(page));
177 BUG_ON(atomic_read(&page->_count) > 1); 176 BUG_ON(atomic_read(&page->_count) > 1);
178 atomic_set(&page->_count, 1); 177 free_reserved_page(page);
179 __free_page(page);
180 totalram_pages++;
181} 178}
182 179
183void mpc512x_release_bootmem(void) 180void mpc512x_release_bootmem(void)
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 53aaefeb3386..9978f594cac0 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -113,34 +113,10 @@ config CBE_THERM
113 default m 113 default m
114 depends on CBE_RAS && SPU_BASE 114 depends on CBE_RAS && SPU_BASE
115 115
116config CBE_CPUFREQ
117 tristate "CBE frequency scaling"
118 depends on CBE_RAS && CPU_FREQ
119 default m
120 help
121 This adds the cpufreq driver for Cell BE processors.
122 For details, take a look at <file:Documentation/cpu-freq/>.
123 If you don't have such processor, say N
124
125config CBE_CPUFREQ_PMI_ENABLE
126 bool "CBE frequency scaling using PMI interface"
127 depends on CBE_CPUFREQ
128 default n
129 help
130 Select this, if you want to use the PMI interface
131 to switch frequencies. Using PMI, the
132 processor will not only be able to run at lower speed,
133 but also at lower core voltage.
134
135config CBE_CPUFREQ_PMI
136 tristate
137 depends on CBE_CPUFREQ_PMI_ENABLE
138 default CBE_CPUFREQ
139
140config PPC_PMI 116config PPC_PMI
141 tristate 117 tristate
142 default y 118 default y
143 depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON 119 depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON
144 help 120 help
145 PMI (Platform Management Interrupt) is a way to 121 PMI (Platform Management Interrupt) is a way to
146 communicate with the BMC (Baseboard Management Controller). 122 communicate with the BMC (Baseboard Management Controller).
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index a4a89350bcfc..fe053e7c73ee 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -5,9 +5,6 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
5obj-$(CONFIG_CBE_RAS) += ras.o 5obj-$(CONFIG_CBE_RAS) += ras.o
6 6
7obj-$(CONFIG_CBE_THERM) += cbe_thermal.o 7obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
8obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o
9obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o
10cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o
11obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o 8obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
12 9
13obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o 10obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
deleted file mode 100644
index d4c39e32f147..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * cpufreq driver for the cell processor
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/cpufreq.h>
24#include <linux/module.h>
25#include <linux/of_platform.h>
26
27#include <asm/machdep.h>
28#include <asm/prom.h>
29#include <asm/cell-regs.h>
30#include "cbe_cpufreq.h"
31
32static DEFINE_MUTEX(cbe_switch_mutex);
33
34
35/* the CBE supports an 8 step frequency scaling */
36static struct cpufreq_frequency_table cbe_freqs[] = {
37 {1, 0},
38 {2, 0},
39 {3, 0},
40 {4, 0},
41 {5, 0},
42 {6, 0},
43 {8, 0},
44 {10, 0},
45 {0, CPUFREQ_TABLE_END},
46};
47
48/*
49 * hardware specific functions
50 */
51
52static int set_pmode(unsigned int cpu, unsigned int slow_mode)
53{
54 int rc;
55
56 if (cbe_cpufreq_has_pmi)
57 rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
58 else
59 rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
60
61 pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
62
63 return rc;
64}
65
66/*
67 * cpufreq functions
68 */
69
70static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
71{
72 const u32 *max_freqp;
73 u32 max_freq;
74 int i, cur_pmode;
75 struct device_node *cpu;
76
77 cpu = of_get_cpu_node(policy->cpu, NULL);
78
79 if (!cpu)
80 return -ENODEV;
81
82 pr_debug("init cpufreq on CPU %d\n", policy->cpu);
83
84 /*
85 * Let's check we can actually get to the CELL regs
86 */
87 if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
88 !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
89 pr_info("invalid CBE regs pointers for cpufreq\n");
90 return -EINVAL;
91 }
92
93 max_freqp = of_get_property(cpu, "clock-frequency", NULL);
94
95 of_node_put(cpu);
96
97 if (!max_freqp)
98 return -EINVAL;
99
100 /* we need the freq in kHz */
101 max_freq = *max_freqp / 1000;
102
103 pr_debug("max clock-frequency is at %u kHz\n", max_freq);
104 pr_debug("initializing frequency table\n");
105
106 /* initialize frequency table */
107 for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
108 cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
109 pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
110 }
111
112 /* if DEBUG is enabled set_pmode() measures the latency
113 * of a transition */
114 policy->cpuinfo.transition_latency = 25000;
115
116 cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
117 pr_debug("current pmode is at %d\n",cur_pmode);
118
119 policy->cur = cbe_freqs[cur_pmode].frequency;
120
121#ifdef CONFIG_SMP
122 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
123#endif
124
125 cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
126
127 /* this ensures that policy->cpuinfo_min
128 * and policy->cpuinfo_max are set correctly */
129 return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
130}
131
132static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
133{
134 cpufreq_frequency_table_put_attr(policy->cpu);
135 return 0;
136}
137
138static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
139{
140 return cpufreq_frequency_table_verify(policy, cbe_freqs);
141}
142
143static int cbe_cpufreq_target(struct cpufreq_policy *policy,
144 unsigned int target_freq,
145 unsigned int relation)
146{
147 int rc;
148 struct cpufreq_freqs freqs;
149 unsigned int cbe_pmode_new;
150
151 cpufreq_frequency_table_target(policy,
152 cbe_freqs,
153 target_freq,
154 relation,
155 &cbe_pmode_new);
156
157 freqs.old = policy->cur;
158 freqs.new = cbe_freqs[cbe_pmode_new].frequency;
159 freqs.cpu = policy->cpu;
160
161 mutex_lock(&cbe_switch_mutex);
162 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
163
164 pr_debug("setting frequency for cpu %d to %d kHz, " \
165 "1/%d of max frequency\n",
166 policy->cpu,
167 cbe_freqs[cbe_pmode_new].frequency,
168 cbe_freqs[cbe_pmode_new].index);
169
170 rc = set_pmode(policy->cpu, cbe_pmode_new);
171
172 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
173 mutex_unlock(&cbe_switch_mutex);
174
175 return rc;
176}
177
178static struct cpufreq_driver cbe_cpufreq_driver = {
179 .verify = cbe_cpufreq_verify,
180 .target = cbe_cpufreq_target,
181 .init = cbe_cpufreq_cpu_init,
182 .exit = cbe_cpufreq_cpu_exit,
183 .name = "cbe-cpufreq",
184 .owner = THIS_MODULE,
185 .flags = CPUFREQ_CONST_LOOPS,
186};
187
188/*
189 * module init and destoy
190 */
191
192static int __init cbe_cpufreq_init(void)
193{
194 if (!machine_is(cell))
195 return -ENODEV;
196
197 return cpufreq_register_driver(&cbe_cpufreq_driver);
198}
199
200static void __exit cbe_cpufreq_exit(void)
201{
202 cpufreq_unregister_driver(&cbe_cpufreq_driver);
203}
204
205module_init(cbe_cpufreq_init);
206module_exit(cbe_cpufreq_exit);
207
208MODULE_LICENSE("GPL");
209MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h
deleted file mode 100644
index c1d86bfa92ff..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * cbe_cpufreq.h
3 *
4 * This file contains the definitions used by the cbe_cpufreq driver.
5 *
6 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
7 *
8 * Author: Christian Krafft <krafft@de.ibm.com>
9 *
10 */
11
12#include <linux/cpufreq.h>
13#include <linux/types.h>
14
15int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
16int cbe_cpufreq_get_pmode(int cpu);
17
18int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
19
20#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE)
21extern bool cbe_cpufreq_has_pmi;
22#else
23#define cbe_cpufreq_has_pmi (0)
24#endif
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
deleted file mode 100644
index 20472e487b6f..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * pervasive backend for the cbe_cpufreq driver
3 *
4 * This driver makes use of the pervasive unit to
5 * engage the desired frequency.
6 *
7 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
8 *
9 * Author: Christian Krafft <krafft@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/io.h>
27#include <linux/kernel.h>
28#include <linux/time.h>
29#include <asm/machdep.h>
30#include <asm/hw_irq.h>
31#include <asm/cell-regs.h>
32
33#include "cbe_cpufreq.h"
34
35/* to write to MIC register */
36static u64 MIC_Slow_Fast_Timer_table[] = {
37 [0 ... 7] = 0x007fc00000000000ull,
38};
39
40/* more values for the MIC */
41static u64 MIC_Slow_Next_Timer_table[] = {
42 0x0000240000000000ull,
43 0x0000268000000000ull,
44 0x000029C000000000ull,
45 0x00002D0000000000ull,
46 0x0000300000000000ull,
47 0x0000334000000000ull,
48 0x000039C000000000ull,
49 0x00003FC000000000ull,
50};
51
52
53int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
54{
55 struct cbe_pmd_regs __iomem *pmd_regs;
56 struct cbe_mic_tm_regs __iomem *mic_tm_regs;
57 unsigned long flags;
58 u64 value;
59#ifdef DEBUG
60 long time;
61#endif
62
63 local_irq_save(flags);
64
65 mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
66 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
67
68#ifdef DEBUG
69 time = jiffies;
70#endif
71
72 out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
73 out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
74
75 out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
76 out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
77
78 value = in_be64(&pmd_regs->pmcr);
79 /* set bits to zero */
80 value &= 0xFFFFFFFFFFFFFFF8ull;
81 /* set bits to next pmode */
82 value |= pmode;
83
84 out_be64(&pmd_regs->pmcr, value);
85
86#ifdef DEBUG
87 /* wait until new pmode appears in status register */
88 value = in_be64(&pmd_regs->pmsr) & 0x07;
89 while (value != pmode) {
90 cpu_relax();
91 value = in_be64(&pmd_regs->pmsr) & 0x07;
92 }
93
94 time = jiffies - time;
95 time = jiffies_to_msecs(time);
96 pr_debug("had to wait %lu ms for a transition using " \
97 "pervasive unit\n", time);
98#endif
99 local_irq_restore(flags);
100
101 return 0;
102}
103
104
105int cbe_cpufreq_get_pmode(int cpu)
106{
107 int ret;
108 struct cbe_pmd_regs __iomem *pmd_regs;
109
110 pmd_regs = cbe_get_cpu_pmd_regs(cpu);
111 ret = in_be64(&pmd_regs->pmsr) & 0x07;
112
113 return ret;
114}
115
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
deleted file mode 100644
index 60a07a4f9326..000000000000
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * pmi backend for the cbe_cpufreq driver
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
5 *
6 * Author: Christian Krafft <krafft@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/timer.h>
26#include <linux/module.h>
27#include <linux/of_platform.h>
28
29#include <asm/processor.h>
30#include <asm/prom.h>
31#include <asm/pmi.h>
32#include <asm/cell-regs.h>
33
34#ifdef DEBUG
35#include <asm/time.h>
36#endif
37
38#include "cbe_cpufreq.h"
39
40static u8 pmi_slow_mode_limit[MAX_CBE];
41
42bool cbe_cpufreq_has_pmi = false;
43EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
44
45/*
46 * hardware specific functions
47 */
48
49int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
50{
51 int ret;
52 pmi_message_t pmi_msg;
53#ifdef DEBUG
54 long time;
55#endif
56 pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
57 pmi_msg.data1 = cbe_cpu_to_node(cpu);
58 pmi_msg.data2 = pmode;
59
60#ifdef DEBUG
61 time = jiffies;
62#endif
63 pmi_send_message(pmi_msg);
64
65#ifdef DEBUG
66 time = jiffies - time;
67 time = jiffies_to_msecs(time);
68 pr_debug("had to wait %lu ms for a transition using " \
69 "PMI\n", time);
70#endif
71 ret = pmi_msg.data2;
72 pr_debug("PMI returned slow mode %d\n", ret);
73
74 return ret;
75}
76EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
77
78
79static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
80{
81 u8 node, slow_mode;
82
83 BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
84
85 node = pmi_msg.data1;
86 slow_mode = pmi_msg.data2;
87
88 pmi_slow_mode_limit[node] = slow_mode;
89
90 pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
91}
92
93static int pmi_notifier(struct notifier_block *nb,
94 unsigned long event, void *data)
95{
96 struct cpufreq_policy *policy = data;
97 struct cpufreq_frequency_table *cbe_freqs;
98 u8 node;
99
100 /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
101 * and CPUFREQ_NOTIFY policy events?)
102 */
103 if (event == CPUFREQ_START)
104 return 0;
105
106 cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
107 node = cbe_cpu_to_node(policy->cpu);
108
109 pr_debug("got notified, event=%lu, node=%u\n", event, node);
110
111 if (pmi_slow_mode_limit[node] != 0) {
112 pr_debug("limiting node %d to slow mode %d\n",
113 node, pmi_slow_mode_limit[node]);
114
115 cpufreq_verify_within_limits(policy, 0,
116
117 cbe_freqs[pmi_slow_mode_limit[node]].frequency);
118 }
119
120 return 0;
121}
122
123static struct notifier_block pmi_notifier_block = {
124 .notifier_call = pmi_notifier,
125};
126
127static struct pmi_handler cbe_pmi_handler = {
128 .type = PMI_TYPE_FREQ_CHANGE,
129 .handle_pmi_message = cbe_cpufreq_handle_pmi,
130};
131
132
133
134static int __init cbe_cpufreq_pmi_init(void)
135{
136 cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
137
138 if (!cbe_cpufreq_has_pmi)
139 return -ENODEV;
140
141 cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
142
143 return 0;
144}
145
146static void __exit cbe_cpufreq_pmi_exit(void)
147{
148 cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
149 pmi_unregister_handler(&cbe_pmi_handler);
150}
151
152module_init(cbe_cpufreq_pmi_init);
153module_exit(cbe_cpufreq_pmi_exit);
154
155MODULE_LICENSE("GPL");
156MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
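
The PMI backend above works indirectly: cbe_cpufreq_set_pmode_pmi() sends a PMI_TYPE_FREQ_CHANGE message to firmware, incoming PMI messages record a per-node slow-mode limit, and a cpufreq policy notifier then enforces that limit. The clamp itself is one call; a sketch of the pattern used in pmi_notifier(), where table and limit stand in for cbe_freqs and pmi_slow_mode_limit[node]:

    /* Clamp the policy's maximum to the frequency of the slow-mode
     * entry reported by firmware (illustration of the code above). */
    if (limit != 0)
            cpufreq_verify_within_limits(policy, 0, table[limit].frequency);
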
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 890f30e70f98..be1e7958909e 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -273,10 +273,9 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
273 273
274 freqs.old = policy->cur; 274 freqs.old = policy->cur;
275 freqs.new = pas_freqs[pas_astate_new].frequency; 275 freqs.new = pas_freqs[pas_astate_new].frequency;
276 freqs.cpu = policy->cpu;
277 276
278 mutex_lock(&pas_switch_mutex); 277 mutex_lock(&pas_switch_mutex);
279 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 278 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
280 279
281 pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n", 280 pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
282 policy->cpu, 281 policy->cpu,
@@ -288,7 +287,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
288 for_each_online_cpu(i) 287 for_each_online_cpu(i)
289 set_astate(i, pas_astate_new); 288 set_astate(i, pas_astate_new);
290 289
291 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 290 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
292 mutex_unlock(&pas_switch_mutex); 291 mutex_unlock(&pas_switch_mutex);
293 292
294 ppc_proc_freq = freqs.new * 1000ul; 293 ppc_proc_freq = freqs.new * 1000ul;
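
The pasemi change reflects a cpufreq core API update: cpufreq_notify_transition() now takes the policy as its first argument and the driver no longer fills in freqs.cpu, since the core derives the affected CPUs from the policy. Before and after, as a sketch:

    /* Old style: the driver named the CPU itself. */
    freqs.cpu = policy->cpu;
    cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

    /* New style: the policy is passed and freqs.cpu is gone. */
    cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

The same conversion is applied to the powermac cpufreq_32 and cpufreq_64 drivers below.
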
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 311b804353b1..3104fad82480 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -335,7 +335,8 @@ static int pmu_set_cpu_speed(int low_speed)
335 return 0; 335 return 0;
336} 336}
337 337
338static int do_set_cpu_speed(int speed_mode, int notify) 338static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
339 int notify)
339{ 340{
340 struct cpufreq_freqs freqs; 341 struct cpufreq_freqs freqs;
341 unsigned long l3cr; 342 unsigned long l3cr;
@@ -343,13 +344,12 @@ static int do_set_cpu_speed(int speed_mode, int notify)
343 344
344 freqs.old = cur_freq; 345 freqs.old = cur_freq;
345 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; 346 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
346 freqs.cpu = smp_processor_id();
347 347
348 if (freqs.old == freqs.new) 348 if (freqs.old == freqs.new)
349 return 0; 349 return 0;
350 350
351 if (notify) 351 if (notify)
352 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 352 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
353 if (speed_mode == CPUFREQ_LOW && 353 if (speed_mode == CPUFREQ_LOW &&
354 cpu_has_feature(CPU_FTR_L3CR)) { 354 cpu_has_feature(CPU_FTR_L3CR)) {
355 l3cr = _get_L3CR(); 355 l3cr = _get_L3CR();
@@ -366,7 +366,7 @@ static int do_set_cpu_speed(int speed_mode, int notify)
366 _set_L3CR(prev_l3cr); 366 _set_L3CR(prev_l3cr);
367 } 367 }
368 if (notify) 368 if (notify)
369 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
370 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; 370 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
371 371
372 return 0; 372 return 0;
@@ -393,7 +393,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
393 target_freq, relation, &newstate)) 393 target_freq, relation, &newstate))
394 return -EINVAL; 394 return -EINVAL;
395 395
396 rc = do_set_cpu_speed(newstate, 1); 396 rc = do_set_cpu_speed(policy, newstate, 1);
397 397
398 ppc_proc_freq = cur_freq * 1000ul; 398 ppc_proc_freq = cur_freq * 1000ul;
399 return rc; 399 return rc;
@@ -442,7 +442,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
442 no_schedule = 1; 442 no_schedule = 1;
443 sleep_freq = cur_freq; 443 sleep_freq = cur_freq;
444 if (cur_freq == low_freq && !is_pmu_based) 444 if (cur_freq == low_freq && !is_pmu_based)
445 do_set_cpu_speed(CPUFREQ_HIGH, 0); 445 do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
446 return 0; 446 return 0;
447} 447}
448 448
@@ -458,7 +458,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
458 * is that we force a switch to whatever it was, which is 458 * is that we force a switch to whatever it was, which is
459 * probably high speed due to our suspend() routine 459 * probably high speed due to our suspend() routine
460 */ 460 */
461 do_set_cpu_speed(sleep_freq == low_freq ? 461 do_set_cpu_speed(policy, sleep_freq == low_freq ?
462 CPUFREQ_LOW : CPUFREQ_HIGH, 0); 462 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
463 463
464 ppc_proc_freq = cur_freq * 1000ul; 464 ppc_proc_freq = cur_freq * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 9650c6029c82..7ba423431cfe 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -339,11 +339,10 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy,
339 339
340 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; 340 freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
341 freqs.new = g5_cpu_freqs[newstate].frequency; 341 freqs.new = g5_cpu_freqs[newstate].frequency;
342 freqs.cpu = 0;
343 342
344 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 343 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
345 rc = g5_switch_freq(newstate); 344 rc = g5_switch_freq(newstate);
346 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 345 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
347 346
348 mutex_unlock(&g5_switch_mutex); 347 mutex_unlock(&g5_switch_mutex);
349 348
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2372c609fa2b..9a432de363b8 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void)
72 return get_memblock_size(); 72 return get_memblock_size();
73} 73}
74 74
75#ifdef CONFIG_MEMORY_HOTREMOVE
75static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) 76static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
76{ 77{
77 unsigned long start, start_pfn; 78 unsigned long start, start_pfn;
@@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np)
153 ret = pseries_remove_memblock(base, lmb_size); 154 ret = pseries_remove_memblock(base, lmb_size);
154 return ret; 155 return ret;
155} 156}
157#else
158static inline int pseries_remove_memblock(unsigned long base,
159 unsigned int memblock_size)
160{
161 return -EOPNOTSUPP;
162}
163static inline int pseries_remove_memory(struct device_node *np)
164{
165 return -EOPNOTSUPP;
166}
167#endif /* CONFIG_MEMORY_HOTREMOVE */
156 168
157static int pseries_add_memory(struct device_node *np) 169static int pseries_add_memory(struct device_node *np)
158{ 170{
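
The hunk above compiles the memory-removal paths only when CONFIG_MEMORY_HOTREMOVE is enabled; without it, inline stubs returning -EOPNOTSUPP keep the callers building. The pattern, reduced to its shape:

    #ifdef CONFIG_MEMORY_HOTREMOVE
    static int pseries_remove_memblock(unsigned long base, unsigned int size)
    {
            /* real removal path, elided here */
            return 0;
    }
    #else
    static inline int pseries_remove_memblock(unsigned long base,
                                              unsigned int size)
    {
            return -EOPNOTSUPP;
    }
    #endif /* CONFIG_MEMORY_HOTREMOVE */
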
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4d806b419606..4644efa06941 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -23,8 +23,8 @@
23#include "pseries.h" 23#include "pseries.h"
24 24
25struct cpuidle_driver pseries_idle_driver = { 25struct cpuidle_driver pseries_idle_driver = {
26 .name = "pseries_idle", 26 .name = "pseries_idle",
27 .owner = THIS_MODULE, 27 .owner = THIS_MODULE,
28}; 28};
29 29
30#define MAX_IDLE_STATE_COUNT 2 30#define MAX_IDLE_STATE_COUNT 2
@@ -33,10 +33,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
33static struct cpuidle_device __percpu *pseries_cpuidle_devices; 33static struct cpuidle_device __percpu *pseries_cpuidle_devices;
34static struct cpuidle_state *cpuidle_state_table; 34static struct cpuidle_state *cpuidle_state_table;
35 35
36static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) 36static inline void idle_loop_prolog(unsigned long *in_purr)
37{ 37{
38
39 *kt_before = ktime_get();
40 *in_purr = mfspr(SPRN_PURR); 38 *in_purr = mfspr(SPRN_PURR);
41 /* 39 /*
42 * Indicate to the HV that we are idle. Now would be 40 * Indicate to the HV that we are idle. Now would be
@@ -45,12 +43,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
45 get_lppaca()->idle = 1; 43 get_lppaca()->idle = 1;
46} 44}
47 45
48static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) 46static inline void idle_loop_epilog(unsigned long in_purr)
49{ 47{
50 get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; 48 get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
51 get_lppaca()->idle = 0; 49 get_lppaca()->idle = 0;
52
53 return ktime_to_us(ktime_sub(ktime_get(), kt_before));
54} 50}
55 51
56static int snooze_loop(struct cpuidle_device *dev, 52static int snooze_loop(struct cpuidle_device *dev,
@@ -58,10 +54,9 @@ static int snooze_loop(struct cpuidle_device *dev,
58 int index) 54 int index)
59{ 55{
60 unsigned long in_purr; 56 unsigned long in_purr;
61 ktime_t kt_before;
62 int cpu = dev->cpu; 57 int cpu = dev->cpu;
63 58
64 idle_loop_prolog(&in_purr, &kt_before); 59 idle_loop_prolog(&in_purr);
65 local_irq_enable(); 60 local_irq_enable();
66 set_thread_flag(TIF_POLLING_NRFLAG); 61 set_thread_flag(TIF_POLLING_NRFLAG);
67 62
@@ -75,8 +70,8 @@ static int snooze_loop(struct cpuidle_device *dev,
75 clear_thread_flag(TIF_POLLING_NRFLAG); 70 clear_thread_flag(TIF_POLLING_NRFLAG);
76 smp_mb(); 71 smp_mb();
77 72
78 dev->last_residency = 73 idle_loop_epilog(in_purr);
79 (int)idle_loop_epilog(in_purr, kt_before); 74
80 return index; 75 return index;
81} 76}
82 77
@@ -102,9 +97,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
102 int index) 97 int index)
103{ 98{
104 unsigned long in_purr; 99 unsigned long in_purr;
105 ktime_t kt_before;
106 100
107 idle_loop_prolog(&in_purr, &kt_before); 101 idle_loop_prolog(&in_purr);
108 get_lppaca()->donate_dedicated_cpu = 1; 102 get_lppaca()->donate_dedicated_cpu = 1;
109 103
110 ppc64_runlatch_off(); 104 ppc64_runlatch_off();
@@ -112,8 +106,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
112 check_and_cede_processor(); 106 check_and_cede_processor();
113 107
114 get_lppaca()->donate_dedicated_cpu = 0; 108 get_lppaca()->donate_dedicated_cpu = 0;
115 dev->last_residency = 109
116 (int)idle_loop_epilog(in_purr, kt_before); 110 idle_loop_epilog(in_purr);
111
117 return index; 112 return index;
118} 113}
119 114
@@ -122,9 +117,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
122 int index) 117 int index)
123{ 118{
124 unsigned long in_purr; 119 unsigned long in_purr;
125 ktime_t kt_before;
126 120
127 idle_loop_prolog(&in_purr, &kt_before); 121 idle_loop_prolog(&in_purr);
128 122
129 /* 123 /*
130 * Yield the processor to the hypervisor. We return if 124 * Yield the processor to the hypervisor. We return if
@@ -135,8 +129,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
135 */ 129 */
136 check_and_cede_processor(); 130 check_and_cede_processor();
137 131
138 dev->last_residency = 132 idle_loop_epilog(in_purr);
139 (int)idle_loop_epilog(in_purr, kt_before); 133
140 return index; 134 return index;
141} 135}
142 136
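
In the processor_idle hunks above, the driver stops computing dev->last_residency itself; residency measurement is handled by the cpuidle core, so the ktime bookkeeping disappears and the prolog/epilog helpers shrink to PURR accounting plus the lppaca idle flag. The simplified helpers, as they read after this change:

    static inline void idle_loop_prolog(unsigned long *in_purr)
    {
            *in_purr = mfspr(SPRN_PURR);
            /* Indicate to the hypervisor that we are idle. */
            get_lppaca()->idle = 1;
    }

    static inline void idle_loop_epilog(unsigned long in_purr)
    {
            get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
            get_lppaca()->idle = 0;
    }
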
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 13f85defabed..3e34cd224b7c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2947,7 +2947,7 @@ static void sysrq_handle_xmon(int key)
2947 2947
2948static struct sysrq_key_op sysrq_xmon_op = { 2948static struct sysrq_key_op sysrq_xmon_op = {
2949 .handler = sysrq_handle_xmon, 2949 .handler = sysrq_handle_xmon,
2950 .help_msg = "Xmon", 2950 .help_msg = "xmon(x)",
2951 .action_msg = "Entering xmon", 2951 .action_msg = "Entering xmon",
2952}; 2952};
2953 2953
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index eb8fb629f00b..2c9789da0e24 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -91,6 +91,7 @@ config S390
91 select ARCH_INLINE_WRITE_UNLOCK_BH 91 select ARCH_INLINE_WRITE_UNLOCK_BH
92 select ARCH_INLINE_WRITE_UNLOCK_IRQ 92 select ARCH_INLINE_WRITE_UNLOCK_IRQ
93 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 93 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
94 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 95 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
95 select ARCH_WANT_IPC_PARSE_VERSION 96 select ARCH_WANT_IPC_PARSE_VERSION
96 select BUILDTIME_EXTABLE_SORT 97 select BUILDTIME_EXTABLE_SORT
@@ -131,7 +132,6 @@ config S390
131 select HAVE_PERF_EVENTS 132 select HAVE_PERF_EVENTS
132 select HAVE_REGS_AND_STACK_ACCESS_API 133 select HAVE_REGS_AND_STACK_ACCESS_API
133 select HAVE_SYSCALL_TRACEPOINTS 134 select HAVE_SYSCALL_TRACEPOINTS
134 select HAVE_SYSCALL_WRAPPERS
135 select HAVE_UID16 if 32BIT 135 select HAVE_UID16 if 32BIT
136 select HAVE_VIRT_CPU_ACCOUNTING 136 select HAVE_VIRT_CPU_ACCOUNTING
137 select VIRT_TO_BUS 137 select VIRT_TO_BUS
@@ -375,19 +375,6 @@ config PACK_STACK
375 375
376 Say Y if you are unsure. 376 Say Y if you are unsure.
377 377
378config SMALL_STACK
379 def_bool n
380 prompt "Use 8kb for kernel stack instead of 16kb"
381 depends on PACK_STACK && 64BIT && !LOCKDEP
382 help
383 If you say Y here and the compiler supports the -mkernel-backchain
384 option the kernel will use a smaller kernel stack size. The reduced
385 size is 8kb instead of 16kb. This allows to run more threads on a
386 system and reduces the pressure on the memory management for higher
387 order page allocations.
388
389 Say N if you are unsure.
390
391config CHECK_STACK 378config CHECK_STACK
392 def_bool y 379 def_bool y
393 prompt "Detect kernel stack overflow" 380 prompt "Detect kernel stack overflow"
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index fc32a2df4974..c56878e1245f 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -17,20 +17,6 @@ config STRICT_DEVMEM
17 17
18 If you are unsure, say Y. 18 If you are unsure, say Y.
19 19
20config DEBUG_STRICT_USER_COPY_CHECKS
21 def_bool n
22 prompt "Strict user copy size checks"
23 ---help---
24 Enabling this option turns a certain set of sanity checks for user
25 copy operations into compile time warnings.
26
27 The copy_from_user() etc checks are there to help test if there
28 are sufficient security checks on the length argument of
29 the copy operation, by having gcc prove that the argument is
30 within bounds.
31
32 If unsure, or if you run an older (pre 4.4) gcc, say N.
33
34config S390_PTDUMP 20config S390_PTDUMP
35 bool "Export kernel pagetable layout to userspace via debugfs" 21 bool "Export kernel pagetable layout to userspace via debugfs"
36 depends on DEBUG_KERNEL 22 depends on DEBUG_KERNEL
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7e3ce78d4290..a7d68a467ce8 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
55ifeq ($(call cc-option-yn,-mkernel-backchain),y) 55ifeq ($(call cc-option-yn,-mkernel-backchain),y)
56cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK 56cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
57aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK 57aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
58cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
59aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
60ifdef CONFIG_SMALL_STACK
61STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
62endif
63endif 58endif
64 59
65# new style option for packed stacks 60# new style option for packed stacks
66ifeq ($(call cc-option-yn,-mpacked-stack),y) 61ifeq ($(call cc-option-yn,-mpacked-stack),y)
67cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK 62cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
68aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK 63aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
69cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
70aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
71ifdef CONFIG_SMALL_STACK
72STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
73endif
74endif 64endif
75 65
76ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) 66ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40c6752..bb5dd496614f 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
105int hypfs_dbfs_init(void) 105int hypfs_dbfs_init(void)
106{ 106{
107 dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); 107 dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
108 if (IS_ERR(dbfs_dir)) 108 return PTR_RET(dbfs_dir);
109 return PTR_ERR(dbfs_dir);
110 return 0;
111} 109}
112 110
113void hypfs_dbfs_exit(void) 111void hypfs_dbfs_exit(void)
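
hypfs_dbfs_init() above is shortened with PTR_RET(), which folds the usual IS_ERR()/PTR_ERR()/return 0 sequence into a single expression. Its effect is roughly equivalent to this sketch:

    /* Roughly what PTR_RET(ptr) evaluates to: */
    static inline int ptr_ret_sketch(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }
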
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15422933c60b..4d8604e311f3 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
61 61
62#ifndef CONFIG_64BIT 62#ifndef CONFIG_64BIT
63 63
64#define __BITOPS_ALIGN 3
65#define __BITOPS_WORDSIZE 32
66#define __BITOPS_OR "or" 64#define __BITOPS_OR "or"
67#define __BITOPS_AND "nr" 65#define __BITOPS_AND "nr"
68#define __BITOPS_XOR "xr" 66#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
81 79
82#else /* CONFIG_64BIT */ 80#else /* CONFIG_64BIT */
83 81
84#define __BITOPS_ALIGN 7
85#define __BITOPS_WORDSIZE 64
86#define __BITOPS_OR "ogr" 82#define __BITOPS_OR "ogr"
87#define __BITOPS_AND "ngr" 83#define __BITOPS_AND "ngr"
88#define __BITOPS_XOR "xgr" 84#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
101 97
102#endif /* CONFIG_64BIT */ 98#endif /* CONFIG_64BIT */
103 99
104#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 100#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
105#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
106 101
107#ifdef CONFIG_SMP 102#ifdef CONFIG_SMP
108/* 103/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
114 109
115 addr = (unsigned long) ptr; 110 addr = (unsigned long) ptr;
116 /* calculate address for CS */ 111 /* calculate address for CS */
117 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 112 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
118 /* make OR mask */ 113 /* make OR mask */
119 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 114 mask = 1UL << (nr & (BITS_PER_LONG - 1));
120 /* Do the atomic update. */ 115 /* Do the atomic update. */
121 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); 116 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
122} 117}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
130 125
131 addr = (unsigned long) ptr; 126 addr = (unsigned long) ptr;
132 /* calculate address for CS */ 127 /* calculate address for CS */
133 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 128 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
134 /* make AND mask */ 129 /* make AND mask */
135 mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); 130 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
136 /* Do the atomic update. */ 131 /* Do the atomic update. */
137 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); 132 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
138} 133}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
146 141
147 addr = (unsigned long) ptr; 142 addr = (unsigned long) ptr;
148 /* calculate address for CS */ 143 /* calculate address for CS */
149 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 144 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
150 /* make XOR mask */ 145 /* make XOR mask */
151 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 146 mask = 1UL << (nr & (BITS_PER_LONG - 1));
152 /* Do the atomic update. */ 147 /* Do the atomic update. */
153 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); 148 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
154} 149}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
163 158
164 addr = (unsigned long) ptr; 159 addr = (unsigned long) ptr;
165 /* calculate address for CS */ 160 /* calculate address for CS */
166 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 161 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
167 /* make OR/test mask */ 162 /* make OR/test mask */
168 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 163 mask = 1UL << (nr & (BITS_PER_LONG - 1));
169 /* Do the atomic update. */ 164 /* Do the atomic update. */
170 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); 165 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
171 __BITOPS_BARRIER(); 166 barrier();
172 return (old & mask) != 0; 167 return (old & mask) != 0;
173} 168}
174 169
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
182 177
183 addr = (unsigned long) ptr; 178 addr = (unsigned long) ptr;
184 /* calculate address for CS */ 179 /* calculate address for CS */
185 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 180 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
186 /* make AND/test mask */ 181 /* make AND/test mask */
187 mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); 182 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
188 /* Do the atomic update. */ 183 /* Do the atomic update. */
189 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); 184 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
190 __BITOPS_BARRIER(); 185 barrier();
191 return (old ^ new) != 0; 186 return (old ^ new) != 0;
192} 187}
193 188
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
201 196
202 addr = (unsigned long) ptr; 197 addr = (unsigned long) ptr;
203 /* calculate address for CS */ 198 /* calculate address for CS */
204 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 199 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
205 /* make XOR/test mask */ 200 /* make XOR/test mask */
206 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 201 mask = 1UL << (nr & (BITS_PER_LONG - 1));
207 /* Do the atomic update. */ 202 /* Do the atomic update. */
208 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); 203 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
209 __BITOPS_BARRIER(); 204 barrier();
210 return (old & mask) != 0; 205 return (old & mask) != 0;
211} 206}
212#endif /* CONFIG_SMP */ 207#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
218{ 213{
219 unsigned long addr; 214 unsigned long addr;
220 215
221 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 216 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
222 asm volatile( 217 asm volatile(
223 " oc %O0(1,%R0),%1" 218 " oc %O0(1,%R0),%1"
224 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 219 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
229{ 224{
230 unsigned long addr; 225 unsigned long addr;
231 226
232 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 227 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
233 *(unsigned char *) addr |= 1 << (nr & 7); 228 *(unsigned char *) addr |= 1 << (nr & 7);
234} 229}
235 230
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
246{ 241{
247 unsigned long addr; 242 unsigned long addr;
248 243
249 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 244 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
250 asm volatile( 245 asm volatile(
251 " nc %O0(1,%R0),%1" 246 " nc %O0(1,%R0),%1"
252 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); 247 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
257{ 252{
258 unsigned long addr; 253 unsigned long addr;
259 254
260 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 255 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
261 *(unsigned char *) addr &= ~(1 << (nr & 7)); 256 *(unsigned char *) addr &= ~(1 << (nr & 7));
262} 257}
263 258
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
273{ 268{
274 unsigned long addr; 269 unsigned long addr;
275 270
276 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 271 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
277 asm volatile( 272 asm volatile(
278 " xc %O0(1,%R0),%1" 273 " xc %O0(1,%R0),%1"
279 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 274 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
284{ 279{
285 unsigned long addr; 280 unsigned long addr;
286 281
287 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 282 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
288 *(unsigned char *) addr ^= 1 << (nr & 7); 283 *(unsigned char *) addr ^= 1 << (nr & 7);
289} 284}
290 285
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
302 unsigned long addr; 297 unsigned long addr;
303 unsigned char ch; 298 unsigned char ch;
304 299
305 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 300 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
306 ch = *(unsigned char *) addr; 301 ch = *(unsigned char *) addr;
307 asm volatile( 302 asm volatile(
308 " oc %O0(1,%R0),%1" 303 " oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
321 unsigned long addr; 316 unsigned long addr;
322 unsigned char ch; 317 unsigned char ch;
323 318
324 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 319 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
325 ch = *(unsigned char *) addr; 320 ch = *(unsigned char *) addr;
326 asm volatile( 321 asm volatile(
327 " nc %O0(1,%R0),%1" 322 " nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
340 unsigned long addr; 335 unsigned long addr;
341 unsigned char ch; 336 unsigned char ch;
342 337
343 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 338 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
344 ch = *(unsigned char *) addr; 339 ch = *(unsigned char *) addr;
345 asm volatile( 340 asm volatile(
346 " xc %O0(1,%R0),%1" 341 " xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
376 unsigned long addr; 371 unsigned long addr;
377 unsigned char ch; 372 unsigned char ch;
378 373
379 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 374 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
380 ch = *(volatile unsigned char *) addr; 375 ch = *(volatile unsigned char *) addr;
381 return (ch >> (nr & 7)) & 1; 376 return (ch >> (nr & 7)) & 1;
382} 377}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
384static inline int 379static inline int
385__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { 380__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
386 return (((volatile char *) addr) 381 return (((volatile char *) addr)
387 [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; 382 [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
388} 383}
389 384
390#define test_bit(nr,addr) \ 385#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
693 688
694 if (offset >= size) 689 if (offset >= size)
695 return size; 690 return size;
696 bit = offset & (__BITOPS_WORDSIZE - 1); 691 bit = offset & (BITS_PER_LONG - 1);
697 offset -= bit; 692 offset -= bit;
698 size -= offset; 693 size -= offset;
699 p = addr + offset / __BITOPS_WORDSIZE; 694 p = addr + offset / BITS_PER_LONG;
700 if (bit) { 695 if (bit) {
701 set = __flo_word(0, *p & (~0UL << bit)); 696 set = __flo_word(0, *p & (~0UL << bit));
702 if (set >= size) 697 if (set >= size)
703 return size + offset; 698 return size + offset;
704 if (set < __BITOPS_WORDSIZE) 699 if (set < BITS_PER_LONG)
705 return set + offset; 700 return set + offset;
706 offset += __BITOPS_WORDSIZE; 701 offset += BITS_PER_LONG;
707 size -= __BITOPS_WORDSIZE; 702 size -= BITS_PER_LONG;
708 p++; 703 p++;
709 } 704 }
710 return offset + find_first_bit_left(p, size); 705 return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
736 731
737 if (offset >= size) 732 if (offset >= size)
738 return size; 733 return size;
739 bit = offset & (__BITOPS_WORDSIZE - 1); 734 bit = offset & (BITS_PER_LONG - 1);
740 offset -= bit; 735 offset -= bit;
741 size -= offset; 736 size -= offset;
742 p = addr + offset / __BITOPS_WORDSIZE; 737 p = addr + offset / BITS_PER_LONG;
743 if (bit) { 738 if (bit) {
744 /* 739 /*
745 * __ffz_word returns __BITOPS_WORDSIZE 740 * __ffz_word returns BITS_PER_LONG
746 * if no zero bit is present in the word. 741 * if no zero bit is present in the word.
747 */ 742 */
748 set = __ffz_word(bit, *p >> bit); 743 set = __ffz_word(bit, *p >> bit);
749 if (set >= size) 744 if (set >= size)
750 return size + offset; 745 return size + offset;
751 if (set < __BITOPS_WORDSIZE) 746 if (set < BITS_PER_LONG)
752 return set + offset; 747 return set + offset;
753 offset += __BITOPS_WORDSIZE; 748 offset += BITS_PER_LONG;
754 size -= __BITOPS_WORDSIZE; 749 size -= BITS_PER_LONG;
755 p++; 750 p++;
756 } 751 }
757 return offset + find_first_zero_bit(p, size); 752 return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
773 768
774 if (offset >= size) 769 if (offset >= size)
775 return size; 770 return size;
776 bit = offset & (__BITOPS_WORDSIZE - 1); 771 bit = offset & (BITS_PER_LONG - 1);
777 offset -= bit; 772 offset -= bit;
778 size -= offset; 773 size -= offset;
779 p = addr + offset / __BITOPS_WORDSIZE; 774 p = addr + offset / BITS_PER_LONG;
780 if (bit) { 775 if (bit) {
781 /* 776 /*
782 * __ffs_word returns __BITOPS_WORDSIZE 777 * __ffs_word returns BITS_PER_LONG
783 * if no one bit is present in the word. 778 * if no one bit is present in the word.
784 */ 779 */
785 set = __ffs_word(0, *p & (~0UL << bit)); 780 set = __ffs_word(0, *p & (~0UL << bit));
786 if (set >= size) 781 if (set >= size)
787 return size + offset; 782 return size + offset;
788 if (set < __BITOPS_WORDSIZE) 783 if (set < BITS_PER_LONG)
789 return set + offset; 784 return set + offset;
790 offset += __BITOPS_WORDSIZE; 785 offset += BITS_PER_LONG;
791 size -= __BITOPS_WORDSIZE; 786 size -= BITS_PER_LONG;
792 p++; 787 p++;
793 } 788 }
794 return offset + find_first_bit(p, size); 789 return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
843 838
844 if (offset >= size) 839 if (offset >= size)
845 return size; 840 return size;
846 bit = offset & (__BITOPS_WORDSIZE - 1); 841 bit = offset & (BITS_PER_LONG - 1);
847 offset -= bit; 842 offset -= bit;
848 size -= offset; 843 size -= offset;
849 p = addr + offset / __BITOPS_WORDSIZE; 844 p = addr + offset / BITS_PER_LONG;
850 if (bit) { 845 if (bit) {
851 /* 846 /*
852 * s390 version of ffz returns __BITOPS_WORDSIZE 847 * s390 version of ffz returns BITS_PER_LONG
853 * if no zero bit is present in the word. 848 * if no zero bit is present in the word.
854 */ 849 */
855 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); 850 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
856 if (set >= size) 851 if (set >= size)
857 return size + offset; 852 return size + offset;
858 if (set < __BITOPS_WORDSIZE) 853 if (set < BITS_PER_LONG)
859 return set + offset; 854 return set + offset;
860 offset += __BITOPS_WORDSIZE; 855 offset += BITS_PER_LONG;
861 size -= __BITOPS_WORDSIZE; 856 size -= BITS_PER_LONG;
862 p++; 857 p++;
863 } 858 }
864 return offset + find_first_zero_bit_le(p, size); 859 return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
885 880
886 if (offset >= size) 881 if (offset >= size)
887 return size; 882 return size;
888 bit = offset & (__BITOPS_WORDSIZE - 1); 883 bit = offset & (BITS_PER_LONG - 1);
889 offset -= bit; 884 offset -= bit;
890 size -= offset; 885 size -= offset;
891 p = addr + offset / __BITOPS_WORDSIZE; 886 p = addr + offset / BITS_PER_LONG;
892 if (bit) { 887 if (bit) {
893 /* 888 /*
894 * s390 version of ffz returns __BITOPS_WORDSIZE 889 * s390 version of ffz returns BITS_PER_LONG
895 * if no zero bit is present in the word. 890 * if no zero bit is present in the word.
896 */ 891 */
897 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); 892 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
898 if (set >= size) 893 if (set >= size)
899 return size + offset; 894 return size + offset;
900 if (set < __BITOPS_WORDSIZE) 895 if (set < BITS_PER_LONG)
901 return set + offset; 896 return set + offset;
902 offset += __BITOPS_WORDSIZE; 897 offset += BITS_PER_LONG;
903 size -= __BITOPS_WORDSIZE; 898 size -= BITS_PER_LONG;
904 p++; 899 p++;
905 } 900 }
906 return offset + find_first_bit_le(p, size); 901 return offset + find_first_bit_le(p, size);
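
The bitops conversion above drops the private __BITOPS_WORDSIZE/__BITOPS_ALIGN constants in favor of the generic BITS_PER_LONG and replaces __BITOPS_BARRIER() with the standard barrier(); the arithmetic is otherwise unchanged. Written out once, a sketch of what the compare-and-swap based helpers compute:

    /* Sketch of the word-address and bit-mask computation used by the
     * CS-based helpers (nr is the absolute bit number). */
    static inline void bitop_addr_mask(volatile unsigned long *ptr, unsigned long nr,
                                       unsigned long *addr, unsigned long *mask)
    {
            *addr = (unsigned long) ptr +
                    ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
            *mask = 1UL << (nr & (BITS_PER_LONG - 1));
    }
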
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e6061617a50b..f201af8be580 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
220#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) 220#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
221 221
222extern struct ccw_device *ccw_device_probe_console(void); 222extern struct ccw_device *ccw_device_probe_console(void);
223extern int ccw_device_force_console(void); 223extern void ccw_device_wait_idle(struct ccw_device *);
224extern int ccw_device_force_console(struct ccw_device *);
224 225
225int ccw_device_siosl(struct ccw_device *); 226int ccw_device_siosl(struct ccw_device *);
226 227
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924167d7..ffb898961c8d 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
296 return 0; 296 return 0;
297} 297}
298 298
299extern void wait_cons_dev(void);
300
301extern void css_schedule_reprobe(void); 299extern void css_schedule_reprobe(void);
302 300
303extern void reipl_ccw_dev(struct ccw_dev_id *id); 301extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6cd1f0..c1e7c646727c 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
70typedef u64 compat_u64; 70typedef u64 compat_u64;
71typedef u32 compat_uptr_t; 71typedef u32 compat_uptr_t;
72 72
73typedef struct {
74 u32 mask;
75 u32 addr;
76} __aligned(8) psw_compat_t;
77
78typedef struct {
79 psw_compat_t psw;
80 u32 gprs[NUM_GPRS];
81 u32 acrs[NUM_ACRS];
82 u32 orig_gpr2;
83} s390_compat_regs;
84
85typedef struct {
86 u32 gprs_high[NUM_GPRS];
87} s390_compat_regs_high;
88
73struct compat_timespec { 89struct compat_timespec {
74 compat_time_t tv_sec; 90 compat_time_t tv_sec;
75 s32 tv_nsec; 91 s32 tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
124}; 140};
125 141
126struct compat_statfs { 142struct compat_statfs {
127 s32 f_type; 143 u32 f_type;
128 s32 f_bsize; 144 u32 f_bsize;
129 s32 f_blocks; 145 u32 f_blocks;
130 s32 f_bfree; 146 u32 f_bfree;
131 s32 f_bavail; 147 u32 f_bavail;
132 s32 f_files; 148 u32 f_files;
133 s32 f_ffree; 149 u32 f_ffree;
150 compat_fsid_t f_fsid;
151 u32 f_namelen;
152 u32 f_frsize;
153 u32 f_flags;
154 u32 f_spare[4];
155};
156
157struct compat_statfs64 {
158 u32 f_type;
159 u32 f_bsize;
160 u64 f_blocks;
161 u64 f_bfree;
162 u64 f_bavail;
163 u64 f_files;
164 u64 f_ffree;
134 compat_fsid_t f_fsid; 165 compat_fsid_t f_fsid;
135 s32 f_namelen; 166 u32 f_namelen;
136 s32 f_frsize; 167 u32 f_frsize;
137 s32 f_flags; 168 u32 f_flags;
138 s32 f_spare[5]; 169 u32 f_spare[4];
139}; 170};
140 171
141#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff 172#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
248 return is_32bit_task(); 279 return is_32bit_task();
249} 280}
250 281
251#endif
252
253static inline void __user *arch_compat_alloc_user_space(long len) 282static inline void __user *arch_compat_alloc_user_space(long len)
254{ 283{
255 unsigned long stack; 284 unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
260 return (void __user *) (stack - len); 289 return (void __user *) (stack - len);
261} 290}
262 291
292#endif
293
263struct compat_ipc64_perm { 294struct compat_ipc64_perm {
264 compat_key_t key; 295 compat_key_t key;
265 __compat_uid32_t uid; 296 __compat_uid32_t uid;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24b85a2..78f4f8711d58 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
119 */ 119 */
120 120
121#include <asm/ptrace.h> 121#include <asm/ptrace.h>
122#include <asm/compat.h>
123#include <asm/syscall.h>
122#include <asm/user.h> 124#include <asm/user.h>
123 125
124typedef s390_fp_regs elf_fpregset_t; 126typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
180extern char elf_platform[]; 182extern char elf_platform[];
181#define ELF_PLATFORM (elf_platform) 183#define ELF_PLATFORM (elf_platform)
182 184
183#ifdef CONFIG_64BIT 185#ifndef CONFIG_COMPAT
186#define SET_PERSONALITY(ex) \
187do { \
188 set_personality(PER_LINUX | \
189 (current->personality & (~PER_MASK))); \
190 current_thread_info()->sys_call_table = \
191 (unsigned long) &sys_call_table; \
192} while (0)
193#else /* CONFIG_COMPAT */
184#define SET_PERSONALITY(ex) \ 194#define SET_PERSONALITY(ex) \
185do { \ 195do { \
186 if (personality(current->personality) != PER_LINUX32) \ 196 if (personality(current->personality) != PER_LINUX32) \
187 set_personality(PER_LINUX | \ 197 set_personality(PER_LINUX | \
188 (current->personality & ~PER_MASK)); \ 198 (current->personality & ~PER_MASK)); \
189 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 199 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
190 set_thread_flag(TIF_31BIT); \ 200 set_thread_flag(TIF_31BIT); \
191 else \ 201 current_thread_info()->sys_call_table = \
202 (unsigned long) &sys_call_table_emu; \
203 } else { \
192 clear_thread_flag(TIF_31BIT); \ 204 clear_thread_flag(TIF_31BIT); \
205 current_thread_info()->sys_call_table = \
206 (unsigned long) &sys_call_table; \
207 } \
193} while (0) 208} while (0)
194#endif /* CONFIG_64BIT */ 209#endif /* CONFIG_COMPAT */
195 210
196#define STACK_RND_MASK 0x7ffUL 211#define STACK_RND_MASK 0x7ffUL
197 212
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 593753ee07f3..bd90359d6d22 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
114#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ 114#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
115({ \ 115({ \
116 pte_t __pte = huge_ptep_get(__ptep); \ 116 pte_t __pte = huge_ptep_get(__ptep); \
117 if (pte_write(__pte)) { \ 117 if (huge_pte_write(__pte)) { \
118 huge_ptep_invalidate(__mm, __addr, __ptep); \ 118 huge_ptep_invalidate(__mm, __addr, __ptep); \
119 set_huge_pte_at(__mm, __addr, __ptep, \ 119 set_huge_pte_at(__mm, __addr, __ptep, \
120 huge_pte_wrprotect(__pte)); \ 120 huge_pte_wrprotect(__pte)); \
@@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
127 huge_ptep_invalidate(vma->vm_mm, address, ptep); 127 huge_ptep_invalidate(vma->vm_mm, address, ptep);
128} 128}
129 129
130static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
131{
132 pte_t pte;
133 pmd_t pmd;
134
135 pmd = mk_pmd_phys(page_to_phys(page), pgprot);
136 pte_val(pte) = pmd_val(pmd);
137 return pte;
138}
139
140static inline int huge_pte_write(pte_t pte)
141{
142 pmd_t pmd;
143
144 pmd_val(pmd) = pte_val(pte);
145 return pmd_write(pmd);
146}
147
148static inline int huge_pte_dirty(pte_t pte)
149{
150 /* No dirty bit in the segment table entry. */
151 return 0;
152}
153
154static inline pte_t huge_pte_mkwrite(pte_t pte)
155{
156 pmd_t pmd;
157
158 pmd_val(pmd) = pte_val(pte);
159 pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
160 return pte;
161}
162
163static inline pte_t huge_pte_mkdirty(pte_t pte)
164{
165 /* No dirty bit in the segment table entry. */
166 return pte;
167}
168
169static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
170{
171 pmd_t pmd;
172
173 pmd_val(pmd) = pte_val(pte);
174 pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
175 return pte;
176}
177
178static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
179 pte_t *ptep)
180{
181 pmd_clear((pmd_t *) ptep);
182}
183
130#endif /* _ASM_S390_HUGETLB_H */ 184#endif /* _ASM_S390_HUGETLB_H */
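
The hugetlb helpers added above rely on the fact that on s390 a huge "pte" is really a segment-table (pmd) entry: each helper copies the value into a pmd_t and reuses the existing pmd accessors, and because the segment entry has no dirty bit, huge_pte_dirty() is hard-wired to 0 and huge_pte_mkdirty() is a no-op. The pattern, shown for one helper:

    /* Same shape as huge_pte_write() above: treat the huge pte as a pmd. */
    static inline int huge_pte_write_sketch(pte_t pte)
    {
            pmd_t pmd;

            pmd_val(pmd) = pte_val(pte);
            return pmd_write(pmd);
    }
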
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7f0469..6c1801235db9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
140struct zpci_dev *zpci_alloc_device(void); 140struct zpci_dev *zpci_alloc_device(void);
141int zpci_create_device(struct zpci_dev *); 141int zpci_create_device(struct zpci_dev *);
142int zpci_enable_device(struct zpci_dev *); 142int zpci_enable_device(struct zpci_dev *);
143int zpci_disable_device(struct zpci_dev *);
143void zpci_stop_device(struct zpci_dev *); 144void zpci_stop_device(struct zpci_dev *);
144void zpci_free_device(struct zpci_dev *); 145void zpci_free_device(struct zpci_dev *);
145int zpci_scan_device(struct zpci_dev *); 146int zpci_scan_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6e..1ca5d1047c71 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
7extern debug_info_t *pci_debug_err_id; 7extern debug_info_t *pci_debug_err_id;
8 8
9#ifdef CONFIG_PCI_DEBUG 9#ifdef CONFIG_PCI_DEBUG
10#define zpci_dbg(fmt, args...) \ 10#define zpci_dbg(imp, fmt, args...) \
11 do { \ 11 debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
12 if (pci_debug_msg_id->level >= 2) \
13 debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
14 } while (0)
15 12
16#else /* !CONFIG_PCI_DEBUG */ 13#else /* !CONFIG_PCI_DEBUG */
17#define zpci_dbg(fmt, args...) do { } while (0) 14#define zpci_dbg(imp, fmt, args...) do { } while (0)
18#endif 15#endif
19 16
20#define zpci_err(text...) \ 17#define zpci_err(text...) \
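
zpci_dbg() above now takes an importance level as its first argument and passes it straight to debug_sprintf_event(), instead of hard-coding level 2 behind a manual level check. A hypothetical call site (the format string and fid variable are illustrative, not taken from this patch):

    zpci_dbg(3, "add fid:%x\n", fid);
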
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98d5dad..e6a2bdd4d705 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_S390_PCI_INSN_H 1#ifndef _ASM_S390_PCI_INSN_H
2#define _ASM_S390_PCI_INSN_H 2#define _ASM_S390_PCI_INSN_H
3 3
4#include <linux/delay.h>
5
6#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
7
8/* Load/Store status codes */ 4/* Load/Store status codes */
9#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 5#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
10#define ZPCI_PCI_ST_FUNC_IN_ERR 8 6#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
82 u64 reserved7; 78 u64 reserved7;
83} __packed; 79} __packed;
84 80
85/* Modify PCI Function Controls */
86static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
87{
88 u8 cc;
89
90 asm volatile (
91 " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
92 " ipm %[cc]\n"
93 " srl %[cc],28\n"
94 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
95 : : "cc");
96 *status = req >> 24 & 0xff;
97 return cc;
98}
99
100static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
101{
102 u8 cc, status;
103
104 do {
105 cc = __mpcifc(req, fib, &status);
106 if (cc == 2)
107 msleep(ZPCI_INSN_BUSY_DELAY);
108 } while (cc == 2);
109
110 if (cc)
111 printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
112 __func__, cc, status);
113 return (cc) ? -EIO : 0;
114}
115
116/* Refresh PCI Translations */
117static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
118{
119 register u64 __addr asm("2") = addr;
120 register u64 __range asm("3") = range;
121 u8 cc;
122
123 asm volatile (
124 " .insn rre,0xb9d30000,%[fn],%[addr]\n"
125 " ipm %[cc]\n"
126 " srl %[cc],28\n"
127 : [cc] "=d" (cc), [fn] "+d" (fn)
128 : [addr] "d" (__addr), "d" (__range)
129 : "cc");
130 *status = fn >> 24 & 0xff;
131 return cc;
132}
133
134static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
135{
136 u8 cc, status;
137
138 do {
139 cc = __rpcit(fn, addr, range, &status);
140 if (cc == 2)
141 udelay(ZPCI_INSN_BUSY_DELAY);
142 } while (cc == 2);
143
144 if (cc)
145 printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
146 __func__, cc, status, addr, range);
147 return (cc) ? -EIO : 0;
148}
149
150/* Store PCI function controls */
151static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
152{
153 u64 fn = (u64) handle << 32 | space << 16;
154 u8 cc;
155
156 asm volatile (
157 " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
158 " ipm %[cc]\n"
159 " srl %[cc],28\n"
160 : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
161 : : "cc");
162 *status = fn >> 24 & 0xff;
163 return cc;
164}
165
166/* Set Interruption Controls */
167static inline void sic_instr(u16 ctl, char *unused, u8 isc)
168{
169 asm volatile (
170 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
171 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
172}
173
174/* PCI Load */
175static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
176{
177 register u64 __req asm("2") = req;
178 register u64 __offset asm("3") = offset;
179 u64 __data;
180 u8 cc;
181
182 asm volatile (
183 " .insn rre,0xb9d20000,%[data],%[req]\n"
184 " ipm %[cc]\n"
185 " srl %[cc],28\n"
186 : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
187 : "d" (__offset)
188 : "cc");
189 *status = __req >> 24 & 0xff;
190 *data = __data;
191 return cc;
192}
193
194static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
195{
196 u8 cc, status;
197
198 do {
199 cc = __pcilg(data, req, offset, &status);
200 if (cc == 2)
201 udelay(ZPCI_INSN_BUSY_DELAY);
202 } while (cc == 2);
203
204 if (cc) {
205 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
206 __func__, cc, status, req, offset);
207 /* TODO: on IO errors set data to 0xff...
208 * here or in users of pcilg (le conversion)?
209 */
210 }
211 return (cc) ? -EIO : 0;
212}
213
214/* PCI Store */
215static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
216{
217 register u64 __req asm("2") = req;
218 register u64 __offset asm("3") = offset;
219 u8 cc;
220
221 asm volatile (
222 " .insn rre,0xb9d00000,%[data],%[req]\n"
223 " ipm %[cc]\n"
224 " srl %[cc],28\n"
225 : [cc] "=d" (cc), [req] "+d" (__req)
226 : "d" (__offset), [data] "d" (data)
227 : "cc");
228 *status = __req >> 24 & 0xff;
229 return cc;
230}
231
232static inline int pcistg_instr(u64 data, u64 req, u64 offset)
233{
234 u8 cc, status;
235
236 do {
237 cc = __pcistg(data, req, offset, &status);
238 if (cc == 2)
239 udelay(ZPCI_INSN_BUSY_DELAY);
240 } while (cc == 2);
241
242 if (cc)
243 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
244 __func__, cc, status, req, offset);
245 return (cc) ? -EIO : 0;
246}
247
248/* PCI Store Block */
249static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
250{
251 u8 cc;
252
253 asm volatile (
254 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
255 " ipm %[cc]\n"
256 " srl %[cc],28\n"
257 : [cc] "=d" (cc), [req] "+d" (req)
258 : [offset] "d" (offset), [data] "Q" (*data)
259 : "cc");
260 *status = req >> 24 & 0xff;
261 return cc;
262}
263
264static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
265{
266 u8 cc, status;
267
268 do {
269 cc = __pcistb(data, req, offset, &status);
270 if (cc == 2)
271 udelay(ZPCI_INSN_BUSY_DELAY);
272 } while (cc == 2);
273 81
274 if (cc) 82int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
275 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", 83int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
276 __func__, cc, status, req, offset); 84int s390pci_load(u64 *data, u64 req, u64 offset);
277 return (cc) ? -EIO : 0; 85int s390pci_store(u64 data, u64 req, u64 offset);
278} 86int s390pci_store_block(const u64 *data, u64 req, u64 offset);
87void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
279 88
280#endif 89#endif
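
The pci_insn.h hunk above replaces the large always-inline wrappers (__mpcifc, __rpcit, __pcilg, __pcistg, __pcistb and their retry loops) with declarations of out-of-line s390pci_* helpers; the busy-retry policy moves into their implementations. The retry shape that moved, taken from the removed inline code and shown as a sketch:

    u8 cc, status;
    u64 data;

    /* Retry while the instruction reports condition code 2 (busy),
     * then map any remaining failure to -EIO. */
    do {
            cc = __pcilg(&data, req, offset, &status);
            if (cc == 2)
                    udelay(ZPCI_INSN_BUSY_DELAY);
    } while (cc == 2);
    return cc ? -EIO : 0;
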
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c7..83a9caa6ae53 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
36 u64 data; \ 36 u64 data; \
37 int rc; \ 37 int rc; \
38 \ 38 \
39 rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \ 39 rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
40 if (rc) \ 40 if (rc) \
41 data = -1ULL; \ 41 data = -1ULL; \
42 return (RETTYPE) data; \ 42 return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
50 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ 50 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
51 u64 data = (VALTYPE) val; \ 51 u64 data = (VALTYPE) val; \
52 \ 52 \
53 pcistg_instr(data, req, ZPCI_OFFSET(addr)); \ 53 s390pci_store(data, req, ZPCI_OFFSET(addr)); \
54} 54}
55 55
56zpci_read(8, u64) 56zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
83 val = 0; /* let FW report error */ 83 val = 0; /* let FW report error */
84 break; 84 break;
85 } 85 }
86 return pcistg_instr(val, req, offset); 86 return s390pci_store(val, req, offset);
87} 87}
88 88
89static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) 89static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
90{ 90{
91 u64 data; 91 u64 data;
92 u8 cc; 92 int cc;
93
94 cc = s390pci_load(&data, req, offset);
95 if (cc)
96 goto out;
93 97
94 cc = pcilg_instr(&data, req, offset);
95 switch (len) { 98 switch (len) {
96 case 1: 99 case 1:
97 *((u8 *) dst) = (u8) data; 100 *((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
106 *((u64 *) dst) = (u64) data; 109 *((u64 *) dst) = (u64) data;
107 break; 110 break;
108 } 111 }
112out:
109 return cc; 113 return cc;
110} 114}
111 115
112static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) 116static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
113{ 117{
114 return pcistb_instr(data, req, offset); 118 return s390pci_store_block(data, req, offset);
115} 119}
116 120
117static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) 121static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3cb47cf02530..b4622915bd15 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -424,6 +424,13 @@ extern unsigned long MODULES_END;
424#define __S110 PAGE_RW 424#define __S110 PAGE_RW
425#define __S111 PAGE_RW 425#define __S111 PAGE_RW
426 426
427/*
428 * Segment entry (large page) protection definitions.
429 */
430#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
431#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
432#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
433
427static inline int mm_exclusive(struct mm_struct *mm) 434static inline int mm_exclusive(struct mm_struct *mm)
428{ 435{
429 return likely(mm == current->active_mm && 436 return likely(mm == current->active_mm &&
@@ -764,6 +771,8 @@ void gmap_disable(struct gmap *gmap);
764int gmap_map_segment(struct gmap *gmap, unsigned long from, 771int gmap_map_segment(struct gmap *gmap, unsigned long from,
765 unsigned long to, unsigned long length); 772 unsigned long to, unsigned long length);
766int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); 773int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
774unsigned long __gmap_translate(unsigned long address, struct gmap *);
775unsigned long gmap_translate(unsigned long address, struct gmap *);
767unsigned long __gmap_fault(unsigned long address, struct gmap *); 776unsigned long __gmap_fault(unsigned long address, struct gmap *);
768unsigned long gmap_fault(unsigned long address, struct gmap *); 777unsigned long gmap_fault(unsigned long address, struct gmap *);
769void gmap_discard(unsigned long from, unsigned long to, struct gmap *); 778void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
@@ -912,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
912#ifdef CONFIG_HUGETLB_PAGE 921#ifdef CONFIG_HUGETLB_PAGE
913static inline pte_t pte_mkhuge(pte_t pte) 922static inline pte_t pte_mkhuge(pte_t pte)
914{ 923{
915 /*
916 * PROT_NONE needs to be remapped from the pte type to the ste type.
917 * The HW invalid bit is also different for pte and ste. The pte
918 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
919 * bit, so we don't have to clear it.
920 */
921 if (pte_val(pte) & _PAGE_INVALID) {
922 if (pte_val(pte) & _PAGE_SWT)
923 pte_val(pte) |= _HPAGE_TYPE_NONE;
924 pte_val(pte) |= _SEGMENT_ENTRY_INV;
925 }
926 /*
927 * Clear SW pte bits, there are no SW bits in a segment table entry.
928 */
929 pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
930 _PAGE_SWR | _PAGE_SWW);
931 /*
932 * Also set the change-override bit because we don't need dirty bit
933 * tracking for hugetlbfs pages.
934 */
935 pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); 924 pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
936 return pte; 925 return pte;
937} 926}
@@ -1276,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
1276 } 1265 }
1277} 1266}
1278 1267
1279#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1268#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1280
1281#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
1282#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
1283#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
1284
1285#define __HAVE_ARCH_PGTABLE_DEPOSIT
1286extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
1287
1288#define __HAVE_ARCH_PGTABLE_WITHDRAW
1289extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
1290
1291static inline int pmd_trans_splitting(pmd_t pmd)
1292{
1293 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
1294}
1295
1296static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1297 pmd_t *pmdp, pmd_t entry)
1298{
1299 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
1300 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1301 *pmdp = entry;
1302}
1303
1304static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) 1269static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1305{ 1270{
1306 /* 1271 /*
@@ -1321,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1321 return pmd; 1286 return pmd;
1322} 1287}
1323 1288
1324static inline pmd_t pmd_mkhuge(pmd_t pmd) 1289static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1325{ 1290{
1326 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1291 pmd_t __pmd;
1327 return pmd; 1292 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1293 return __pmd;
1328} 1294}
1329 1295
1330static inline pmd_t pmd_mkwrite(pmd_t pmd) 1296static inline pmd_t pmd_mkwrite(pmd_t pmd)
@@ -1334,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
1334 pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; 1300 pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
1335 return pmd; 1301 return pmd;
1336} 1302}
1303#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1304
1305#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1306
1307#define __HAVE_ARCH_PGTABLE_DEPOSIT
1308extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
1309
1310#define __HAVE_ARCH_PGTABLE_WITHDRAW
1311extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
1312
1313static inline int pmd_trans_splitting(pmd_t pmd)
1314{
1315 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
1316}
1317
1318static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1319 pmd_t *pmdp, pmd_t entry)
1320{
1321 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
1322 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1323 *pmdp = entry;
1324}
1325
1326static inline pmd_t pmd_mkhuge(pmd_t pmd)
1327{
1328 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1329 return pmd;
1330}
1337 1331
1338static inline pmd_t pmd_wrprotect(pmd_t pmd) 1332static inline pmd_t pmd_wrprotect(pmd_t pmd)
1339{ 1333{
@@ -1430,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1430 } 1424 }
1431} 1425}
1432 1426
1433static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1434{
1435 pmd_t __pmd;
1436 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1437 return __pmd;
1438}
1439
1440#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) 1427#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1441#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 1428#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1442 1429
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c90230..6b499870662f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
161 161
162extern void show_code(struct pt_regs *regs); 162extern void show_code(struct pt_regs *regs);
163extern void print_fn_code(unsigned char *code, unsigned long len); 163extern void print_fn_code(unsigned char *code, unsigned long len);
164extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]); 164extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
165 unsigned int len);
165 166
166unsigned long get_wchan(struct task_struct *p); 167unsigned long get_wchan(struct task_struct *p);
167#define task_pt_regs(tsk) ((struct pt_regs *) \ 168#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10c..559512a455da 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
9#include <uapi/asm/ptrace.h> 9#include <uapi/asm/ptrace.h>
10 10
11#ifndef __ASSEMBLY__ 11#ifndef __ASSEMBLY__
12#ifndef __s390x__ 12
13#else /* __s390x__ */
14#endif /* __s390x__ */
15extern long psw_kernel_bits; 13extern long psw_kernel_bits;
16extern long psw_user_bits; 14extern long psw_user_bits;
17 15
@@ -77,8 +75,6 @@ struct per_struct_kernel {
77#define PER_CONTROL_SUSPENSION 0x00400000UL 75#define PER_CONTROL_SUSPENSION 0x00400000UL
78#define PER_CONTROL_ALTERATION 0x00200000UL 76#define PER_CONTROL_ALTERATION 0x00200000UL
79 77
80#ifdef __s390x__
81#endif /* __s390x__ */
82/* 78/*
83 * These are defined as per linux/ptrace.h, which see. 79 * These are defined as per linux/ptrace.h, which see.
84 */ 80 */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b99759e12..cd29d2f4e4f3 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
23 * type here is what we want [need] for both 32 bit and 64 bit systems. 23 * type here is what we want [need] for both 32 bit and 64 bit systems.
24 */ 24 */
25extern const unsigned int sys_call_table[]; 25extern const unsigned int sys_call_table[];
26extern const unsigned int sys_call_table_emu[];
26 27
27static inline long syscall_get_nr(struct task_struct *task, 28static inline long syscall_get_nr(struct task_struct *task,
28 struct pt_regs *regs) 29 struct pt_regs *regs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c3..eb5f64d26d06 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
14#define THREAD_ORDER 1 14#define THREAD_ORDER 1
15#define ASYNC_ORDER 1 15#define ASYNC_ORDER 1
16#else /* CONFIG_64BIT */ 16#else /* CONFIG_64BIT */
17#ifndef __SMALL_STACK
18#define THREAD_ORDER 2 17#define THREAD_ORDER 2
19#define ASYNC_ORDER 2 18#define ASYNC_ORDER 2
20#else
21#define THREAD_ORDER 1
22#define ASYNC_ORDER 1
23#endif
24#endif /* CONFIG_64BIT */ 19#endif /* CONFIG_64BIT */
25 20
26#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 21#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
41 struct task_struct *task; /* main task structure */ 36 struct task_struct *task; /* main task structure */
42 struct exec_domain *exec_domain; /* execution domain */ 37 struct exec_domain *exec_domain; /* execution domain */
43 unsigned long flags; /* low level flags */ 38 unsigned long flags; /* low level flags */
39 unsigned long sys_call_table; /* System call table address */
44 unsigned int cpu; /* current CPU */ 40 unsigned int cpu; /* current CPU */
45 int preempt_count; /* 0 => preemptable, <0 => BUG */ 41 int preempt_count; /* 0 => preemptable, <0 => BUG */
46 struct restart_block restart_block; 42 struct restart_block restart_block;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index a6667a952969..651886353551 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -54,12 +54,4 @@
54#define __ARCH_WANT_SYS_VFORK 54#define __ARCH_WANT_SYS_VFORK
55#define __ARCH_WANT_SYS_CLONE 55#define __ARCH_WANT_SYS_CLONE
56 56
57/*
58 * "Conditional" syscalls
59 *
60 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
61 * but it doesn't work on all toolchains, so we just do it by hand
62 */
63#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
64
65#endif /* _ASM_S390_UNISTD_H_ */ 57#endif /* _ASM_S390_UNISTD_H_ */
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a5ca214b34fd..3aa9f1ec5b29 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
215 unsigned long addr; 215 unsigned long addr;
216} __attribute__ ((aligned(8))) psw_t; 216} __attribute__ ((aligned(8))) psw_t;
217 217
218typedef struct
219{
220 __u32 mask;
221 __u32 addr;
222} __attribute__ ((aligned(8))) psw_compat_t;
223
224#ifndef __s390x__ 218#ifndef __s390x__
225 219
226#define PSW_MASK_PER 0x40000000UL 220#define PSW_MASK_PER 0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
295 unsigned long orig_gpr2; 289 unsigned long orig_gpr2;
296} s390_regs; 290} s390_regs;
297 291
298typedef struct
299{
300 psw_compat_t psw;
301 __u32 gprs[NUM_GPRS];
302 __u32 acrs[NUM_ACRS];
303 __u32 orig_gpr2;
304} s390_compat_regs;
305
306typedef struct
307{
308 __u32 gprs_high[NUM_GPRS];
309} s390_compat_regs_high;
310
311
312/* 292/*
313 * Now for the user space program event recording (trace) definitions. 293 * Now for the user space program event recording (trace) definitions.
314 * The following structures are used only for the ptrace interface, don't 294 * The following structures are used only for the ptrace interface, don't
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 5acca0a34c20..a61d538756f2 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
7#ifndef _S390_STATFS_H 7#ifndef _S390_STATFS_H
8#define _S390_STATFS_H 8#define _S390_STATFS_H
9 9
10#ifndef __s390x__
11#include <asm-generic/statfs.h>
12#else
13/* 10/*
14 * We can't use <asm-generic/statfs.h> because in 64-bit mode 11 * We can't use <asm-generic/statfs.h> because in 64-bit mode
15 * we mix ints of different sizes in our struct statfs. 12 * we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
21#endif 18#endif
22 19
23struct statfs { 20struct statfs {
24 int f_type; 21 unsigned int f_type;
25 int f_bsize; 22 unsigned int f_bsize;
26 long f_blocks; 23 unsigned long f_blocks;
27 long f_bfree; 24 unsigned long f_bfree;
28 long f_bavail; 25 unsigned long f_bavail;
29 long f_files; 26 unsigned long f_files;
30 long f_ffree; 27 unsigned long f_ffree;
31 __kernel_fsid_t f_fsid; 28 __kernel_fsid_t f_fsid;
32 int f_namelen; 29 unsigned int f_namelen;
33 int f_frsize; 30 unsigned int f_frsize;
34 int f_flags; 31 unsigned int f_flags;
35 int f_spare[4]; 32 unsigned int f_spare[4];
36}; 33};
37 34
38struct statfs64 { 35struct statfs64 {
39 int f_type; 36 unsigned int f_type;
40 int f_bsize; 37 unsigned int f_bsize;
41 long f_blocks; 38 unsigned long f_blocks;
42 long f_bfree; 39 unsigned long f_bfree;
43 long f_bavail; 40 unsigned long f_bavail;
44 long f_files; 41 unsigned long f_files;
45 long f_ffree; 42 unsigned long f_ffree;
46 __kernel_fsid_t f_fsid; 43 __kernel_fsid_t f_fsid;
47 int f_namelen; 44 unsigned int f_namelen;
48 int f_frsize; 45 unsigned int f_frsize;
49 int f_flags; 46 unsigned int f_flags;
50 int f_spare[4]; 47 unsigned int f_spare[4];
51}; 48};
52 49
53struct compat_statfs64 {
54 __u32 f_type;
55 __u32 f_bsize;
56 __u64 f_blocks;
57 __u64 f_bfree;
58 __u64 f_bavail;
59 __u64 f_files;
60 __u64 f_ffree;
61 __kernel_fsid_t f_fsid;
62 __u32 f_namelen;
63 __u32 f_frsize;
64 __u32 f_flags;
65 __u32 f_spare[4];
66};
67
68#endif /* __s390x__ */
69#endif 50#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2ac311ef5c9b..1386fcaf4ef6 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -14,16 +14,25 @@ endif
14CFLAGS_smp.o := -Wno-nonnull 14CFLAGS_smp.o := -Wno-nonnull
15 15
16# 16#
17# Disable tailcall optimizations for stack / callchain walking functions
18# since this might generate broken code when accessing register 15 and
19# passing its content to other functions.
20#
21CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
22CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
23
24#
17# Pass UTS_MACHINE for user_regset definition 25# Pass UTS_MACHINE for user_regset definition
18# 26#
19CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' 27CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
20 28
21CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
22 30
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ 31obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ 33obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
26 sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
35obj-y += dumpstack.o
27 36
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 37obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 38obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fface87056eb..7a82f9f70100 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
35 DEFINE(__TI_task, offsetof(struct thread_info, task)); 35 DEFINE(__TI_task, offsetof(struct thread_info, task));
36 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); 36 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
38 DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
38 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); 39 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
39 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); 40 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
40 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); 41 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 19f26de27fae..8b6e4f5288a2 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -288,51 +288,13 @@ asmlinkage long sys32_getegid16(void)
288 return high2lowgid(from_kgid_munged(current_user_ns(), current_egid())); 288 return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
289} 289}
290 290
291/*
292 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
293 *
294 * This is really horribly ugly.
295 */
296#ifdef CONFIG_SYSVIPC 291#ifdef CONFIG_SYSVIPC
297asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr) 292COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
293 unsigned long, third, compat_uptr_t, ptr)
298{ 294{
299 if (call >> 16) /* hack for backward compatibility */ 295 if (call >> 16) /* hack for backward compatibility */
300 return -EINVAL; 296 return -EINVAL;
301 switch (call) { 297 return compat_sys_ipc(call, first, second, third, ptr, third);
302 case SEMTIMEDOP:
303 return compat_sys_semtimedop(first, compat_ptr(ptr),
304 second, compat_ptr(third));
305 case SEMOP:
306 /* struct sembuf is the same on 32 and 64bit :)) */
307 return sys_semtimedop(first, compat_ptr(ptr),
308 second, NULL);
309 case SEMGET:
310 return sys_semget(first, second, third);
311 case SEMCTL:
312 return compat_sys_semctl(first, second, third,
313 compat_ptr(ptr));
314 case MSGSND:
315 return compat_sys_msgsnd(first, second, third,
316 compat_ptr(ptr));
317 case MSGRCV:
318 return compat_sys_msgrcv(first, second, 0, third,
319 0, compat_ptr(ptr));
320 case MSGGET:
321 return sys_msgget((key_t) first, second);
322 case MSGCTL:
323 return compat_sys_msgctl(first, second, compat_ptr(ptr));
324 case SHMAT:
325 return compat_sys_shmat(first, second, third,
326 0, compat_ptr(ptr));
327 case SHMDT:
328 return sys_shmdt(compat_ptr(ptr));
329 case SHMGET:
330 return sys_shmget(first, (unsigned)second, third);
331 case SHMCTL:
332 return compat_sys_shmctl(first, second, compat_ptr(ptr));
333 }
334
335 return -ENOSYS;
336} 298}
337#endif 299#endif
338 300
@@ -373,48 +335,6 @@ asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 coun
373 return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count); 335 return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
374} 336}
375 337
376asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count)
377{
378 mm_segment_t old_fs = get_fs();
379 int ret;
380 off_t of;
381
382 if (offset && get_user(of, offset))
383 return -EFAULT;
384
385 set_fs(KERNEL_DS);
386 ret = sys_sendfile(out_fd, in_fd,
387 offset ? (off_t __force __user *) &of : NULL, count);
388 set_fs(old_fs);
389
390 if (offset && put_user(of, offset))
391 return -EFAULT;
392
393 return ret;
394}
395
396asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
397 compat_loff_t __user *offset, s32 count)
398{
399 mm_segment_t old_fs = get_fs();
400 int ret;
401 loff_t lof;
402
403 if (offset && get_user(lof, offset))
404 return -EFAULT;
405
406 set_fs(KERNEL_DS);
407 ret = sys_sendfile64(out_fd, in_fd,
408 offset ? (loff_t __force __user *) &lof : NULL,
409 count);
410 set_fs(old_fs);
411
412 if (offset && put_user(lof, offset))
413 return -EFAULT;
414
415 return ret;
416}
417
418struct stat64_emu31 { 338struct stat64_emu31 {
419 unsigned long long st_dev; 339 unsigned long long st_dev;
420 unsigned int __pad1; 340 unsigned int __pad1;
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 00d92a5a6f6c..976518c0592a 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -94,7 +94,6 @@ long sys32_getuid16(void);
94long sys32_geteuid16(void); 94long sys32_geteuid16(void);
95long sys32_getgid16(void); 95long sys32_getgid16(void);
96long sys32_getegid16(void); 96long sys32_getegid16(void);
97long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
98long sys32_truncate64(const char __user * path, unsigned long high, 97long sys32_truncate64(const char __user * path, unsigned long high,
99 unsigned long low); 98 unsigned long low);
100long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low); 99long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
@@ -106,10 +105,6 @@ long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
106long sys32_pwrite64(unsigned int fd, const char __user *ubuf, 105long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
107 size_t count, u32 poshi, u32 poslo); 106 size_t count, u32 poshi, u32 poslo);
108compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count); 107compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
109long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
110 size_t count);
111long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset,
112 s32 count);
113long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf); 108long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf);
114long sys32_lstat64(const char __user * filename, 109long sys32_lstat64(const char __user * filename,
115 struct stat64_emu31 __user * statbuf); 110 struct stat64_emu31 __user * statbuf);
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6de049fbe62d..c439ac9ced09 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
362 /* set extra registers only for synchronous signals */ 362 /* set extra registers only for synchronous signals */
363 regs->gprs[4] = regs->int_code & 127; 363 regs->gprs[4] = regs->int_code & 127;
364 regs->gprs[5] = regs->int_parm_long; 364 regs->gprs[5] = regs->int_parm_long;
365 regs->gprs[6] = task_thread_info(current)->last_break;
365 } 366 }
366 367
367 /* Place signal number on stack to allow backtrace from handler. */ 368 /* Place signal number on stack to allow backtrace from handler. */
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
421 regs->gprs[2] = map_signal(sig); 422 regs->gprs[2] = map_signal(sig);
422 regs->gprs[3] = (__force __u64) &frame->info; 423 regs->gprs[3] = (__force __u64) &frame->info;
423 regs->gprs[4] = (__force __u64) &frame->uc; 424 regs->gprs[4] = (__force __u64) &frame->uc;
425 regs->gprs[5] = task_thread_info(current)->last_break;
424 return 0; 426 return 0;
425 427
426give_sigsegv: 428give_sigsegv:
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 3c98c4dc5aca..17644c8e10e1 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -258,11 +258,6 @@ ENTRY(sys32_mmap2_wrapper)
258 llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * 258 llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
259 jg sys32_mmap2 # branch to system call 259 jg sys32_mmap2 # branch to system call
260 260
261ENTRY(compat_sys_getrusage_wrapper)
262 lgfr %r2,%r2 # int
263 llgtr %r3,%r3 # struct rusage_emu31 *
264 jg compat_sys_getrusage # branch to system call
265
266ENTRY(compat_sys_gettimeofday_wrapper) 261ENTRY(compat_sys_gettimeofday_wrapper)
267 llgtr %r2,%r2 # struct timeval_emu31 * 262 llgtr %r2,%r2 # struct timeval_emu31 *
268 llgtr %r3,%r3 # struct timezone * 263 llgtr %r3,%r3 # struct timezone *
@@ -393,14 +388,6 @@ ENTRY(compat_sys_sysinfo_wrapper)
393 llgtr %r2,%r2 # struct sysinfo_emu31 * 388 llgtr %r2,%r2 # struct sysinfo_emu31 *
394 jg compat_sys_sysinfo # branch to system call 389 jg compat_sys_sysinfo # branch to system call
395 390
396ENTRY(sys32_ipc_wrapper)
397 llgfr %r2,%r2 # uint
398 lgfr %r3,%r3 # int
399 lgfr %r4,%r4 # int
400 lgfr %r5,%r5 # int
401 llgfr %r6,%r6 # u32
402 jg sys32_ipc # branch to system call
403
404ENTRY(sys32_fsync_wrapper) 391ENTRY(sys32_fsync_wrapper)
405 llgfr %r2,%r2 # unsigned int 392 llgfr %r2,%r2 # unsigned int
406 jg sys_fsync # branch to system call 393 jg sys_fsync # branch to system call
@@ -666,13 +653,6 @@ ENTRY(sys32_capset_wrapper)
666 llgtr %r3,%r3 # const cap_user_data_t 653 llgtr %r3,%r3 # const cap_user_data_t
667 jg sys_capset # branch to system call 654 jg sys_capset # branch to system call
668 655
669ENTRY(sys32_sendfile_wrapper)
670 lgfr %r2,%r2 # int
671 lgfr %r3,%r3 # int
672 llgtr %r4,%r4 # __kernel_off_emu31_t *
673 llgfr %r5,%r5 # size_t
674 jg sys32_sendfile # branch to system call
675
676#sys32_vfork_wrapper # done in vfork_glue 656#sys32_vfork_wrapper # done in vfork_glue
677 657
678ENTRY(sys32_truncate64_wrapper) 658ENTRY(sys32_truncate64_wrapper)
@@ -938,13 +918,6 @@ ENTRY(sys_epoll_wait_wrapper)
938 lgfr %r5,%r5 # int 918 lgfr %r5,%r5 # int
939 jg sys_epoll_wait # branch to system call 919 jg sys_epoll_wait # branch to system call
940 920
941ENTRY(sys32_lookup_dcookie_wrapper)
942 sllg %r2,%r2,32 # get high word of 64bit dcookie
943 or %r2,%r3 # get low word of 64bit dcookie
944 llgtr %r3,%r4 # char *
945 llgfr %r4,%r5 # size_t
946 jg sys_lookup_dcookie
947
948ENTRY(sys32_fadvise64_wrapper) 921ENTRY(sys32_fadvise64_wrapper)
949 lgfr %r2,%r2 # int 922 lgfr %r2,%r2 # int
950 sllg %r3,%r3,32 # get high word of 64bit loff_t 923 sllg %r3,%r3,32 # get high word of 64bit loff_t
@@ -1264,29 +1237,12 @@ ENTRY(sys_tee_wrapper)
1264 llgfr %r5,%r5 # unsigned int 1237 llgfr %r5,%r5 # unsigned int
1265 jg sys_tee 1238 jg sys_tee
1266 1239
1267ENTRY(compat_sys_vmsplice_wrapper)
1268 lgfr %r2,%r2 # int
1269 llgtr %r3,%r3 # compat_iovec *
1270 llgfr %r4,%r4 # unsigned int
1271 llgfr %r5,%r5 # unsigned int
1272 jg compat_sys_vmsplice
1273
1274ENTRY(sys_getcpu_wrapper) 1240ENTRY(sys_getcpu_wrapper)
1275 llgtr %r2,%r2 # unsigned * 1241 llgtr %r2,%r2 # unsigned *
1276 llgtr %r3,%r3 # unsigned * 1242 llgtr %r3,%r3 # unsigned *
1277 llgtr %r4,%r4 # struct getcpu_cache * 1243 llgtr %r4,%r4 # struct getcpu_cache *
1278 jg sys_getcpu 1244 jg sys_getcpu
1279 1245
1280ENTRY(compat_sys_epoll_pwait_wrapper)
1281 lgfr %r2,%r2 # int
1282 llgtr %r3,%r3 # struct compat_epoll_event *
1283 lgfr %r4,%r4 # int
1284 lgfr %r5,%r5 # int
1285 llgtr %r6,%r6 # compat_sigset_t *
1286 llgf %r0,164(%r15) # compat_size_t
1287 stg %r0,160(%r15)
1288 jg compat_sys_epoll_pwait
1289
1290ENTRY(compat_sys_utimes_wrapper) 1246ENTRY(compat_sys_utimes_wrapper)
1291 llgtr %r2,%r2 # char * 1247 llgtr %r2,%r2 # char *
1292 llgtr %r3,%r3 # struct compat_timeval * 1248 llgtr %r3,%r3 # struct compat_timeval *
@@ -1299,12 +1255,6 @@ ENTRY(compat_sys_utimensat_wrapper)
1299 lgfr %r5,%r5 # int 1255 lgfr %r5,%r5 # int
1300 jg compat_sys_utimensat 1256 jg compat_sys_utimensat
1301 1257
1302ENTRY(compat_sys_signalfd_wrapper)
1303 lgfr %r2,%r2 # int
1304 llgtr %r3,%r3 # compat_sigset_t *
1305 llgfr %r4,%r4 # compat_size_t
1306 jg compat_sys_signalfd
1307
1308ENTRY(sys_eventfd_wrapper) 1258ENTRY(sys_eventfd_wrapper)
1309 llgfr %r2,%r2 # unsigned int 1259 llgfr %r2,%r2 # unsigned int
1310 jg sys_eventfd 1260 jg sys_eventfd
@@ -1323,13 +1273,6 @@ ENTRY(sys_timerfd_create_wrapper)
1323 lgfr %r3,%r3 # int 1273 lgfr %r3,%r3 # int
1324 jg sys_timerfd_create 1274 jg sys_timerfd_create
1325 1275
1326ENTRY(compat_sys_signalfd4_wrapper)
1327 lgfr %r2,%r2 # int
1328 llgtr %r3,%r3 # compat_sigset_t *
1329 llgfr %r4,%r4 # compat_size_t
1330 lgfr %r5,%r5 # int
1331 jg compat_sys_signalfd4
1332
1333ENTRY(sys_eventfd2_wrapper) 1276ENTRY(sys_eventfd2_wrapper)
1334 llgfr %r2,%r2 # unsigned int 1277 llgfr %r2,%r2 # unsigned int
1335 lgfr %r3,%r3 # int 1278 lgfr %r3,%r3 # int
@@ -1361,13 +1304,6 @@ ENTRY(sys32_readahead_wrapper)
1361 lgfr %r5,%r5 # s32 1304 lgfr %r5,%r5 # s32
1362 jg sys32_readahead # branch to system call 1305 jg sys32_readahead # branch to system call
1363 1306
1364ENTRY(sys32_sendfile64_wrapper)
1365 lgfr %r2,%r2 # int
1366 lgfr %r3,%r3 # int
1367 llgtr %r4,%r4 # compat_loff_t *
1368 lgfr %r5,%r5 # s32
1369 jg sys32_sendfile64 # branch to system call
1370
1371ENTRY(sys_tkill_wrapper) 1307ENTRY(sys_tkill_wrapper)
1372 lgfr %r2,%r2 # pid_t 1308 lgfr %r2,%r2 # pid_t
1373 lgfr %r3,%r3 # int 1309 lgfr %r3,%r3 # int
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3ad5e9540160..7f4a4a8c847c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
1696 * insn_to_mnemonic - decode an s390 instruction 1696 * insn_to_mnemonic - decode an s390 instruction
1697 * @instruction: instruction to decode 1697 * @instruction: instruction to decode
1698 * @buf: buffer to fill with mnemonic 1698 * @buf: buffer to fill with mnemonic
1699 * @len: length of buffer
1699 * 1700 *
1700 * Decode the instruction at @instruction and store the corresponding 1701 * Decode the instruction at @instruction and store the corresponding
1701 * mnemonic into @buf. 1702 * mnemonic into @buf of length @len.
1702 * @buf is left unchanged if the instruction could not be decoded. 1703 * @buf is left unchanged if the instruction could not be decoded.
1703 * Returns: 1704 * Returns:
1704 * %0 on success, %-ENOENT if the instruction was not found. 1705 * %0 on success, %-ENOENT if the instruction was not found.
1705 */ 1706 */
1706int insn_to_mnemonic(unsigned char *instruction, char buf[8]) 1707int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
1707{ 1708{
1708 struct insn *insn; 1709 struct insn *insn;
1709 1710
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
1711 if (!insn) 1712 if (!insn)
1712 return -ENOENT; 1713 return -ENOENT;
1713 if (insn->name[0] == '\0') 1714 if (insn->name[0] == '\0')
1714 snprintf(buf, 8, "%s", 1715 snprintf(buf, len, "%s",
1715 long_insn_name[(int) insn->name[1]]); 1716 long_insn_name[(int) insn->name[1]]);
1716 else 1717 else
1717 snprintf(buf, 8, "%.5s", insn->name); 1718 snprintf(buf, len, "%.5s", insn->name);
1718 return 0; 1719 return 0;
1719} 1720}
1720EXPORT_SYMBOL_GPL(insn_to_mnemonic); 1721EXPORT_SYMBOL_GPL(insn_to_mnemonic);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
new file mode 100644
index 000000000000..298297477257
--- /dev/null
+++ b/arch/s390/kernel/dumpstack.c
@@ -0,0 +1,212 @@
1/*
2 * Stack dumping functions
3 *
4 * Copyright IBM Corp. 1999, 2013
5 */
6
7#include <linux/kallsyms.h>
8#include <linux/hardirq.h>
9#include <linux/kprobes.h>
10#include <linux/utsname.h>
11#include <linux/export.h>
12#include <linux/kdebug.h>
13#include <linux/ptrace.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <asm/processor.h>
17#include <asm/debug.h>
18#include <asm/ipl.h>
19
20#ifndef CONFIG_64BIT
21#define LONG "%08lx "
22#define FOURLONG "%08lx %08lx %08lx %08lx\n"
23static int kstack_depth_to_print = 12;
24#else /* CONFIG_64BIT */
25#define LONG "%016lx "
26#define FOURLONG "%016lx %016lx %016lx %016lx\n"
27static int kstack_depth_to_print = 20;
28#endif /* CONFIG_64BIT */
29
30/*
31 * For show_trace we have three different stacks to consider:
32 * - the panic stack which is used if the kernel stack has overflowed
33 * - the asynchronous interrupt stack (cpu related)
34 * - the synchronous kernel stack (process related)
35 * The stack trace can start at any of the three stacks and can potentially
36 * touch all of them. The order is: panic stack, async stack, sync stack.
37 */
38static unsigned long
39__show_trace(unsigned long sp, unsigned long low, unsigned long high)
40{
41 struct stack_frame *sf;
42 struct pt_regs *regs;
43
44 while (1) {
45 sp = sp & PSW_ADDR_INSN;
46 if (sp < low || sp > high - sizeof(*sf))
47 return sp;
48 sf = (struct stack_frame *) sp;
49 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
50 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
51 /* Follow the backchain. */
52 while (1) {
53 low = sp;
54 sp = sf->back_chain & PSW_ADDR_INSN;
55 if (!sp)
56 break;
57 if (sp <= low || sp > high - sizeof(*sf))
58 return sp;
59 sf = (struct stack_frame *) sp;
60 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
61 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
62 }
63 /* Zero backchain detected, check for interrupt frame. */
64 sp = (unsigned long) (sf + 1);
65 if (sp <= low || sp > high - sizeof(*regs))
66 return sp;
67 regs = (struct pt_regs *) sp;
68 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
69 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
70 low = sp;
71 sp = regs->gprs[15];
72 }
73}
74
75static void show_trace(struct task_struct *task, unsigned long *stack)
76{
77 register unsigned long __r15 asm ("15");
78 unsigned long sp;
79
80 sp = (unsigned long) stack;
81 if (!sp)
82 sp = task ? task->thread.ksp : __r15;
83 printk("Call Trace:\n");
84#ifdef CONFIG_CHECK_STACK
85 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
86 S390_lowcore.panic_stack);
87#endif
88 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
89 S390_lowcore.async_stack);
90 if (task)
91 __show_trace(sp, (unsigned long) task_stack_page(task),
92 (unsigned long) task_stack_page(task) + THREAD_SIZE);
93 else
94 __show_trace(sp, S390_lowcore.thread_info,
95 S390_lowcore.thread_info + THREAD_SIZE);
96 if (!task)
97 task = current;
98 debug_show_held_locks(task);
99}
100
101void show_stack(struct task_struct *task, unsigned long *sp)
102{
103 register unsigned long *__r15 asm ("15");
104 unsigned long *stack;
105 int i;
106
107 if (!sp)
108 stack = task ? (unsigned long *) task->thread.ksp : __r15;
109 else
110 stack = sp;
111
112 for (i = 0; i < kstack_depth_to_print; i++) {
113 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
114 break;
115 if ((i * sizeof(long) % 32) == 0)
116 printk("%s ", i == 0 ? "" : "\n");
117 printk(LONG, *stack++);
118 }
119 printk("\n");
120 show_trace(task, sp);
121}
122
123static void show_last_breaking_event(struct pt_regs *regs)
124{
125#ifdef CONFIG_64BIT
126 printk("Last Breaking-Event-Address:\n");
127 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
128 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
129#endif
130}
131
132static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
133{
134 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
135}
136
137void show_registers(struct pt_regs *regs)
138{
139 char *mode;
140
141 mode = user_mode(regs) ? "User" : "Krnl";
142 printk("%s PSW : %p %p",
143 mode, (void *) regs->psw.mask,
144 (void *) regs->psw.addr);
145 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
146 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
147 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
148 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
149 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
150 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
151 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
152 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
153#ifdef CONFIG_64BIT
154 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
155#endif
156 printk("\n%s GPRS: " FOURLONG, mode,
157 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
158 printk(" " FOURLONG,
159 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
160 printk(" " FOURLONG,
161 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
162 printk(" " FOURLONG,
163 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
164 show_code(regs);
165}
166
167void show_regs(struct pt_regs *regs)
168{
169 show_regs_print_info(KERN_DEFAULT);
170 show_registers(regs);
171 /* Show stack backtrace if pt_regs is from kernel mode */
172 if (!user_mode(regs))
173 show_trace(NULL, (unsigned long *) regs->gprs[15]);
174 show_last_breaking_event(regs);
175}
176
177static DEFINE_SPINLOCK(die_lock);
178
179void die(struct pt_regs *regs, const char *str)
180{
181 static int die_counter;
182
183 oops_enter();
184 lgr_info_log();
185 debug_stop_all();
186 console_verbose();
187 spin_lock_irq(&die_lock);
188 bust_spinlocks(1);
189 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
190#ifdef CONFIG_PREEMPT
191 printk("PREEMPT ");
192#endif
193#ifdef CONFIG_SMP
194 printk("SMP ");
195#endif
196#ifdef CONFIG_DEBUG_PAGEALLOC
197 printk("DEBUG_PAGEALLOC");
198#endif
199 printk("\n");
200 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
201 print_modules();
202 show_regs(regs);
203 bust_spinlocks(0);
204 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
205 spin_unlock_irq(&die_lock);
206 if (in_interrupt())
207 panic("Fatal exception in interrupt");
208 if (panic_on_oops)
209 panic("Fatal exception: panic_on_oops");
210 oops_exit();
211 do_exit(SIGSEGV);
212}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 94feff7d6132..4d5e6f8a7978 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
45 45
46STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 46STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
47STACK_SIZE = 1 << STACK_SHIFT 47STACK_SIZE = 1 << STACK_SHIFT
48STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
48 49
49#define BASED(name) name-system_call(%r13) 50#define BASED(name) name-system_call(%r13)
50 51
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
97 sra %r14,\shift 98 sra %r14,\shift
98 jnz 1f 99 jnz 1f
99 CHECK_STACK 1<<\shift,\savearea 100 CHECK_STACK 1<<\shift,\savearea
101 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
100 j 2f 102 j 2f
1011: l %r15,\stack # load target stack 1031: l %r15,\stack # load target stack
1022: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 1042: la %r11,STACK_FRAME_OVERHEAD(%r15)
103 la %r11,STACK_FRAME_OVERHEAD(%r15)
104 .endm 105 .endm
105 106
106 .macro ADD64 high,low,timer 107 .macro ADD64 high,low,timer
@@ -150,7 +151,7 @@ ENTRY(__switch_to)
150 l %r4,__THREAD_info(%r2) # get thread_info of prev 151 l %r4,__THREAD_info(%r2) # get thread_info of prev
151 l %r5,__THREAD_info(%r3) # get thread_info of next 152 l %r5,__THREAD_info(%r3) # get thread_info of next
152 lr %r15,%r5 153 lr %r15,%r5
153 ahi %r15,STACK_SIZE # end of kernel stack of next 154 ahi %r15,STACK_INIT # end of kernel stack of next
154 st %r3,__LC_CURRENT # store task struct of next 155 st %r3,__LC_CURRENT # store task struct of next
155 st %r5,__LC_THREAD_INFO # store thread info of next 156 st %r5,__LC_THREAD_INFO # store thread info of next
156 st %r15,__LC_KERNEL_STACK # store end of kernel stack 157 st %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -178,7 +179,6 @@ sysc_stm:
178 l %r13,__LC_SVC_NEW_PSW+4 179 l %r13,__LC_SVC_NEW_PSW+4
179sysc_per: 180sysc_per:
180 l %r15,__LC_KERNEL_STACK 181 l %r15,__LC_KERNEL_STACK
181 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
182 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 182 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
183sysc_vtime: 183sysc_vtime:
184 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 184 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -188,6 +188,7 @@ sysc_vtime:
188 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 188 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
189sysc_do_svc: 189sysc_do_svc:
190 oi __TI_flags+3(%r12),_TIF_SYSCALL 190 oi __TI_flags+3(%r12),_TIF_SYSCALL
191 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
191 lh %r8,__PT_INT_CODE+2(%r11) 192 lh %r8,__PT_INT_CODE+2(%r11)
192 sla %r8,2 # shift and test for svc0 193 sla %r8,2 # shift and test for svc0
193 jnz sysc_nr_ok 194 jnz sysc_nr_ok
@@ -198,7 +199,6 @@ sysc_do_svc:
198 lr %r8,%r1 199 lr %r8,%r1
199 sla %r8,2 200 sla %r8,2
200sysc_nr_ok: 201sysc_nr_ok:
201 l %r10,BASED(.Lsys_call_table) # 31 bit system call table
202 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 202 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
203 st %r2,__PT_ORIG_GPR2(%r11) 203 st %r2,__PT_ORIG_GPR2(%r11)
204 st %r7,STACK_FRAME_OVERHEAD(%r15) 204 st %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
359 tm __LC_PGM_ILC+3,0x80 # check for per exception 359 tm __LC_PGM_ILC+3,0x80 # check for per exception
360 jnz pgm_svcper # -> single stepped svc 360 jnz pgm_svcper # -> single stepped svc
3610: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 3610: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
362 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
362 j 2f 363 j 2f
3631: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 3641: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
364 l %r15,__LC_KERNEL_STACK 365 l %r15,__LC_KERNEL_STACK
3652: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 3662: la %r11,STACK_FRAME_OVERHEAD(%r15)
366 la %r11,STACK_FRAME_OVERHEAD(%r15)
367 stm %r0,%r7,__PT_R0(%r11) 367 stm %r0,%r7,__PT_R0(%r11)
368 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC 368 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
369 stm %r8,%r9,__PT_PSW(%r11) 369 stm %r8,%r9,__PT_PSW(%r11)
@@ -485,7 +485,6 @@ io_work:
485# 485#
486io_work_user: 486io_work_user:
487 l %r1,__LC_KERNEL_STACK 487 l %r1,__LC_KERNEL_STACK
488 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
489 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 488 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
490 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 489 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
491 la %r11,STACK_FRAME_OVERHEAD(%r1) 490 la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -646,7 +645,6 @@ mcck_skip:
646 tm __PT_PSW+1(%r11),0x01 # returning to user ? 645 tm __PT_PSW+1(%r11),0x01 # returning to user ?
647 jno mcck_return 646 jno mcck_return
648 l %r1,__LC_KERNEL_STACK # switch to kernel stack 647 l %r1,__LC_KERNEL_STACK # switch to kernel stack
649 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
650 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 648 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
651 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 649 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
652 la %r11,STACK_FRAME_OVERHEAD(%r15) 650 la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -674,6 +672,7 @@ mcck_panic:
674 sra %r14,PAGE_SHIFT 672 sra %r14,PAGE_SHIFT
675 jz 0f 673 jz 0f
676 l %r15,__LC_PANIC_STACK 674 l %r15,__LC_PANIC_STACK
675 j mcck_skip
6770: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 6760: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
678 j mcck_skip 677 j mcck_skip
679 678
@@ -714,12 +713,10 @@ ENTRY(restart_int_handler)
714 */ 713 */
715stack_overflow: 714stack_overflow:
716 l %r15,__LC_PANIC_STACK # change to panic stack 715 l %r15,__LC_PANIC_STACK # change to panic stack
717 ahi %r15,-__PT_SIZE # create pt_regs 716 la %r11,STACK_FRAME_OVERHEAD(%r15)
718 stm %r0,%r7,__PT_R0(%r15) 717 stm %r0,%r7,__PT_R0(%r11)
719 stm %r8,%r9,__PT_PSW(%r15) 718 stm %r8,%r9,__PT_PSW(%r11)
720 mvc __PT_R8(32,%r11),0(%r14) 719 mvc __PT_R8(32,%r11),0(%r14)
721 lr %r15,%r11
722 ahi %r15,-STACK_FRAME_OVERHEAD
723 l %r1,BASED(1f) 720 l %r1,BASED(1f)
724 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 721 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
725 lr %r2,%r11 # pass pointer to pt_regs 722 lr %r2,%r11 # pass pointer to pt_regs
@@ -799,15 +796,14 @@ cleanup_system_call:
799 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 796 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
800 # set up saved register 11 797 # set up saved register 11
801 l %r15,__LC_KERNEL_STACK 798 l %r15,__LC_KERNEL_STACK
802 ahi %r15,-__PT_SIZE 799 la %r9,STACK_FRAME_OVERHEAD(%r15)
803 st %r15,12(%r11) # r11 pt_regs pointer 800 st %r9,12(%r11) # r11 pt_regs pointer
804 # fill pt_regs 801 # fill pt_regs
805 mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC 802 mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
806 stm %r0,%r7,__PT_R0(%r15) 803 stm %r0,%r7,__PT_R0(%r9)
807 mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW 804 mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
808 mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC 805 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
809 # setup saved register 15 806 # setup saved register 15
810 ahi %r15,-STACK_FRAME_OVERHEAD
811 st %r15,28(%r11) # r15 stack pointer 807 st %r15,28(%r11) # r15 stack pointer
812 # set new psw address and exit 808 # set new psw address and exit
813 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 809 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@@ -910,7 +906,6 @@ cleanup_idle_wait:
910.Ltrace_enter: .long do_syscall_trace_enter 906.Ltrace_enter: .long do_syscall_trace_enter
911.Ltrace_exit: .long do_syscall_trace_exit 907.Ltrace_exit: .long do_syscall_trace_exit
912.Lschedule_tail: .long schedule_tail 908.Lschedule_tail: .long schedule_tail
913.Lsys_call_table: .long sys_call_table
914.Lsysc_per: .long sysc_per + 0x80000000 909.Lsysc_per: .long sysc_per + 0x80000000
915#ifdef CONFIG_TRACE_IRQFLAGS 910#ifdef CONFIG_TRACE_IRQFLAGS
916.Lhardirqs_on: .long trace_hardirqs_on_caller 911.Lhardirqs_on: .long trace_hardirqs_on_caller
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3a736a3ed44..aa0ab02e9595 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -7,6 +7,7 @@
7#include <asm/cputime.h> 7#include <asm/cputime.h>
8 8
9extern void *restart_stack; 9extern void *restart_stack;
10extern unsigned long suspend_zero_pages;
10 11
11void system_call(void); 12void system_call(void);
12void pgm_check_handler(void); 13void pgm_check_handler(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2e6d60c55f90..4c17eece707e 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
39 39
40STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 40STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
41STACK_SIZE = 1 << STACK_SHIFT 41STACK_SIZE = 1 << STACK_SHIFT
42STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
42 43
43_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 44_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
44 _TIF_MCCK_PENDING | _TIF_PER_TRAP ) 45 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
124 srag %r14,%r14,\shift 125 srag %r14,%r14,\shift
125 jnz 1f 126 jnz 1f
126 CHECK_STACK 1<<\shift,\savearea 127 CHECK_STACK 1<<\shift,\savearea
128 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
127 j 2f 129 j 2f
1281: lg %r15,\stack # load target stack 1301: lg %r15,\stack # load target stack
1292: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 1312: la %r11,STACK_FRAME_OVERHEAD(%r15)
130 la %r11,STACK_FRAME_OVERHEAD(%r15)
131 .endm 132 .endm
132 133
133 .macro UPDATE_VTIME scratch,enter_timer 134 .macro UPDATE_VTIME scratch,enter_timer
@@ -177,7 +178,7 @@ ENTRY(__switch_to)
177 lg %r4,__THREAD_info(%r2) # get thread_info of prev 178 lg %r4,__THREAD_info(%r2) # get thread_info of prev
178 lg %r5,__THREAD_info(%r3) # get thread_info of next 179 lg %r5,__THREAD_info(%r3) # get thread_info of next
179 lgr %r15,%r5 180 lgr %r15,%r5
180 aghi %r15,STACK_SIZE # end of kernel stack of next 181 aghi %r15,STACK_INIT # end of kernel stack of next
181 stg %r3,__LC_CURRENT # store task struct of next 182 stg %r3,__LC_CURRENT # store task struct of next
182 stg %r5,__LC_THREAD_INFO # store thread info of next 183 stg %r5,__LC_THREAD_INFO # store thread info of next
183 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 184 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -203,10 +204,8 @@ sysc_stmg:
203 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 204 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
204 lg %r10,__LC_LAST_BREAK 205 lg %r10,__LC_LAST_BREAK
205 lg %r12,__LC_THREAD_INFO 206 lg %r12,__LC_THREAD_INFO
206 larl %r13,system_call
207sysc_per: 207sysc_per:
208 lg %r15,__LC_KERNEL_STACK 208 lg %r15,__LC_KERNEL_STACK
209 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 209 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
211sysc_vtime: 210sysc_vtime:
212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER 211 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@@ -217,6 +216,7 @@ sysc_vtime:
217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 216 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
218sysc_do_svc: 217sysc_do_svc:
219 oi __TI_flags+7(%r12),_TIF_SYSCALL 218 oi __TI_flags+7(%r12),_TIF_SYSCALL
219 lg %r10,__TI_sysc_table(%r12) # address of system call table
220 llgh %r8,__PT_INT_CODE+2(%r11) 220 llgh %r8,__PT_INT_CODE+2(%r11)
221 slag %r8,%r8,2 # shift and test for svc 0 221 slag %r8,%r8,2 # shift and test for svc 0
222 jnz sysc_nr_ok 222 jnz sysc_nr_ok
@@ -227,13 +227,6 @@ sysc_do_svc:
227 sth %r1,__PT_INT_CODE+2(%r11) 227 sth %r1,__PT_INT_CODE+2(%r11)
228 slag %r8,%r1,2 228 slag %r8,%r1,2
229sysc_nr_ok: 229sysc_nr_ok:
230 larl %r10,sys_call_table # 64 bit system call table
231#ifdef CONFIG_COMPAT
232 tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
233 jno sysc_noemu
234 larl %r10,sys_call_table_emu # 31 bit system call table
235sysc_noemu:
236#endif
237 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 230 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
238 stg %r2,__PT_ORIG_GPR2(%r11) 231 stg %r2,__PT_ORIG_GPR2(%r11)
239 stg %r7,STACK_FRAME_OVERHEAD(%r15) 232 stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
389 tm __LC_PGM_ILC+3,0x80 # check for per exception 382 tm __LC_PGM_ILC+3,0x80 # check for per exception
390 jnz pgm_svcper # -> single stepped svc 383 jnz pgm_svcper # -> single stepped svc
3910: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 3840: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
385 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
392 j 2f 386 j 2f
3931: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER 3871: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
394 LAST_BREAK %r14 388 LAST_BREAK %r14
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
398 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 392 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
399 jz 2f 393 jz 2f
400 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 394 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
4012: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 3952: la %r11,STACK_FRAME_OVERHEAD(%r15)
402 la %r11,STACK_FRAME_OVERHEAD(%r15)
403 stmg %r0,%r7,__PT_R0(%r11) 396 stmg %r0,%r7,__PT_R0(%r11)
404 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 397 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
405 stmg %r8,%r9,__PT_PSW(%r11) 398 stmg %r8,%r9,__PT_PSW(%r11)
@@ -526,7 +519,6 @@ io_work:
526# 519#
527io_work_user: 520io_work_user:
528 lg %r1,__LC_KERNEL_STACK 521 lg %r1,__LC_KERNEL_STACK
529 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
530 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 522 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
531 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 523 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
532 la %r11,STACK_FRAME_OVERHEAD(%r1) 524 la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -688,7 +680,6 @@ mcck_skip:
688 tm __PT_PSW+1(%r11),0x01 # returning to user ? 680 tm __PT_PSW+1(%r11),0x01 # returning to user ?
689 jno mcck_return 681 jno mcck_return
690 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 682 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
691 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
692 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 683 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
693 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 684 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
694 la %r11,STACK_FRAME_OVERHEAD(%r1) 685 la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -755,14 +746,12 @@ ENTRY(restart_int_handler)
755 * Setup a pt_regs so that show_trace can provide a good call trace. 746 * Setup a pt_regs so that show_trace can provide a good call trace.
756 */ 747 */
757stack_overflow: 748stack_overflow:
758 lg %r11,__LC_PANIC_STACK # change to panic stack 749 lg %r15,__LC_PANIC_STACK # change to panic stack
759 aghi %r11,-__PT_SIZE # create pt_regs 750 la %r11,STACK_FRAME_OVERHEAD(%r15)
760 stmg %r0,%r7,__PT_R0(%r11) 751 stmg %r0,%r7,__PT_R0(%r11)
761 stmg %r8,%r9,__PT_PSW(%r11) 752 stmg %r8,%r9,__PT_PSW(%r11)
762 mvc __PT_R8(64,%r11),0(%r14) 753 mvc __PT_R8(64,%r11),0(%r14)
763 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 754 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
764 lgr %r15,%r11
765 aghi %r15,-STACK_FRAME_OVERHEAD
766 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 755 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
767 lgr %r2,%r11 # pass pointer to pt_regs 756 lgr %r2,%r11 # pass pointer to pt_regs
768 jg kernel_stack_overflow 757 jg kernel_stack_overflow
@@ -846,15 +835,14 @@ cleanup_system_call:
846 mvc __TI_last_break(8,%r12),16(%r11) 835 mvc __TI_last_break(8,%r12),16(%r11)
8470: # set up saved register r11 8360: # set up saved register r11
848 lg %r15,__LC_KERNEL_STACK 837 lg %r15,__LC_KERNEL_STACK
849 aghi %r15,-__PT_SIZE 838 la %r9,STACK_FRAME_OVERHEAD(%r15)
850 stg %r15,24(%r11) # r11 pt_regs pointer 839 stg %r9,24(%r11) # r11 pt_regs pointer
851 # fill pt_regs 840 # fill pt_regs
852 mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC 841 mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
853 stmg %r0,%r7,__PT_R0(%r15) 842 stmg %r0,%r7,__PT_R0(%r9)
854 mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW 843 mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
855 mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC 844 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
856 # setup saved register r15 845 # setup saved register r15
857 aghi %r15,-STACK_FRAME_OVERHEAD
858 stg %r15,56(%r11) # r15 stack pointer 846 stg %r15,56(%r11) # r15 stack pointer
859 # set new psw address and exit 847 # set new psw address and exit
860 larl %r9,sysc_do_svc 848 larl %r9,sysc_do_svc
@@ -1011,6 +999,7 @@ sys_call_table:
1011#ifdef CONFIG_COMPAT 999#ifdef CONFIG_COMPAT
1012 1000
1013#define SYSCALL(esa,esame,emu) .long emu 1001#define SYSCALL(esa,esame,emu) .long emu
1002 .globl sys_call_table_emu
1014sys_call_table_emu: 1003sys_call_table_emu:
1015#include "syscalls.S" 1004#include "syscalls.S"
1016#undef SYSCALL 1005#undef SYSCALL
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1630f439cd2a..4f5ef62934a4 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -33,7 +33,7 @@ struct irq_class {
33}; 33};
34 34
35/* 35/*
36 * The list of "main" irq classes on s390. This is the list of interrrupts 36 * The list of "main" irq classes on s390. This is the list of interrupts
37 * that appear both in /proc/stat ("intr" line) and /proc/interrupts. 37 * that appear both in /proc/stat ("intr" line) and /proc/interrupts.
38 * Historically only external and I/O interrupts have been part of /proc/stat. 38 * Historically only external and I/O interrupts have been part of /proc/stat.
39 * We can't add the split external and I/O sub classes since the first field 39 * We can't add the split external and I/O sub classes since the first field
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b3de27700016..ac2178161ec3 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
13#include <linux/reboot.h> 13#include <linux/reboot.h>
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/debug_locks.h> 15#include <linux/debug_locks.h>
16#include <linux/suspend.h>
16#include <asm/cio.h> 17#include <asm/cio.h>
17#include <asm/setup.h> 18#include <asm/setup.h>
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
@@ -67,6 +68,35 @@ void setup_regs(void)
67 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); 68 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
68} 69}
69 70
71/*
72 * PM notifier callback for kdump
73 */
74static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
75 void *ptr)
76{
77 switch (action) {
78 case PM_SUSPEND_PREPARE:
79 case PM_HIBERNATION_PREPARE:
80 if (crashk_res.start)
81 crash_map_reserved_pages();
82 break;
83 case PM_POST_SUSPEND:
84 case PM_POST_HIBERNATION:
85 if (crashk_res.start)
86 crash_unmap_reserved_pages();
87 break;
88 default:
89 return NOTIFY_DONE;
90 }
91 return NOTIFY_OK;
92}
93
94static int __init machine_kdump_pm_init(void)
95{
96 pm_notifier(machine_kdump_pm_cb, 0);
97 return 0;
98}
99arch_initcall(machine_kdump_pm_init);
70#endif 100#endif
71 101
72/* 102/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 536d64579d9a..2bc3eddae34a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,18 +61,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
61 return sf->gprs[8]; 61 return sf->gprs[8];
62} 62}
63 63
64/* 64void arch_cpu_idle(void)
65 * The idle loop on a S390...
66 */
67static void default_idle(void)
68{ 65{
69 if (cpu_is_offline(smp_processor_id()))
70 cpu_die();
71 local_irq_disable();
72 if (need_resched()) {
73 local_irq_enable();
74 return;
75 }
76 local_mcck_disable(); 66 local_mcck_disable();
77 if (test_thread_flag(TIF_MCCK_PENDING)) { 67 if (test_thread_flag(TIF_MCCK_PENDING)) {
78 local_mcck_enable(); 68 local_mcck_enable();
@@ -83,19 +73,15 @@ static void default_idle(void)
83 vtime_stop_cpu(); 73 vtime_stop_cpu();
84} 74}
85 75
86void cpu_idle(void) 76void arch_cpu_idle_exit(void)
87{ 77{
88 for (;;) { 78 if (test_thread_flag(TIF_MCCK_PENDING))
89 tick_nohz_idle_enter(); 79 s390_handle_mcck();
90 rcu_idle_enter(); 80}
91 while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING)) 81
92 default_idle(); 82void arch_cpu_idle_dead(void)
93 rcu_idle_exit(); 83{
94 tick_nohz_idle_exit(); 84 cpu_die();
95 if (test_thread_flag(TIF_MCCK_PENDING))
96 s390_handle_mcck();
97 schedule_preempt_disabled();
98 }
99} 85}
100 86
101extern void __kprobes kernel_thread_starter(void); 87extern void __kprobes kernel_thread_starter(void);
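
Context for the conversion above: the per-arch cpu_idle() loop removed here is now provided by common code, which calls back into the three hooks this patch defines. A very rough sketch of that core loop, pieced together from the removed s390 version and the hook names — not the literal core implementation:

/* Approximate shape of the generic idle loop behind cpu_startup_entry(). */
void cpu_startup_entry(enum cpuhp_state state)
{
	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* s390: cpu_die() */
			local_irq_disable();
			arch_cpu_idle();		/* s390: vtime_stop_cpu() path */
			arch_cpu_idle_exit();		/* s390: handle pending mcck */
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

The smp.c hunk further down switches the secondary-CPU startup path to cpu_startup_entry(CPUHP_ONLINE) accordingly.
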
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 29268859d8ee..0f419c5765c8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
377 PSW_MASK_DAT | PSW_MASK_MCHECK; 377 PSW_MASK_DAT | PSW_MASK_MCHECK;
378 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 378 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
379 lc->clock_comparator = -1ULL; 379 lc->clock_comparator = -1ULL;
380 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 380 lc->kernel_stack = ((unsigned long) &init_thread_union)
381 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
381 lc->async_stack = (unsigned long) 382 lc->async_stack = (unsigned long)
382 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 383 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
384 + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
383 lc->panic_stack = (unsigned long) 385 lc->panic_stack = (unsigned long)
384 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; 386 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
387 + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
385 lc->current_task = (unsigned long) init_thread_union.thread_info.task; 388 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
386 lc->thread_info = (unsigned long) &init_thread_union; 389 lc->thread_info = (unsigned long) &init_thread_union;
387 lc->machine_flags = S390_lowcore.machine_flags; 390 lc->machine_flags = S390_lowcore.machine_flags;
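
The same arithmetic repeats in the smp.c hunk that follows: the initial stack pointer of each stack (kernel, async, panic) now sits one standard frame plus one pt_regs below the top, presumably so a complete register save area always fits above it. A standalone sketch of the computation, with made-up sizes standing in for THREAD_SIZE, STACK_FRAME_OVERHEAD and sizeof(struct pt_regs):

#include <stdio.h>

#define THREAD_SIZE		(16UL * 1024)	/* illustrative only */
#define STACK_FRAME_OVERHEAD	160UL		/* illustrative only */
#define PT_REGS_SIZE		336UL		/* illustrative only */

int main(void)
{
	unsigned long stack_base = 0x100000UL;	/* pretend allocation */
	unsigned long old_sp = stack_base + THREAD_SIZE;
	unsigned long new_sp = stack_base + THREAD_SIZE
			       - STACK_FRAME_OVERHEAD - PT_REGS_SIZE;

	/* The reserved gap holds a call-frame save area plus a pt_regs. */
	printf("old sp %#lx, new sp %#lx, %lu bytes reserved\n",
	       old_sp, new_sp, old_sp - new_sp);
	return 0;
}

Note that smp_prepare_boot_cpu() applies the inverse (adding the overhead back) when deriving the stack bases from the already-adjusted lowcore values.
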
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 549c9d173c0f..8074cb4b7cbf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
181 lc = pcpu->lowcore; 181 lc = pcpu->lowcore;
182 memcpy(lc, &S390_lowcore, 512); 182 memcpy(lc, &S390_lowcore, 512);
183 memset((char *) lc + 512, 0, sizeof(*lc) - 512); 183 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
184 lc->async_stack = pcpu->async_stack + ASYNC_SIZE; 184 lc->async_stack = pcpu->async_stack + ASYNC_SIZE
185 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; 185 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
186 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
187 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
186 lc->cpu_nr = cpu; 188 lc->cpu_nr = cpu;
187#ifndef CONFIG_64BIT 189#ifndef CONFIG_64BIT
188 if (MACHINE_HAS_IEEE) { 190 if (MACHINE_HAS_IEEE) {
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
253 struct _lowcore *lc = pcpu->lowcore; 255 struct _lowcore *lc = pcpu->lowcore;
254 struct thread_info *ti = task_thread_info(tsk); 256 struct thread_info *ti = task_thread_info(tsk);
255 257
256 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; 258 lc->kernel_stack = (unsigned long) task_stack_page(tsk)
259 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
257 lc->thread_info = (unsigned long) task_thread_info(tsk); 260 lc->thread_info = (unsigned long) task_thread_info(tsk);
258 lc->current_task = (unsigned long) tsk; 261 lc->current_task = (unsigned long) tsk;
259 lc->user_timer = ti->user_timer; 262 lc->user_timer = ti->user_timer;
@@ -711,8 +714,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
711 set_cpu_online(smp_processor_id(), true); 714 set_cpu_online(smp_processor_id(), true);
712 inc_irq_stat(CPU_RST); 715 inc_irq_stat(CPU_RST);
713 local_irq_enable(); 716 local_irq_enable();
714 /* cpu_idle will call schedule for us */ 717 cpu_startup_entry(CPUHP_ONLINE);
715 cpu_idle();
716} 718}
717 719
718/* Upping and downing of CPUs */ 720/* Upping and downing of CPUs */
@@ -810,8 +812,10 @@ void __init smp_prepare_boot_cpu(void)
810 pcpu->state = CPU_STATE_CONFIGURED; 812 pcpu->state = CPU_STATE_CONFIGURED;
811 pcpu->address = boot_cpu_address; 813 pcpu->address = boot_cpu_address;
812 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); 814 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
813 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; 815 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
814 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; 816 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
817 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
818 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
815 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 819 S390_lowcore.percpu_offset = __per_cpu_offset[0];
816 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 820 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
817 set_cpu_present(0, true); 821 set_cpu_present(0, true);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index aa1494d0e380..c479d2f9605b 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -41,6 +41,7 @@ struct page_key_data {
41static struct page_key_data *page_key_data; 41static struct page_key_data *page_key_data;
42static struct page_key_data *page_key_rp, *page_key_wp; 42static struct page_key_data *page_key_rp, *page_key_wp;
43static unsigned long page_key_rx, page_key_wx; 43static unsigned long page_key_rx, page_key_wx;
44unsigned long suspend_zero_pages;
44 45
45/* 46/*
46 * For each page in the hibernation image one additional byte is 47 * For each page in the hibernation image one additional byte is
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
149 return 0; 150 return 0;
150} 151}
151 152
153/*
154 * PM notifier callback for suspend
155 */
156static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
157 void *ptr)
158{
159 switch (action) {
160 case PM_SUSPEND_PREPARE:
161 case PM_HIBERNATION_PREPARE:
162 suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
163 if (!suspend_zero_pages)
164 return NOTIFY_BAD;
165 break;
166 case PM_POST_SUSPEND:
167 case PM_POST_HIBERNATION:
168 free_pages(suspend_zero_pages, LC_ORDER);
169 break;
170 default:
171 return NOTIFY_DONE;
172 }
173 return NOTIFY_OK;
174}
175
176static int __init suspend_pm_init(void)
177{
178 pm_notifier(suspend_pm_cb, 0);
179 return 0;
180}
181arch_initcall(suspend_pm_init);
182
152void save_processor_state(void) 183void save_processor_state(void)
153{ 184{
154 /* swsusp_arch_suspend() actually saves all cpu register contents. 185 /* swsusp_arch_suspend() actually saves all cpu register contents.
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0617b5..c487be4cfc81 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
36 /* Store prefix register on stack */ 36 /* Store prefix register on stack */
37 stpx __SF_EMPTY(%r15) 37 stpx __SF_EMPTY(%r15)
38 38
39 /* Save prefix register contents for lowcore */ 39 /* Save prefix register contents for lowcore copy */
40 llgf %r4,__SF_EMPTY(%r15) 40 llgf %r10,__SF_EMPTY(%r15)
41 41
42 /* Get pointer to save area */ 42 /* Get pointer to save area */
43 lghi %r1,0x1000 43 lghi %r1,0x1000
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
91 xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) 91 xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
92 spx __SF_EMPTY(%r15) 92 spx __SF_EMPTY(%r15)
93 93
94 /* Save absolute zero pages */
95 larl %r2,suspend_zero_pages
96 lg %r2,0(%r2)
97 lghi %r4,0
98 lghi %r3,2*PAGE_SIZE
99 lghi %r5,2*PAGE_SIZE
1001: mvcle %r2,%r4,0
101 jo 1b
102
103 /* Copy lowcore to absolute zero lowcore */
94 lghi %r2,0 104 lghi %r2,0
105 lgr %r4,%r10
95 lghi %r3,2*PAGE_SIZE 106 lghi %r3,2*PAGE_SIZE
96 lghi %r5,2*PAGE_SIZE 107 lghi %r5,2*PAGE_SIZE
971: mvcle %r2,%r4,0 1081: mvcle %r2,%r4,0
@@ -248,8 +259,20 @@ restore_registers:
248 /* Load old stack */ 259 /* Load old stack */
249 lg %r15,0x2f8(%r13) 260 lg %r15,0x2f8(%r13)
250 261
262 /* Save prefix register */
263 mvc __SF_EMPTY(4,%r15),0x318(%r13)
264
265 /* Restore absolute zero pages */
266 lghi %r2,0
267 larl %r4,suspend_zero_pages
268 lg %r4,0(%r4)
269 lghi %r3,2*PAGE_SIZE
270 lghi %r5,2*PAGE_SIZE
2711: mvcle %r2,%r4,0
272 jo 1b
273
251 /* Restore prefix register */ 274 /* Restore prefix register */
252 spx 0x318(%r13) 275 spx __SF_EMPTY(%r15)
253 276
254 /* Activate DAT */ 277 /* Activate DAT */
255 stosm __SF_EMPTY(%r15),0x04 278 stosm __SF_EMPTY(%r15),0x04
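
What the added MVCLE loops accomplish, modeled in user space: the two pages at absolute address zero are parked in suspend_zero_pages before the lowcore copy overwrites them, and put back on resume before the prefix register is switched back. All buffers below are stand-ins for the real physical locations; the real code runs with DAT off:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static unsigned char absolute_zero[2 * PAGE_SIZE];	 /* pages at address 0 */
static unsigned char lowcore[2 * PAGE_SIZE];		 /* CPU's prefixed lowcore */
static unsigned char suspend_zero_pages[2 * PAGE_SIZE]; /* scratch from suspend.c */

static void suspend_side(void)
{
	memcpy(suspend_zero_pages, absolute_zero, sizeof(absolute_zero));
	memcpy(absolute_zero, lowcore, sizeof(lowcore)); /* now safe to clobber */
}

static void resume_side(void)
{
	memcpy(absolute_zero, suspend_zero_pages, sizeof(absolute_zero));
	/* only after this does the real code reload the prefix register */
}

int main(void)
{
	absolute_zero[0] = 0xaa;
	suspend_side();
	resume_side();
	printf("restored byte: %#x\n", absolute_zero[0]);	/* 0xaa again */
	return 0;
}
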
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index d0964d22adb5..23eb222c1658 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -132,19 +132,9 @@ SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
132 * to 132 * to
133 * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len 133 * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
134 */ 134 */
135SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset, 135SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset,
136 u32 len_high, u32 len_low) 136 u32, len_high, u32, len_low)
137{ 137{
138 return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low); 138 return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
139} 139}
140#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
141asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
142 long len_high, long len_low)
143{
144 return SYSC_s390_fallocate((int) fd, (int) mode, offset,
145 (u32) len_high, (u32) len_low);
146}
147SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate);
148#endif
149
150#endif 140#endif
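
The removed #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS block did by hand what SYSCALL_DEFINE5() now generates automatically. Roughly, and only roughly, the macro produces something like the sketch below, so the long-to-int narrowing of the register-passed arguments still happens; this is not the literal expansion:

static inline long SYSC_s390_fallocate(int fd, int mode, loff_t offset,
				       u32 len_high, u32 len_low)
{
	return sys_fallocate(fd, mode, offset,
			     ((u64)len_high << 32) | len_low);
}

asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
				   long len_high, long len_low)
{
	return SYSC_s390_fallocate((int) fd, (int) mode, offset,
				   (u32) len_high, (u32) len_low);
}
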
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 630b935d1284..d2baabed7148 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -85,7 +85,7 @@ SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
85SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper) 85SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
86SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */ 86SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */
87SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper) 87SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
88SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper) 88SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage)
89SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday_wrapper) 89SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday_wrapper)
90SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday_wrapper) 90SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday_wrapper)
91SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */ 91SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */
@@ -118,14 +118,14 @@ SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
118SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper) 118SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
119SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper) 119SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
120NI_SYSCALL /* old uname syscall */ 120NI_SYSCALL /* old uname syscall */
121SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 110 */ 121SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
122SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup) 122SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
123NI_SYSCALL /* old "idle" system call */ 123NI_SYSCALL /* old "idle" system call */
124NI_SYSCALL /* vm86old for i386 */ 124NI_SYSCALL /* vm86old for i386 */
125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4) 125SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ 126SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) 127SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
128SYSCALL(sys_s390_ipc,sys_s390_ipc,sys32_ipc_wrapper) 128SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) 129SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn) 130SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
131SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */ 131SYSCALL(sys_clone,sys_clone,sys_clone_wrapper) /* 120 */
@@ -195,7 +195,7 @@ SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) 195SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ 196SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
197SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack) 197SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
198SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) 198SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
199NI_SYSCALL /* streams1 */ 199NI_SYSCALL /* streams1 */
200NI_SYSCALL /* streams2 */ 200NI_SYSCALL /* streams2 */
201SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ 201SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
@@ -231,7 +231,7 @@ SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
231SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */ 231SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */
232SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper) 232SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
233SYSCALL(sys_readahead,sys_readahead,sys32_readahead_wrapper) 233SYSCALL(sys_readahead,sys_readahead,sys32_readahead_wrapper)
234SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64_wrapper) 234SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
235SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper) 235SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
236SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */ 236SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
237SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper) 237SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
@@ -317,20 +317,20 @@ SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
317SYSCALL(sys_splice,sys_splice,sys_splice_wrapper) 317SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
318SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper) 318SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
319SYSCALL(sys_tee,sys_tee,sys_tee_wrapper) 319SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
320SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper) 320SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
321NI_SYSCALL /* 310 sys_move_pages */ 321NI_SYSCALL /* 310 sys_move_pages */
322SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper) 322SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
323SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper) 323SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
324SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper) 324SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
325SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper) 325SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
326SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */ 326SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */
327SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper) 327SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
328NI_SYSCALL /* 317 old sys_timer_fd */ 328NI_SYSCALL /* 317 old sys_timer_fd */
329SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper) 329SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
330SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper) 330SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper)
331SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */ 331SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
332SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime) 332SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
333SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4_wrapper) 333SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
334SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper) 334SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper)
335SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper) 335SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
336SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */ 336SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 13dd63fba367..c5762324d9ee 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -12,49 +12,16 @@
12 * 'Traps.c' handles hardware traps and faults after we have saved some 12 * 'Traps.c' handles hardware traps and faults after we have saved some
13 * state in 'asm.s'. 13 * state in 'asm.s'.
14 */ 14 */
15#include <linux/sched.h> 15#include <linux/kprobes.h>
16#include <linux/kernel.h> 16#include <linux/kdebug.h>
17#include <linux/string.h> 17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/ptrace.h> 18#include <linux/ptrace.h>
20#include <linux/timer.h> 19#include <linux/sched.h>
21#include <linux/mm.h> 20#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/seq_file.h>
26#include <linux/delay.h>
27#include <linux/module.h>
28#include <linux/kdebug.h>
29#include <linux/kallsyms.h>
30#include <linux/reboot.h>
31#include <linux/kprobes.h>
32#include <linux/bug.h>
33#include <linux/utsname.h>
34#include <asm/uaccess.h>
35#include <asm/io.h>
36#include <linux/atomic.h>
37#include <asm/mathemu.h>
38#include <asm/cpcmd.h>
39#include <asm/lowcore.h>
40#include <asm/debug.h>
41#include <asm/ipl.h>
42#include "entry.h" 21#include "entry.h"
43 22
44int show_unhandled_signals = 1; 23int show_unhandled_signals = 1;
45 24
46#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
47
48#ifndef CONFIG_64BIT
49#define LONG "%08lx "
50#define FOURLONG "%08lx %08lx %08lx %08lx\n"
51static int kstack_depth_to_print = 12;
52#else /* CONFIG_64BIT */
53#define LONG "%016lx "
54#define FOURLONG "%016lx %016lx %016lx %016lx\n"
55static int kstack_depth_to_print = 20;
56#endif /* CONFIG_64BIT */
57
58static inline void __user *get_trap_ip(struct pt_regs *regs) 25static inline void __user *get_trap_ip(struct pt_regs *regs)
59{ 26{
60#ifdef CONFIG_64BIT 27#ifdef CONFIG_64BIT
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
72#endif 39#endif
73} 40}
74 41
75/*
76 * For show_trace we have tree different stack to consider:
77 * - the panic stack which is used if the kernel stack has overflown
78 * - the asynchronous interrupt stack (cpu related)
79 * - the synchronous kernel stack (process related)
80 * The stack trace can start at any of the three stack and can potentially
81 * touch all of them. The order is: panic stack, async stack, sync stack.
82 */
83static unsigned long
84__show_trace(unsigned long sp, unsigned long low, unsigned long high)
85{
86 struct stack_frame *sf;
87 struct pt_regs *regs;
88
89 while (1) {
90 sp = sp & PSW_ADDR_INSN;
91 if (sp < low || sp > high - sizeof(*sf))
92 return sp;
93 sf = (struct stack_frame *) sp;
94 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
95 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
96 /* Follow the backchain. */
97 while (1) {
98 low = sp;
99 sp = sf->back_chain & PSW_ADDR_INSN;
100 if (!sp)
101 break;
102 if (sp <= low || sp > high - sizeof(*sf))
103 return sp;
104 sf = (struct stack_frame *) sp;
105 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
106 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
107 }
108 /* Zero backchain detected, check for interrupt frame. */
109 sp = (unsigned long) (sf + 1);
110 if (sp <= low || sp > high - sizeof(*regs))
111 return sp;
112 regs = (struct pt_regs *) sp;
113 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
114 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
115 low = sp;
116 sp = regs->gprs[15];
117 }
118}
119
120static void show_trace(struct task_struct *task, unsigned long *stack)
121{
122 register unsigned long __r15 asm ("15");
123 unsigned long sp;
124
125 sp = (unsigned long) stack;
126 if (!sp)
127 sp = task ? task->thread.ksp : __r15;
128 printk("Call Trace:\n");
129#ifdef CONFIG_CHECK_STACK
130 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
131 S390_lowcore.panic_stack);
132#endif
133 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
134 S390_lowcore.async_stack);
135 if (task)
136 __show_trace(sp, (unsigned long) task_stack_page(task),
137 (unsigned long) task_stack_page(task) + THREAD_SIZE);
138 else
139 __show_trace(sp, S390_lowcore.thread_info,
140 S390_lowcore.thread_info + THREAD_SIZE);
141 if (!task)
142 task = current;
143 debug_show_held_locks(task);
144}
145
146void show_stack(struct task_struct *task, unsigned long *sp)
147{
148 register unsigned long * __r15 asm ("15");
149 unsigned long *stack;
150 int i;
151
152 if (!sp)
153 stack = task ? (unsigned long *) task->thread.ksp : __r15;
154 else
155 stack = sp;
156
157 for (i = 0; i < kstack_depth_to_print; i++) {
158 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
159 break;
160 if ((i * sizeof(long) % 32) == 0)
161 printk("%s ", i == 0 ? "" : "\n");
162 printk(LONG, *stack++);
163 }
164 printk("\n");
165 show_trace(task, sp);
166}
167
168static void show_last_breaking_event(struct pt_regs *regs)
169{
170#ifdef CONFIG_64BIT
171 printk("Last Breaking-Event-Address:\n");
172 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
173 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
174#endif
175}
176
177/*
178 * The architecture-independent dump_stack generator
179 */
180void dump_stack(void)
181{
182 printk("CPU: %d %s %s %.*s\n",
183 task_thread_info(current)->cpu, print_tainted(),
184 init_utsname()->release,
185 (int)strcspn(init_utsname()->version, " "),
186 init_utsname()->version);
187 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
188 current->comm, current->pid, current,
189 (void *) current->thread.ksp);
190 show_stack(NULL, NULL);
191}
192EXPORT_SYMBOL(dump_stack);
193
194static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
195{
196 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
197}
198
199void show_registers(struct pt_regs *regs)
200{
201 char *mode;
202
203 mode = user_mode(regs) ? "User" : "Krnl";
204 printk("%s PSW : %p %p",
205 mode, (void *) regs->psw.mask,
206 (void *) regs->psw.addr);
207 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
208 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
209 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
210 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
211 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
212 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
213 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
214 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
215#ifdef CONFIG_64BIT
216 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
217#endif
218 printk("\n%s GPRS: " FOURLONG, mode,
219 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
220 printk(" " FOURLONG,
221 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
222 printk(" " FOURLONG,
223 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
224 printk(" " FOURLONG,
225 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
226
227 show_code(regs);
228}
229
230void show_regs(struct pt_regs *regs)
231{
232 printk("CPU: %d %s %s %.*s\n",
233 task_thread_info(current)->cpu, print_tainted(),
234 init_utsname()->release,
235 (int)strcspn(init_utsname()->version, " "),
236 init_utsname()->version);
237 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
238 current->comm, current->pid, current,
239 (void *) current->thread.ksp);
240 show_registers(regs);
241 /* Show stack backtrace if pt_regs is from kernel mode */
242 if (!user_mode(regs))
243 show_trace(NULL, (unsigned long *) regs->gprs[15]);
244 show_last_breaking_event(regs);
245}
246
247static DEFINE_SPINLOCK(die_lock);
248
249void die(struct pt_regs *regs, const char *str)
250{
251 static int die_counter;
252
253 oops_enter();
254 lgr_info_log();
255 debug_stop_all();
256 console_verbose();
257 spin_lock_irq(&die_lock);
258 bust_spinlocks(1);
259 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
260#ifdef CONFIG_PREEMPT
261 printk("PREEMPT ");
262#endif
263#ifdef CONFIG_SMP
264 printk("SMP ");
265#endif
266#ifdef CONFIG_DEBUG_PAGEALLOC
267 printk("DEBUG_PAGEALLOC");
268#endif
269 printk("\n");
270 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
271 print_modules();
272 show_regs(regs);
273 bust_spinlocks(0);
274 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
275 spin_unlock_irq(&die_lock);
276 if (in_interrupt())
277 panic("Fatal exception in interrupt");
278 if (panic_on_oops)
279 panic("Fatal exception: panic_on_oops");
280 oops_exit();
281 do_exit(SIGSEGV);
282}
283
284static inline void report_user_fault(struct pt_regs *regs, int signr) 42static inline void report_user_fault(struct pt_regs *regs, int signr)
285{ 43{
286 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 44 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index a0042acbd989..3fb09359eda6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -158,8 +158,6 @@ void __kprobes vtime_stop_cpu(void)
158 unsigned long psw_mask; 158 unsigned long psw_mask;
159 159
160 trace_hardirqs_on(); 160 trace_hardirqs_on();
161 /* Don't trace preempt off for idle. */
162 stop_critical_timings();
163 161
164 /* Wait for external, I/O or machine check interrupt. */ 162 /* Wait for external, I/O or machine check interrupt. */
165 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | 163 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
@@ -169,9 +167,6 @@ void __kprobes vtime_stop_cpu(void)
169 /* Call the assembler magic in entry.S */ 167 /* Call the assembler magic in entry.S */
170 psw_idle(idle, psw_mask); 168 psw_idle(idle, psw_mask);
171 169
172 /* Reenable preemption tracer. */
173 start_critical_timings();
174
175 /* Account time spent with enabled wait psw loaded as idle time. */ 170 /* Account time spent with enabled wait psw loaded as idle time. */
176 idle->sequence++; 171 idle->sequence++;
177 smp_wmb(); 172 smp_wmb();
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 2b29e62351d3..c2f582bb1cb2 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -67,7 +67,7 @@ TRACE_EVENT(kvm_s390_sie_fault,
67#define sie_intercept_code \ 67#define sie_intercept_code \
68 {0x04, "Instruction"}, \ 68 {0x04, "Instruction"}, \
69 {0x08, "Program interruption"}, \ 69 {0x08, "Program interruption"}, \
70 {0x0C, "Instruction and program interuption"}, \ 70 {0x0C, "Instruction and program interruption"}, \
71 {0x10, "External request"}, \ 71 {0x10, "External request"}, \
72 {0x14, "External interruption"}, \ 72 {0x14, "External interruption"}, \
73 {0x18, "I/O request"}, \ 73 {0x18, "I/O request"}, \
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
117 __entry->instruction, 117 __entry->instruction,
118 insn_to_mnemonic((unsigned char *) 118 insn_to_mnemonic((unsigned char *)
119 &__entry->instruction, 119 &__entry->instruction,
120 __entry->insn) ? 120 __entry->insn, sizeof(__entry->insn)) ?
121 "unknown" : __entry->insn) 121 "unknown" : __entry->insn)
122 ); 122 );
123 123
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 6ab0d0b5cec8..20b0e97a7df2 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,7 +3,6 @@
3# 3#
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
6obj-y += usercopy.o
7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 6obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
8obj-$(CONFIG_64BIT) += mem64.o 7obj-$(CONFIG_64BIT) += mem64.o
9lib-$(CONFIG_64BIT) += uaccess_mvcos.o 8lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
deleted file mode 100644
index 14b363fec8a2..000000000000
--- a/arch/s390/lib/usercopy.c
+++ /dev/null
@@ -1,8 +0,0 @@
1#include <linux/module.h>
2#include <linux/bug.h>
3
4void copy_from_user_overflow(void)
5{
6 WARN(1, "Buffer overflow detected!\n");
7}
8EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e94282910..9d84a1feefef 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
458 if (rc) 458 if (rc)
459 goto out_pm; 459 goto out_pm;
460 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); 460 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
461 rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0; 461 if (!IS_ERR(cmm_thread_ptr))
462 if (rc) 462 return 0;
463 goto out_kthread;
464 return 0;
465 463
466out_kthread: 464 rc = PTR_ERR(cmm_thread_ptr);
467 unregister_pm_notifier(&cmm_power_notifier); 465 unregister_pm_notifier(&cmm_power_notifier);
468out_pm: 466out_pm:
469 unregister_oom_notifier(&cmm_oom_nb); 467 unregister_oom_notifier(&cmm_oom_nb);
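
The restructured error path above leans on the usual ERR_PTR convention: kthread_run() returns either a task pointer or an encoded negative errno in the topmost page-sized range of the address space. A small user-space model of that convention — constants and the fake allocator are illustrative, not the kernel's:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error)       { return (void *)error; }
static long  PTR_ERR(const void *ptr)  { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_kthread_run(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;	/* fake task */
}

int main(void)
{
	void *t = fake_kthread_run(1);

	if (!IS_ERR(t))
		return 0;				/* success path returns early */
	printf("kthread_run failed: %ld\n", PTR_ERR(t));	/* prints -12 */
	return 1;
}
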
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63b8fc4..047c3e4c59a2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
395 int fault; 395 int fault;
396 396
397 trans_exc_code = regs->int_parm_long; 397 trans_exc_code = regs->int_parm_long;
398 /* Protection exception is suppressing, decrement psw address. */ 398 /*
399 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); 399 * Protection exceptions are suppressing, decrement psw address.
400 * The exception to this rule are aborted transactions, for these
401 * the PSW already points to the correct location.
402 */
403 if (!(regs->int_code & 0x200))
404 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
400 /* 405 /*
401 * Check for low-address protection. This needs to be treated 406 * Check for low-address protection. This needs to be treated
402 * as a special case because the translation exception code 407 * as a special case because the translation exception code
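
A standalone sketch of the rewind this hunk makes conditional: protection exceptions are suppressing, the instruction length appears to travel in the upper half of int_code, and the new 0x200 test skips the rewind for aborted transactions, where the PSW already points at the right place. Values below are made up, and the real __rewind_psw() additionally handles addressing-mode wraparound:

#include <stdio.h>

int main(void)
{
	unsigned long psw_addr = 0x10008UL;		/* address after the insn */
	unsigned int int_code = (6U << 16) | 0x0004;	/* ilen 6, protection PIC */
	unsigned int tx_abort = int_code & 0x200;	/* per the new check */

	if (!tx_abort)
		psw_addr -= int_code >> 16;	/* step back to the insn itself */
	printf("faulting instruction at %#lx\n", psw_addr);
	return 0;
}
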
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 532525ec88c1..121089d57802 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page)
39 if (!ptep) 39 if (!ptep)
40 return -ENOMEM; 40 return -ENOMEM;
41 41
42 pte = mk_pte(page, PAGE_RW); 42 pte_val(pte) = addr;
43 for (i = 0; i < PTRS_PER_PTE; i++) { 43 for (i = 0; i < PTRS_PER_PTE; i++) {
44 set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte); 44 set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
45 pte_val(pte) += PAGE_SIZE; 45 pte_val(pte) += PAGE_SIZE;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2c641..0b09b2342302 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
42unsigned long empty_zero_page, zero_page_mask; 42unsigned long empty_zero_page, zero_page_mask;
43EXPORT_SYMBOL(empty_zero_page); 43EXPORT_SYMBOL(empty_zero_page);
44 44
45static unsigned long __init setup_zero_pages(void) 45static void __init setup_zero_pages(void)
46{ 46{
47 struct cpuid cpu_id; 47 struct cpuid cpu_id;
48 unsigned int order; 48 unsigned int order;
49 unsigned long size;
50 struct page *page; 49 struct page *page;
51 int i; 50 int i;
52 51
@@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void)
63 break; 62 break;
64 case 0x2097: /* z10 */ 63 case 0x2097: /* z10 */
65 case 0x2098: /* z10 */ 64 case 0x2098: /* z10 */
66 default: 65 case 0x2817: /* z196 */
66 case 0x2818: /* z196 */
67 order = 2; 67 order = 2;
68 break; 68 break;
69 case 0x2827: /* zEC12 */
70 default:
71 order = 5;
72 break;
69 } 73 }
74 /* Limit number of empty zero pages for small memory sizes */
75 if (order > 2 && totalram_pages <= 16384)
76 order = 2;
70 77
71 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 78 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
72 if (!empty_zero_page) 79 if (!empty_zero_page)
@@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void)
75 page = virt_to_page((void *) empty_zero_page); 82 page = virt_to_page((void *) empty_zero_page);
76 split_page(page, order); 83 split_page(page, order);
77 for (i = 1 << order; i > 0; i--) { 84 for (i = 1 << order; i > 0; i--) {
78 SetPageReserved(page); 85 mark_page_reserved(page);
79 page++; 86 page++;
80 } 87 }
81 88
82 size = PAGE_SIZE << order; 89 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
83 zero_page_mask = (size - 1) & PAGE_MASK;
84
85 return 1UL << order;
86} 90}
87 91
88/* 92/*
@@ -139,7 +143,7 @@ void __init mem_init(void)
139 143
140 /* this will put all low memory onto the freelists */ 144 /* this will put all low memory onto the freelists */
141 totalram_pages += free_all_bootmem(); 145 totalram_pages += free_all_bootmem();
142 totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ 146 setup_zero_pages(); /* Setup zeroed pages. */
143 147
144 reservedpages = 0; 148 reservedpages = 0;
145 149
@@ -158,34 +162,15 @@ void __init mem_init(void)
158 PFN_ALIGN((unsigned long)&_eshared) - 1); 162 PFN_ALIGN((unsigned long)&_eshared) - 1);
159} 163}
160 164
161void free_init_pages(char *what, unsigned long begin, unsigned long end)
162{
163 unsigned long addr = begin;
164
165 if (begin >= end)
166 return;
167 for (; addr < end; addr += PAGE_SIZE) {
168 ClearPageReserved(virt_to_page(addr));
169 init_page_count(virt_to_page(addr));
170 memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
171 PAGE_SIZE);
172 free_page(addr);
173 totalram_pages++;
174 }
175 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
176}
177
178void free_initmem(void) 165void free_initmem(void)
179{ 166{
180 free_init_pages("unused kernel memory", 167 free_initmem_default(0);
181 (unsigned long)&__init_begin,
182 (unsigned long)&__init_end);
183} 168}
184 169
185#ifdef CONFIG_BLK_DEV_INITRD 170#ifdef CONFIG_BLK_DEV_INITRD
186void __init free_initrd_mem(unsigned long start, unsigned long end) 171void __init free_initrd_mem(unsigned long start, unsigned long end)
187{ 172{
188 free_init_pages("initrd memory", start, end); 173 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
189} 174}
190#endif 175#endif
191 176
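
The zero_page_mask computed in the init.c hunk feeds the colored-zero-page scheme: ZERO_PAGE(vaddr) selects one of the 2^order zeroed pages based on the faulting address, presumably so read-only zero mappings spread across cache colors, which is why the larger machines get a larger order. A standalone sketch of the arithmetic with made-up constants:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int order = 5;				/* zEC12 case above */
	unsigned long empty_zero_page = 0x200000UL;	/* pretend allocation */
	unsigned long zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
	unsigned long vaddr = 0x7f00deadb000UL;

	unsigned long zp = empty_zero_page + (vaddr & zero_page_mask);
	printf("mask %#lx -> zero page at %#lx\n", zero_page_mask, zp);
	return 0;
}
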
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040ed5e59..80adfbf75065 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10#include <asm/page.h> 10#include <asm/page.h>
11 11
12static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
13{
14 asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
15 : [addr] "+a" (addr) : [skey] "d" (skey));
16 return addr;
17}
18
12void storage_key_init_range(unsigned long start, unsigned long end) 19void storage_key_init_range(unsigned long start, unsigned long end)
13{ 20{
14 unsigned long boundary, function, size; 21 unsigned long boundary, size;
15 22
16 while (start < end) { 23 while (start < end) {
17 if (MACHINE_HAS_EDAT2) {
18 /* set storage keys for a 2GB frame */
19 function = 0x22000 | PAGE_DEFAULT_KEY;
20 size = 1UL << 31;
21 boundary = (start + size) & ~(size - 1);
22 if (boundary <= end) {
23 do {
24 start = pfmf(function, start);
25 } while (start < boundary);
26 continue;
27 }
28 }
29 if (MACHINE_HAS_EDAT1) { 24 if (MACHINE_HAS_EDAT1) {
30 /* set storage keys for a 1MB frame */ 25 /* set storage keys for a 1MB frame */
31 function = 0x21000 | PAGE_DEFAULT_KEY;
32 size = 1UL << 20; 26 size = 1UL << 20;
33 boundary = (start + size) & ~(size - 1); 27 boundary = (start + size) & ~(size - 1);
34 if (boundary <= end) { 28 if (boundary <= end) {
35 do { 29 do {
36 start = pfmf(function, start); 30 start = sske_frame(start, PAGE_DEFAULT_KEY);
37 } while (start < boundary); 31 } while (start < boundary);
38 continue; 32 continue;
39 } 33 }
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a34313..bd954e96f51c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
379} 379}
380EXPORT_SYMBOL_GPL(gmap_map_segment); 380EXPORT_SYMBOL_GPL(gmap_map_segment);
381 381
382/* 382static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
383 * this function is assumed to be called with mmap_sem held
384 */
385unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
386{ 383{
387 unsigned long *table, vmaddr, segment; 384 unsigned long *table;
388 struct mm_struct *mm;
389 struct gmap_pgtable *mp;
390 struct gmap_rmap *rmap;
391 struct vm_area_struct *vma;
392 struct page *page;
393 pgd_t *pgd;
394 pud_t *pud;
395 pmd_t *pmd;
396 385
397 current->thread.gmap_addr = address;
398 mm = gmap->mm;
399 /* Walk the gmap address space page table */
400 table = gmap->table + ((address >> 53) & 0x7ff); 386 table = gmap->table + ((address >> 53) & 0x7ff);
401 if (unlikely(*table & _REGION_ENTRY_INV)) 387 if (unlikely(*table & _REGION_ENTRY_INV))
402 return -EFAULT; 388 return ERR_PTR(-EFAULT);
403 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 389 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
404 table = table + ((address >> 42) & 0x7ff); 390 table = table + ((address >> 42) & 0x7ff);
405 if (unlikely(*table & _REGION_ENTRY_INV)) 391 if (unlikely(*table & _REGION_ENTRY_INV))
406 return -EFAULT; 392 return ERR_PTR(-EFAULT);
407 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 393 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
408 table = table + ((address >> 31) & 0x7ff); 394 table = table + ((address >> 31) & 0x7ff);
409 if (unlikely(*table & _REGION_ENTRY_INV)) 395 if (unlikely(*table & _REGION_ENTRY_INV))
410 return -EFAULT; 396 return ERR_PTR(-EFAULT);
411 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 397 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
412 table = table + ((address >> 20) & 0x7ff); 398 table = table + ((address >> 20) & 0x7ff);
399 return table;
400}
401
402/**
403 * __gmap_translate - translate a guest address to a user space address
404 * @address: guest address
405 * @gmap: pointer to guest mapping meta data structure
406 *
407 * Returns user space address which corresponds to the guest address or
408 * -EFAULT if no such mapping exists.
409 * This function does not establish potentially missing page table entries.
410 * The mmap_sem of the mm that belongs to the address space must be held
411 * when this function gets called.
412 */
413unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
414{
415 unsigned long *segment_ptr, vmaddr, segment;
416 struct gmap_pgtable *mp;
417 struct page *page;
413 418
419 current->thread.gmap_addr = address;
420 segment_ptr = gmap_table_walk(address, gmap);
421 if (IS_ERR(segment_ptr))
422 return PTR_ERR(segment_ptr);
414 /* Convert the gmap address to an mm address. */ 423 /* Convert the gmap address to an mm address. */
415 segment = *table; 424 segment = *segment_ptr;
416 if (likely(!(segment & _SEGMENT_ENTRY_INV))) { 425 if (!(segment & _SEGMENT_ENTRY_INV)) {
417 page = pfn_to_page(segment >> PAGE_SHIFT); 426 page = pfn_to_page(segment >> PAGE_SHIFT);
418 mp = (struct gmap_pgtable *) page->index; 427 mp = (struct gmap_pgtable *) page->index;
419 return mp->vmaddr | (address & ~PMD_MASK); 428 return mp->vmaddr | (address & ~PMD_MASK);
420 } else if (segment & _SEGMENT_ENTRY_RO) { 429 } else if (segment & _SEGMENT_ENTRY_RO) {
421 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; 430 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
422 vma = find_vma(mm, vmaddr); 431 return vmaddr | (address & ~PMD_MASK);
423 if (!vma || vma->vm_start > vmaddr) 432 }
424 return -EFAULT; 433 return -EFAULT;
425 434}
426 /* Walk the parent mm page table */ 435EXPORT_SYMBOL_GPL(__gmap_translate);
427 pgd = pgd_offset(mm, vmaddr); 436
428 pud = pud_alloc(mm, pgd, vmaddr); 437/**
429 if (!pud) 438 * gmap_translate - translate a guest address to a user space address
430 return -ENOMEM; 439 * @address: guest address
431 pmd = pmd_alloc(mm, pud, vmaddr); 440 * @gmap: pointer to guest mapping meta data structure
432 if (!pmd) 441 *
433 return -ENOMEM; 442 * Returns user space address which corresponds to the guest address or
434 if (!pmd_present(*pmd) && 443 * -EFAULT if no such mapping exists.
435 __pte_alloc(mm, vma, pmd, vmaddr)) 444 * This function does not establish potentially missing page table entries.
436 return -ENOMEM; 445 */
437 /* pmd now points to a valid segment table entry. */ 446unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
438 rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); 447{
439 if (!rmap) 448 unsigned long rc;
440 return -ENOMEM; 449
441 /* Link gmap segment table entry location to page table. */ 450 down_read(&gmap->mm->mmap_sem);
442 page = pmd_page(*pmd); 451 rc = __gmap_translate(address, gmap);
443 mp = (struct gmap_pgtable *) page->index; 452 up_read(&gmap->mm->mmap_sem);
444 rmap->entry = table; 453 return rc;
445 spin_lock(&mm->page_table_lock); 454}
455EXPORT_SYMBOL_GPL(gmap_translate);
456
457static int gmap_connect_pgtable(unsigned long segment,
458 unsigned long *segment_ptr,
459 struct gmap *gmap)
460{
461 unsigned long vmaddr;
462 struct vm_area_struct *vma;
463 struct gmap_pgtable *mp;
464 struct gmap_rmap *rmap;
465 struct mm_struct *mm;
466 struct page *page;
467 pgd_t *pgd;
468 pud_t *pud;
469 pmd_t *pmd;
470
471 mm = gmap->mm;
472 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
473 vma = find_vma(mm, vmaddr);
474 if (!vma || vma->vm_start > vmaddr)
475 return -EFAULT;
476 /* Walk the parent mm page table */
477 pgd = pgd_offset(mm, vmaddr);
478 pud = pud_alloc(mm, pgd, vmaddr);
479 if (!pud)
480 return -ENOMEM;
481 pmd = pmd_alloc(mm, pud, vmaddr);
482 if (!pmd)
483 return -ENOMEM;
484 if (!pmd_present(*pmd) &&
485 __pte_alloc(mm, vma, pmd, vmaddr))
486 return -ENOMEM;
487 /* pmd now points to a valid segment table entry. */
488 rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
489 if (!rmap)
490 return -ENOMEM;
491 /* Link gmap segment table entry location to page table. */
492 page = pmd_page(*pmd);
493 mp = (struct gmap_pgtable *) page->index;
494 rmap->entry = segment_ptr;
495 spin_lock(&mm->page_table_lock);
496 if (*segment_ptr == segment) {
446 list_add(&rmap->list, &mp->mapper); 497 list_add(&rmap->list, &mp->mapper);
447 spin_unlock(&mm->page_table_lock);
448 /* Set gmap segment table entry to page table. */ 498 /* Set gmap segment table entry to page table. */
449 *table = pmd_val(*pmd) & PAGE_MASK; 499 *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
450 return vmaddr | (address & ~PMD_MASK); 500 rmap = NULL;
501 }
502 spin_unlock(&mm->page_table_lock);
503 kfree(rmap);
504 return 0;
505}
506
507static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
508{
509 struct gmap_rmap *rmap, *next;
510 struct gmap_pgtable *mp;
511 struct page *page;
512 int flush;
513
514 flush = 0;
515 spin_lock(&mm->page_table_lock);
516 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
517 mp = (struct gmap_pgtable *) page->index;
518 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
519 *rmap->entry =
520 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
521 list_del(&rmap->list);
522 kfree(rmap);
523 flush = 1;
524 }
525 spin_unlock(&mm->page_table_lock);
526 if (flush)
527 __tlb_flush_global();
528}
529
530/*
531 * this function is assumed to be called with mmap_sem held
532 */
533unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
534{
535 unsigned long *segment_ptr, segment;
536 struct gmap_pgtable *mp;
537 struct page *page;
538 int rc;
539
540 current->thread.gmap_addr = address;
541 segment_ptr = gmap_table_walk(address, gmap);
542 if (IS_ERR(segment_ptr))
543 return -EFAULT;
544 /* Convert the gmap address to an mm address. */
545 while (1) {
546 segment = *segment_ptr;
547 if (!(segment & _SEGMENT_ENTRY_INV)) {
548 /* Page table is present */
549 page = pfn_to_page(segment >> PAGE_SHIFT);
550 mp = (struct gmap_pgtable *) page->index;
551 return mp->vmaddr | (address & ~PMD_MASK);
552 }
553 if (!(segment & _SEGMENT_ENTRY_RO))
554 /* Nothing mapped in the gmap address space. */
555 break;
556 rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
557 if (rc)
558 return rc;
451 } 559 }
452 return -EFAULT; 560 return -EFAULT;
453} 561}
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
511} 619}
512EXPORT_SYMBOL_GPL(gmap_discard); 620EXPORT_SYMBOL_GPL(gmap_discard);
513 621
514void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
515{
516 struct gmap_rmap *rmap, *next;
517 struct gmap_pgtable *mp;
518 struct page *page;
519 int flush;
520
521 flush = 0;
522 spin_lock(&mm->page_table_lock);
523 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
524 mp = (struct gmap_pgtable *) page->index;
525 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
526 *rmap->entry =
527 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
528 list_del(&rmap->list);
529 kfree(rmap);
530 flush = 1;
531 }
532 spin_unlock(&mm->page_table_lock);
533 if (flush)
534 __tlb_flush_global();
535}
536
537static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, 622static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
538 unsigned long vmaddr) 623 unsigned long vmaddr)
539{ 624{
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
586{ 671{
587} 672}
588 673
589static inline void gmap_unmap_notifier(struct mm_struct *mm, 674static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
590 unsigned long *table) 675 unsigned long *table)
591{ 676{
592} 677}
593 678
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
653 unsigned int bit, mask; 738 unsigned int bit, mask;
654 739
655 if (mm_has_pgste(mm)) { 740 if (mm_has_pgste(mm)) {
656 gmap_unmap_notifier(mm, table); 741 gmap_disconnect_pgtable(mm, table);
657 return page_table_free_pgste(table); 742 return page_table_free_pgste(table);
658 } 743 }
659 /* Free 1K/2K page table fragment of a 4K page */ 744 /* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
696 781
697 mm = tlb->mm; 782 mm = tlb->mm;
698 if (mm_has_pgste(mm)) { 783 if (mm_has_pgste(mm)) {
699 gmap_unmap_notifier(mm, table); 784 gmap_disconnect_pgtable(mm, table);
700 table = (unsigned long *) (__pa(table) | FRAG_MASK); 785 table = (unsigned long *) (__pa(table) | FRAG_MASK);
701 tlb_remove_table(tlb, table); 786 tlb_remove_table(tlb, table);
702 return; 787 return;
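
The new gmap_table_walk() factored out above indexes four table levels with 11-bit fields pulled straight out of the guest address; a standalone sketch of just that index arithmetic (level names follow the usual s390 region/segment terminology):

#include <stdio.h>

int main(void)
{
	unsigned long address = 0x0000123456789abcUL;	/* guest address */

	unsigned long rfx = (address >> 53) & 0x7ff;	/* region-first index  */
	unsigned long rsx = (address >> 42) & 0x7ff;	/* region-second index */
	unsigned long rtx = (address >> 31) & 0x7ff;	/* region-third index  */
	unsigned long sx  = (address >> 20) & 0x7ff;	/* segment index       */

	printf("indexes: %lu %lu %lu %lu\n", rfx, rsx, rtx, sx);
	return 0;
}

Each level of the real walk then follows the origin stored in the selected entry, bailing out with -EFAULT as soon as an invalid entry is hit, exactly as the hunk shows.
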
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ffab84db6907..35837054f734 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
191/* 191/*
192 * Add a backed mem_map array to the virtual mem_map array. 192 * Add a backed mem_map array to the virtual mem_map array.
193 */ 193 */
194int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 194int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
195{ 195{
196 unsigned long address, start_addr, end_addr; 196 unsigned long address = start;
197 pgd_t *pg_dir; 197 pgd_t *pg_dir;
198 pud_t *pu_dir; 198 pud_t *pu_dir;
199 pmd_t *pm_dir; 199 pmd_t *pm_dir;
200 pte_t *pt_dir; 200 pte_t *pt_dir;
201 int ret = -ENOMEM; 201 int ret = -ENOMEM;
202 202
203 start_addr = (unsigned long) start; 203 for (address = start; address < end;) {
204 end_addr = (unsigned long) (start + nr);
205
206 for (address = start_addr; address < end_addr;) {
207 pg_dir = pgd_offset_k(address); 204 pg_dir = pgd_offset_k(address);
208 if (pgd_none(*pg_dir)) { 205 if (pgd_none(*pg_dir)) {
209 pu_dir = vmem_pud_alloc(); 206 pu_dir = vmem_pud_alloc();
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
262 } 259 }
263 address += PAGE_SIZE; 260 address += PAGE_SIZE;
264 } 261 }
265 memset(start, 0, nr * sizeof(struct page)); 262 memset((void *)start, 0, end - start);
266 ret = 0; 263 ret = 0;
267out: 264out:
268 flush_tlb_kernel_range(start_addr, end_addr); 265 flush_tlb_kernel_range(start, end);
269 return ret; 266 return ret;
270} 267}
271 268
272void vmemmap_free(struct page *memmap, unsigned long nr_pages) 269void vmemmap_free(unsigned long start, unsigned long end)
273{ 270{
274} 271}
275 272
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0972e91cced2..82f165f8078c 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
747 747
748 if (!bpf_jit_enable) 748 if (!bpf_jit_enable)
749 return; 749 return;
750 addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL); 750 addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
751 if (addrs == NULL) 751 if (addrs == NULL)
752 return; 752 return;
753 memset(addrs, 0, fp->len * sizeof(*addrs));
754 memset(&jit, 0, sizeof(cjit)); 753 memset(&jit, 0, sizeof(cjit));
755 memset(&cjit, 0, sizeof(cjit)); 754 memset(&cjit, 0, sizeof(cjit));
756 755
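
Minor, but worth noting: kcalloc() both zeroes the buffer and rejects a multiplication overflow of len * sizeof(*addrs), which the open-coded kmalloc() plus memset() did not. A user-space analogue of the two patterns:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 1000, size = sizeof(unsigned int);

	unsigned int *a = calloc(n, size);	/* zeroed, overflow-checked */
	if (!a)
		return 1;

	unsigned int *b = malloc(n * size);	/* old pattern */
	if (!b) {
		free(a);
		return 1;
	}
	memset(b, 0, n * size);

	printf("both buffers hold %zu zeroed entries\n", n);
	free(a);
	free(b);
	return 0;
}
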
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 584b93674ea4..ffeb17ce7f31 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,6 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
440 switch (id.machine) { 440 switch (id.machine) {
441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; 441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; 442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
443 case 0x2827: ops->cpu_type = "s390/zEC12"; break;
443 default: return -ENODEV; 444 default: return -ENODEV;
444 } 445 }
445 } 446 }
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index f0f426a113ce..086a2e37935d 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
2# Makefile for the s390 PCI subsystem. 2# Makefile for the s390 PCI subsystem.
3# 3#
4 4
5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \ 5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
6 pci_sysfs.o pci_event.o pci_debug.o 6 pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 27b4c17855b9..e6f15b5d8b7d 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
99static struct kmem_cache *zdev_irq_cache; 99static struct kmem_cache *zdev_irq_cache;
100static struct kmem_cache *zdev_fmb_cache; 100static struct kmem_cache *zdev_fmb_cache;
101 101
102debug_info_t *pci_debug_msg_id;
103debug_info_t *pci_debug_err_id;
104
105static inline int irq_to_msi_nr(unsigned int irq) 102static inline int irq_to_msi_nr(unsigned int irq)
106{ 103{
107 return irq & ZPCI_MSI_MASK; 104 return irq & ZPCI_MSI_MASK;
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
179 fib->aisb = (u64) bucket->aisb + aisb / 8; 176 fib->aisb = (u64) bucket->aisb + aisb / 8;
180 fib->aisbo = aisb & ZPCI_MSI_MASK; 177 fib->aisbo = aisb & ZPCI_MSI_MASK;
181 178
182 rc = mpcifc_instr(req, fib); 179 rc = s390pci_mod_fc(req, fib);
183 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); 180 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
184 181
185 free_page((unsigned long) fib); 182 free_page((unsigned long) fib);
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
209 fib->iota = args->iota; 206 fib->iota = args->iota;
210 fib->fmb_addr = args->fmb_addr; 207 fib->fmb_addr = args->fmb_addr;
211 208
212 rc = mpcifc_instr(req, fib); 209 rc = s390pci_mod_fc(req, fib);
213 free_page((unsigned long) fib); 210 free_page((unsigned long) fib);
214 return rc; 211 return rc;
215} 212}
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
249 if (zdev->fmb) 246 if (zdev->fmb)
250 return -EINVAL; 247 return -EINVAL;
251 248
252 zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL); 249 zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
253 if (!zdev->fmb) 250 if (!zdev->fmb)
254 return -ENOMEM; 251 return -ENOMEM;
255 memset(zdev->fmb, 0, sizeof(*zdev->fmb));
256 WARN_ON((u64) zdev->fmb & 0xf); 252 WARN_ON((u64) zdev->fmb & 0xf);
257 253
258 args.fmb_addr = virt_to_phys(zdev->fmb); 254 args.fmb_addr = virt_to_phys(zdev->fmb);
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
284 u64 data; 280 u64 data;
285 int rc; 281 int rc;
286 282
287 rc = pcilg_instr(&data, req, offset); 283 rc = s390pci_load(&data, req, offset);
288 data = data << ((8 - len) * 8); 284 if (!rc) {
289 data = le64_to_cpu(data); 285 data = data << ((8 - len) * 8);
290 if (!rc) 286 data = le64_to_cpu(data);
291 *val = (u32) data; 287 *val = (u32) data;
292 else 288 } else
293 *val = 0xffffffff; 289 *val = 0xffffffff;
294 return rc; 290 return rc;
295} 291}
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
302 298
303 data = cpu_to_le64(data); 299 data = cpu_to_le64(data);
304 data = data >> ((8 - len) * 8); 300 data = data >> ((8 - len) * 8);
305 rc = pcistg_instr(data, req, offset); 301 rc = s390pci_store(data, req, offset);
306 return rc; 302 return rc;
307} 303}
308 304
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
409 int size, u32 *val) 405 int size, u32 *val)
410{ 406{
411 struct zpci_dev *zdev = get_zdev_by_bus(bus); 407 struct zpci_dev *zdev = get_zdev_by_bus(bus);
408 int ret;
412 409
413 if (!zdev || devfn != ZPCI_DEVFN) 410 if (!zdev || devfn != ZPCI_DEVFN)
414 return 0; 411 ret = -ENODEV;
415 return zpci_cfg_load(zdev, where, val, size); 412 else
413 ret = zpci_cfg_load(zdev, where, val, size);
414
415 return ret;
416} 416}
417 417
418static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, 418static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
419 int size, u32 val) 419 int size, u32 val)
420{ 420{
421 struct zpci_dev *zdev = get_zdev_by_bus(bus); 421 struct zpci_dev *zdev = get_zdev_by_bus(bus);
422 int ret;
422 423
423 if (!zdev || devfn != ZPCI_DEVFN) 424 if (!zdev || devfn != ZPCI_DEVFN)
424 return 0; 425 ret = -ENODEV;
425 return zpci_cfg_store(zdev, where, val, size); 426 else
427 ret = zpci_cfg_store(zdev, where, val, size);
428
429 return ret;
426} 430}
427 431
428static struct pci_ops pci_root_ops = { 432static struct pci_ops pci_root_ops = {
@@ -474,7 +478,7 @@ scan:
474 } 478 }
475 479
476 /* enable interrupts again */ 480 /* enable interrupts again */
477 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 481 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
478 482
479 /* check again to not lose initiative */ 483 /* check again to not lose initiative */
480 rmb(); 484 rmb();
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
596 } 600 }
597}; 601};
598 602
599static void zpci_unmap_resources(struct pci_dev *pdev)
600{
601 resource_size_t len;
602 int i;
603
604 for (i = 0; i < PCI_BAR_COUNT; i++) {
605 len = pci_resource_len(pdev, i);
606 if (!len)
607 continue;
608 pci_iounmap(pdev, (void *) pdev->resource[i].start);
609 }
610};
611
612struct zpci_dev *zpci_alloc_device(void) 603struct zpci_dev *zpci_alloc_device(void)
613{ 604{
614 struct zpci_dev *zdev; 605 struct zpci_dev *zdev;
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
636 kfree(zdev); 627 kfree(zdev);
637} 628}
638 629
639/* Called on removal of pci_dev, leaves zpci and bus device */
640static void zpci_remove_device(struct pci_dev *pdev)
641{
642 struct zpci_dev *zdev = get_zdev(pdev);
643
644 dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
645 zdev->state = ZPCI_FN_STATE_CONFIGURED;
646 zpci_dma_exit_device(zdev);
647 zpci_fmb_disable_device(zdev);
648 zpci_sysfs_remove_device(&pdev->dev);
649 zpci_unmap_resources(pdev);
650 list_del(&zdev->entry); /* can be called from init */
651 zdev->pdev = NULL;
652}
653
654static void zpci_scan_devices(void)
655{
656 struct zpci_dev *zdev;
657
658 mutex_lock(&zpci_list_lock);
659 list_for_each_entry(zdev, &zpci_list, entry)
660 if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
661 zpci_scan_device(zdev);
662 mutex_unlock(&zpci_list_lock);
663}
664
665/* 630/*
666 * Too late for any s390 specific setup, since interrupts must be set up 631 * Too late for any s390 specific setup, since interrupts must be set up
667 * already which requires DMA setup too and the pci scan will access the 632 * already which requires DMA setup too and the pci scan will access the
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
688 return 0; 653 return 0;
689} 654}
690 655
691void pcibios_disable_device(struct pci_dev *pdev)
692{
693 zpci_remove_device(pdev);
694 pdev->sysdata = NULL;
695}
696
697int pcibios_add_platform_entries(struct pci_dev *pdev) 656int pcibios_add_platform_entries(struct pci_dev *pdev)
698{ 657{
699 return zpci_sysfs_add_device(&pdev->dev); 658 return zpci_sysfs_add_device(&pdev->dev);
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
789 spin_lock_init(&bucket->lock); 748 spin_lock_init(&bucket->lock);
790 /* set summary to 1 to be called every time for the ISC */ 749 /* set summary to 1 to be called every time for the ISC */
791 *zpci_irq_si = 1; 750 *zpci_irq_si = 1;
792 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 751 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
793 return 0; 752 return 0;
794 753
795out_ai: 754out_ai:
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
872 spin_unlock(&zpci_iomap_lock); 831 spin_unlock(&zpci_iomap_lock);
873} 832}
874 833
875static int zpci_create_device_bus(struct zpci_dev *zdev) 834int pcibios_add_device(struct pci_dev *pdev)
835{
836 struct zpci_dev *zdev = get_zdev(pdev);
837
838 zdev->pdev = pdev;
839 zpci_debug_init_device(zdev);
840 zpci_fmb_enable_device(zdev);
841 zpci_map_resources(zdev);
842
843 return 0;
844}
845
846static int zpci_scan_bus(struct zpci_dev *zdev)
876{ 847{
877 struct resource *res; 848 struct resource *res;
878 LIST_HEAD(resources); 849 LIST_HEAD(resources);
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
909 pci_add_resource(&resources, res); 880 pci_add_resource(&resources, res);
910 } 881 }
911 882
912 zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, 883 zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
913 zdev, &resources); 884 zdev, &resources);
914 if (!zdev->bus) 885 if (!zdev->bus)
915 return -EIO; 886 return -EIO;
916 887
@@ -959,6 +930,13 @@ out:
959} 930}
960EXPORT_SYMBOL_GPL(zpci_enable_device); 931EXPORT_SYMBOL_GPL(zpci_enable_device);
961 932
933int zpci_disable_device(struct zpci_dev *zdev)
934{
935 zpci_dma_exit_device(zdev);
936 return clp_disable_fh(zdev);
937}
938EXPORT_SYMBOL_GPL(zpci_disable_device);
939
962int zpci_create_device(struct zpci_dev *zdev) 940int zpci_create_device(struct zpci_dev *zdev)
963{ 941{
964 int rc; 942 int rc;
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
967 if (rc) 945 if (rc)
968 goto out; 946 goto out;
969 947
970 rc = zpci_create_device_bus(zdev); 948 if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
949 rc = zpci_enable_device(zdev);
950 if (rc)
951 goto out_free;
952
953 zdev->state = ZPCI_FN_STATE_ONLINE;
954 }
955 rc = zpci_scan_bus(zdev);
971 if (rc) 956 if (rc)
972 goto out_bus; 957 goto out_disable;
973 958
974 mutex_lock(&zpci_list_lock); 959 mutex_lock(&zpci_list_lock);
975 list_add_tail(&zdev->entry, &zpci_list); 960 list_add_tail(&zdev->entry, &zpci_list);
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
977 hotplug_ops->create_slot(zdev); 962 hotplug_ops->create_slot(zdev);
978 mutex_unlock(&zpci_list_lock); 963 mutex_unlock(&zpci_list_lock);
979 964
980 if (zdev->state == ZPCI_FN_STATE_STANDBY)
981 return 0;
982
983 rc = zpci_enable_device(zdev);
984 if (rc)
985 goto out_start;
986 return 0; 965 return 0;
987 966
988out_start: 967out_disable:
989 mutex_lock(&zpci_list_lock); 968 if (zdev->state == ZPCI_FN_STATE_ONLINE)
990 list_del(&zdev->entry); 969 zpci_disable_device(zdev);
991 if (hotplug_ops) 970out_free:
992 hotplug_ops->remove_slot(zdev);
993 mutex_unlock(&zpci_list_lock);
994out_bus:
995 zpci_free_domain(zdev); 971 zpci_free_domain(zdev);
996out: 972out:
997 return rc; 973 return rc;
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
1016 goto out; 992 goto out;
1017 } 993 }
1018 994
1019 zpci_debug_init_device(zdev);
1020 zpci_fmb_enable_device(zdev);
1021 zpci_map_resources(zdev);
1022 pci_bus_add_devices(zdev->bus); 995 pci_bus_add_devices(zdev->bus);
1023 996
1024 /* now that pdev was added to the bus mark it as used */
1025 zdev->state = ZPCI_FN_STATE_ONLINE;
1026 return 0; 997 return 0;
1027
1028out: 998out:
1029 zpci_dma_exit_device(zdev); 999 zpci_dma_exit_device(zdev);
1030 clp_disable_fh(zdev); 1000 clp_disable_fh(zdev);
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
1087} 1057}
1088EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); 1058EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
1089 1059
1090unsigned int s390_pci_probe = 1; 1060unsigned int s390_pci_probe;
1091EXPORT_SYMBOL_GPL(s390_pci_probe); 1061EXPORT_SYMBOL_GPL(s390_pci_probe);
1092 1062
1093char * __init pcibios_setup(char *str) 1063char * __init pcibios_setup(char *str)
1094{ 1064{
1095 if (!strcmp(str, "off")) { 1065 if (!strcmp(str, "on")) {
1096 s390_pci_probe = 0; 1066 s390_pci_probe = 1;
1097 return NULL; 1067 return NULL;
1098 } 1068 }
1099 return str; 1069 return str;
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
1138 if (rc) 1108 if (rc)
1139 goto out_find; 1109 goto out_find;
1140 1110
1141 zpci_scan_devices();
1142 return 0; 1111 return 0;
1143 1112
1144out_find: 1113out_find:
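
The pci.c hunks above change two things in the config-space path: zpci_cfg_load now interprets the loaded value only when the PCI-load wrapper actually succeeded (handing back 0xffffffff otherwise), and the bus-level pci_read/pci_write accessors return -ENODEV instead of silently reporting success when no zPCI device sits behind the requested devfn. Below is a minimal userspace sketch of that error-handling shape only; the names hw_load(), cfg_load() and bus_read() and the fake backend are inventions for illustration, and the s390-specific byte-lane shift and le64_to_cpu conversion are deliberately left out.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fake backend standing in for the PCI-load instruction:
     * returns 0 on success, a negative error otherwise. */
    static int hw_load(uint64_t *data, int offset, int len)
    {
        (void)offset; (void)len;
        *data = 0x12345678u;                /* made-up register value */
        return 0;
    }

    /* Interpret the data only when the load worked; otherwise report
     * all-ones, the conventional "nobody home" answer on PCI. */
    static int cfg_load(int offset, uint32_t *val, int len)
    {
        uint64_t data;
        int rc = hw_load(&data, offset, len);

        if (!rc)
            *val = (uint32_t)data;
        else
            *val = 0xffffffff;
        return rc;
    }

    /* Bus-level accessor: report -ENODEV instead of pretending success
     * when there is no device behind the requested function. */
    static int bus_read(int have_device, int offset, uint32_t *val, int len)
    {
        if (!have_device)
            return -ENODEV;
        return cfg_load(offset, val, len);
    }

    int main(void)
    {
        uint32_t val;

        printf("present: rc=%d\n", bus_read(1, 0, &val, 4));
        printf("absent : rc=%d\n", bus_read(0, 0, &val, 4));
        return 0;
    }

The point of the shape is that a failed or absent read is visible to the caller through the return code while the data lane still carries the all-ones pattern readers expect.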
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index f339fe2feb15..bd34359d1546 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -13,6 +13,7 @@
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <asm/pci_debug.h>
16#include <asm/pci_clp.h> 17#include <asm/pci_clp.h>
17 18
18/* 19/*
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
144 struct zpci_dev *zdev; 145 struct zpci_dev *zdev;
145 int rc; 146 int rc;
146 147
148 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
147 zdev = zpci_alloc_device(); 149 zdev = zpci_alloc_device();
148 if (IS_ERR(zdev)) 150 if (IS_ERR(zdev))
149 return PTR_ERR(zdev); 151 return PTR_ERR(zdev);
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
204 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) 206 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
205 *fh = rrb->response.fh; 207 *fh = rrb->response.fh;
206 else { 208 else {
207 pr_err("Set PCI FN failed with response: %x cc: %d\n", 209 zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
208 rrb->response.hdr.rsp, rc); 210 rrb->response.hdr.rsp);
209 rc = -EIO; 211 rc = -EIO;
210 } 212 }
211 clp_free_block(rrb); 213 clp_free_block(rrb);
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
221 if (!rc) 223 if (!rc)
222 /* Success -> store enabled handle in zdev */ 224 /* Success -> store enabled handle in zdev */
223 zdev->fh = fh; 225 zdev->fh = fh;
226
227 zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
224 return rc; 228 return rc;
225} 229}
226 230
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
237 if (!rc) 241 if (!rc)
238 /* Success -> store disabled handle in zdev */ 242 /* Success -> store disabled handle in zdev */
239 zdev->fh = fh; 243 zdev->fh = fh;
240 else 244
241 dev_err(&zdev->pdev->dev, 245 zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
242 "Failed to disable fn handle: 0x%x\n", fh);
243 return rc; 246 return rc;
244} 247}
245 248
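
The pci_clp.c hunks replace console error prints with zpci_dbg entries in the s390 debug feature, where a numeric level decides which events are recorded. As a rough, non-kernel illustration of a level-gated trace macro in the same spirit (the names trace_event()/zdbg(), the stderr sink and the fixed threshold are assumptions; the ", ##__VA_ARGS__" comma-paste is the GNU idiom the kernel itself uses):

    #include <stdarg.h>
    #include <stdio.h>

    /* Only messages at or below the current level reach the trace sink. */
    static int trace_level = 3;

    static void trace_event(int level, const char *fmt, ...)
    {
        va_list ap;

        if (level > trace_level)
            return;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);   /* stand-in for a kernel trace buffer */
        va_end(ap);
    }

    #define zdbg(level, fmt, ...) trace_event(level, fmt, ##__VA_ARGS__)

    int main(void)
    {
        zdbg(3, "ena fid:%x, fh:%x, rc:%d\n", 0x10u, 0x20u, 0);
        zdbg(5, "this one is filtered out\n");
        return 0;
    }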
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index a5d07bc2a547..771b82359af4 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -11,12 +11,17 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/export.h>
14#include <linux/pci.h> 15#include <linux/pci.h>
15#include <asm/debug.h> 16#include <asm/debug.h>
16 17
17#include <asm/pci_dma.h> 18#include <asm/pci_dma.h>
18 19
19static struct dentry *debugfs_root; 20static struct dentry *debugfs_root;
21debug_info_t *pci_debug_msg_id;
22EXPORT_SYMBOL_GPL(pci_debug_msg_id);
23debug_info_t *pci_debug_err_id;
24EXPORT_SYMBOL_GPL(pci_debug_err_id);
20 25
21static char *pci_perf_names[] = { 26static char *pci_perf_names[] = {
22 /* hardware counters */ 27 /* hardware counters */
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
168 return -EINVAL; 173 return -EINVAL;
169 debug_register_view(pci_debug_msg_id, &debug_sprintf_view); 174 debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
170 debug_set_level(pci_debug_msg_id, 3); 175 debug_set_level(pci_debug_msg_id, 3);
171 zpci_dbg("Debug view initialized\n");
172 176
173 /* error log */ 177 /* error log */
174 pci_debug_err_id = debug_register("pci_error", 2, 1, 16); 178 pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
176 return -EINVAL; 180 return -EINVAL;
177 debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); 181 debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
178 debug_set_level(pci_debug_err_id, 6); 182 debug_set_level(pci_debug_err_id, 6);
179 zpci_err("Debug view initialized\n");
180 183
181 debugfs_root = debugfs_create_dir("pci", NULL); 184 debugfs_root = debugfs_create_dir("pci", NULL);
182 return 0; 185 return 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a547419907c3..f8e69d5bc0a9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
169 * needs to be redone! 169 * needs to be redone!
170 */ 170 */
171 goto no_refresh; 171 goto no_refresh;
172 rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr, 172
173 nr_pages * PAGE_SIZE); 173 rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
174 nr_pages * PAGE_SIZE);
174 175
175no_refresh: 176no_refresh:
176 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); 177 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
268 int flags = ZPCI_PTE_VALID; 269 int flags = ZPCI_PTE_VALID;
269 dma_addr_t dma_addr; 270 dma_addr_t dma_addr;
270 271
271 WARN_ON_ONCE(offset > PAGE_SIZE);
272
273 /* This rounds up number of pages based on size and offset */ 272 /* This rounds up number of pages based on size and offset */
274 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); 273 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
275 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); 274 iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
291 290
292 if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) { 291 if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
293 atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages); 292 atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
294 return dma_addr + offset; 293 return dma_addr + (offset & ~PAGE_MASK);
295 } 294 }
296 295
297out_free: 296out_free:
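
The pci_dma.c hunk drops the WARN_ON for offsets beyond one page and returns dma_addr + (offset & ~PAGE_MASK) rather than dma_addr + offset: the IOMMU translation is built page by page starting at the page-aligned physical address, so only the in-page remainder of the offset belongs in the returned handle, while any whole pages of offset are already covered by the number of pages mapped. A small arithmetic sketch of that idea follows; the constants, num_pages() helper and example addresses are made up, and len is assumed non-zero.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Page-sized translation entries needed to cover [addr, addr + len),
     * mirroring what an iommu_num_pages()-style helper computes. */
    static unsigned long num_pages(uint64_t addr, uint64_t len)
    {
        uint64_t first = addr & PAGE_MASK;
        uint64_t last  = (addr + len - 1) & PAGE_MASK;

        return (unsigned long)((last - first) >> PAGE_SHIFT) + 1;
    }

    int main(void)
    {
        uint64_t page_phys = 0x100000;   /* page-aligned backing page */
        uint64_t offset    = 0x1804;     /* larger than one page */
        uint64_t size      = 0x200;
        uint64_t pa        = page_phys + offset;
        uint64_t iova      = 0x80000000; /* start of the allocated IOVA range */

        /* The translation starts at pa & PAGE_MASK, so the data sits at
         * the in-page remainder of the offset within the mapped range. */
        uint64_t handle = iova + (offset & ~PAGE_MASK);

        printf("pages mapped: %lu\n", num_pages(pa, size));
        printf("dma handle  : 0x%llx\n", (unsigned long long)handle);
        return 0;
    }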
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
new file mode 100644
index 000000000000..22eeb9d7ffeb
--- /dev/null
+++ b/arch/s390/pci/pci_insn.c
@@ -0,0 +1,202 @@
1/*
2 * s390 specific pci instructions
3 *
4 * Copyright IBM Corp. 2013
5 */
6
7#include <linux/export.h>
8#include <linux/errno.h>
9#include <linux/delay.h>
10#include <asm/pci_insn.h>
11#include <asm/processor.h>
12
13#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
14
15/* Modify PCI Function Controls */
16static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
17{
18 u8 cc;
19
20 asm volatile (
21 " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
22 " ipm %[cc]\n"
23 " srl %[cc],28\n"
24 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
25 : : "cc");
26 *status = req >> 24 & 0xff;
27 return cc;
28}
29
30int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
31{
32 u8 cc, status;
33
34 do {
35 cc = __mpcifc(req, fib, &status);
36 if (cc == 2)
37 msleep(ZPCI_INSN_BUSY_DELAY);
38 } while (cc == 2);
39
40 if (cc)
41 printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
42 __func__, cc, status);
43 return (cc) ? -EIO : 0;
44}
45
46/* Refresh PCI Translations */
47static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
48{
49 register u64 __addr asm("2") = addr;
50 register u64 __range asm("3") = range;
51 u8 cc;
52
53 asm volatile (
54 " .insn rre,0xb9d30000,%[fn],%[addr]\n"
55 " ipm %[cc]\n"
56 " srl %[cc],28\n"
57 : [cc] "=d" (cc), [fn] "+d" (fn)
58 : [addr] "d" (__addr), "d" (__range)
59 : "cc");
60 *status = fn >> 24 & 0xff;
61 return cc;
62}
63
64int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
65{
66 u8 cc, status;
67
68 do {
69 cc = __rpcit(fn, addr, range, &status);
70 if (cc == 2)
71 udelay(ZPCI_INSN_BUSY_DELAY);
72 } while (cc == 2);
73
74 if (cc)
75 printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
76 __func__, cc, status, addr, range);
77 return (cc) ? -EIO : 0;
78}
79
80/* Set Interruption Controls */
81void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
82{
83 asm volatile (
84 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
85 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
86}
87
88/* PCI Load */
89static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
90{
91 register u64 __req asm("2") = req;
92 register u64 __offset asm("3") = offset;
93 int cc = -ENXIO;
94 u64 __data;
95
96 asm volatile (
97 " .insn rre,0xb9d20000,%[data],%[req]\n"
98 "0: ipm %[cc]\n"
99 " srl %[cc],28\n"
100 "1:\n"
101 EX_TABLE(0b, 1b)
102 : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
103 : "d" (__offset)
104 : "cc");
105 *status = __req >> 24 & 0xff;
106 if (!cc)
107 *data = __data;
108
109 return cc;
110}
111
112int s390pci_load(u64 *data, u64 req, u64 offset)
113{
114 u8 status;
115 int cc;
116
117 do {
118 cc = __pcilg(data, req, offset, &status);
119 if (cc == 2)
120 udelay(ZPCI_INSN_BUSY_DELAY);
121 } while (cc == 2);
122
123 if (cc)
124 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
125 __func__, cc, status, req, offset);
126 return (cc > 0) ? -EIO : cc;
127}
128EXPORT_SYMBOL_GPL(s390pci_load);
129
130/* PCI Store */
131static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
132{
133 register u64 __req asm("2") = req;
134 register u64 __offset asm("3") = offset;
135 int cc = -ENXIO;
136
137 asm volatile (
138 " .insn rre,0xb9d00000,%[data],%[req]\n"
139 "0: ipm %[cc]\n"
140 " srl %[cc],28\n"
141 "1:\n"
142 EX_TABLE(0b, 1b)
143 : [cc] "+d" (cc), [req] "+d" (__req)
144 : "d" (__offset), [data] "d" (data)
145 : "cc");
146 *status = __req >> 24 & 0xff;
147 return cc;
148}
149
150int s390pci_store(u64 data, u64 req, u64 offset)
151{
152 u8 status;
153 int cc;
154
155 do {
156 cc = __pcistg(data, req, offset, &status);
157 if (cc == 2)
158 udelay(ZPCI_INSN_BUSY_DELAY);
159 } while (cc == 2);
160
161 if (cc)
162 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
163 __func__, cc, status, req, offset);
164 return (cc > 0) ? -EIO : cc;
165}
166EXPORT_SYMBOL_GPL(s390pci_store);
167
168/* PCI Store Block */
169static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
170{
171 int cc = -ENXIO;
172
173 asm volatile (
174 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
175 "0: ipm %[cc]\n"
176 " srl %[cc],28\n"
177 "1:\n"
178 EX_TABLE(0b, 1b)
179 : [cc] "+d" (cc), [req] "+d" (req)
180 : [offset] "d" (offset), [data] "Q" (*data)
181 : "cc");
182 *status = req >> 24 & 0xff;
183 return cc;
184}
185
186int s390pci_store_block(const u64 *data, u64 req, u64 offset)
187{
188 u8 status;
189 int cc;
190
191 do {
192 cc = __pcistb(data, req, offset, &status);
193 if (cc == 2)
194 udelay(ZPCI_INSN_BUSY_DELAY);
195 } while (cc == 2);
196
197 if (cc)
198 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
199 __func__, cc, status, req, offset);
200 return (cc > 0) ? -EIO : cc;
201}
202EXPORT_SYMBOL_GPL(s390pci_store_block);
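
Every wrapper in the new pci_insn.c follows the same discipline: issue the instruction, and if the condition code reports "busy" (cc == 2) wait briefly and retry, otherwise translate the condition code into an error value. A minimal userspace sketch of that retry loop follows; the issue_insn() backend, the delay length and the error mapping are illustrative only.

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define CC_OK         0
    #define CC_BUSY       2
    #define BUSY_DELAY_US 1

    /* Fake instruction backend: pretend to be busy twice, then succeed. */
    static int issue_insn(void)
    {
        static int busy_left = 2;

        if (busy_left > 0) {
            busy_left--;
            return CC_BUSY;
        }
        return CC_OK;
    }

    /* Retry while the condition code reports "busy", then translate the
     * final condition code into a conventional error value. */
    static int insn_wrapper(void)
    {
        int cc;

        do {
            cc = issue_insn();
            if (cc == CC_BUSY)
                usleep(BUSY_DELAY_US);
        } while (cc == CC_BUSY);

        return cc ? -EIO : 0;
    }

    int main(void)
    {
        printf("rc=%d\n", insn_wrapper());
        return 0;
    }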
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 0297931335e1..b097aed05a9b 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -18,8 +18,9 @@
18 18
19/* mapping of irq numbers to msi_desc */ 19/* mapping of irq numbers to msi_desc */
20static struct hlist_head *msi_hash; 20static struct hlist_head *msi_hash;
21static unsigned int msihash_shift = 6; 21static const unsigned int msi_hash_bits = 8;
22#define msi_hashfn(nr) hash_long(nr, msihash_shift) 22#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
23#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
23 24
24static DEFINE_SPINLOCK(msi_map_lock); 25static DEFINE_SPINLOCK(msi_map_lock);
25 26
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
74 map->irq = nr; 75 map->irq = nr;
75 map->msi = msi; 76 map->msi = msi;
76 zdev->msi_map[nr & ZPCI_MSI_MASK] = map; 77 zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
78 INIT_HLIST_NODE(&map->msi_chain);
77 79
78 pr_debug("%s hashing irq: %u to bucket nr: %llu\n", 80 pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
79 __func__, nr, msi_hashfn(nr)); 81 __func__, nr, msi_hashfn(nr));
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
125{ 127{
126 unsigned int i; 128 unsigned int i;
127 129
128 msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL); 130 msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
129 if (!msi_hash) 131 if (!msi_hash)
130 return -ENOMEM; 132 return -ENOMEM;
131 133
132 for (i = 0; i < (1U << msihash_shift); i++) 134 for (i = 0; i < MSI_HASH_BUCKETS; i++)
133 INIT_HLIST_HEAD(&msi_hash[i]); 135 INIT_HLIST_HEAD(&msi_hash[i]);
134 return 0; 136 return 0;
135} 137}
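
The pci_msi.c hunk derives the bucket count from a single constant (1U << msi_hash_bits) so the table allocation, the initialization loop and the hash function can no longer disagree about the table size, and it initializes each map's hlist node before the entry is hashed in. A small sketch of the sizing idea (the toy hash function, bit width and bucket struct are placeholders):

    #include <stdio.h>
    #include <stdlib.h>

    #define HASH_BITS    8
    #define HASH_BUCKETS (1U << HASH_BITS)

    /* Toy multiplicative hash folded down to HASH_BITS bits. */
    static unsigned int hashfn(unsigned long key)
    {
        return (unsigned int)(key * 2654435761UL) & (HASH_BUCKETS - 1);
    }

    struct bucket {
        int count;   /* stand-in for an hlist head */
    };

    int main(void)
    {
        /* Allocation size and index range both come from the same constant. */
        struct bucket *table = calloc(HASH_BUCKETS, sizeof(*table));
        unsigned long key;

        if (!table)
            return 1;
        for (key = 0; key < 1000; key++)
            table[hashfn(key)].count++;

        printf("bucket[0] holds %d entries\n", table[0].count);
        free(table);
        return 0;
    }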
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index 79568466b578..f4c6d02421d3 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -41,24 +41,6 @@ void machine_halt(void) {}
41/* If or when software machine-power-off is implemented, add code here. */ 41/* If or when software machine-power-off is implemented, add code here. */
42void machine_power_off(void) {} 42void machine_power_off(void) {}
43 43
44/*
45 * The idle thread. There's no useful work to be
46 * done, so just try to conserve power and have a
47 * low exit latency (ie sit in a loop waiting for
48 * somebody to say that they'd like to reschedule)
49 */
50void __noreturn cpu_idle(void)
51{
52 /* endless idle loop with no priority at all */
53 while (1) {
54 rcu_idle_enter();
55 while (!need_resched())
56 barrier();
57 rcu_idle_exit();
58 schedule_preempt_disabled();
59 }
60}
61
62void ret_from_fork(void); 44void ret_from_fork(void);
63void ret_from_kernel_thread(void); 45void ret_from_kernel_thread(void);
64 46
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 0e46fb19a848..1517a7dcd6d9 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -117,6 +117,8 @@ static void show_code(unsigned int *pc)
117 */ 117 */
118void show_regs(struct pt_regs *regs) 118void show_regs(struct pt_regs *regs)
119{ 119{
120 show_regs_print_info(KERN_DEFAULT);
121
120 printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 122 printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
121 regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], 123 regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3],
122 regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); 124 regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
@@ -149,16 +151,6 @@ static void show_registers(struct pt_regs *regs)
149 printk(KERN_NOTICE "\n"); 151 printk(KERN_NOTICE "\n");
150} 152}
151 153
152/*
153 * The architecture-independent dump_stack generator
154 */
155void dump_stack(void)
156{
157 show_stack(current_thread_info()->task,
158 (long *) get_irq_regs()->regs[0]);
159}
160EXPORT_SYMBOL(dump_stack);
161
162void __die(const char *str, struct pt_regs *regs, const char *file, 154void __die(const char *str, struct pt_regs *regs, const char *file,
163 const char *func, unsigned long line) 155 const char *func, unsigned long line)
164{ 156{
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index cee6bce1e30c..1592aad7dbc4 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
43 43
44static struct kcore_list kcore_mem, kcore_vmalloc; 44static struct kcore_list kcore_mem, kcore_vmalloc;
45 45
46static unsigned long setup_zero_page(void) 46static void setup_zero_page(void)
47{ 47{
48 struct page *page; 48 struct page *page;
49 49
@@ -52,9 +52,7 @@ static unsigned long setup_zero_page(void)
52 panic("Oh boy, that early out of memory?"); 52 panic("Oh boy, that early out of memory?");
53 53
54 page = virt_to_page((void *) empty_zero_page); 54 page = virt_to_page((void *) empty_zero_page);
55 SetPageReserved(page); 55 mark_page_reserved(page);
56
57 return 1UL;
58} 56}
59 57
60#ifndef CONFIG_NEED_MULTIPLE_NODES 58#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -84,7 +82,7 @@ void __init mem_init(void)
84 82
85 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 83 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
86 totalram_pages += free_all_bootmem(); 84 totalram_pages += free_all_bootmem();
87 totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ 85 setup_zero_page(); /* Setup zeroed pages. */
88 reservedpages = 0; 86 reservedpages = 0;
89 87
90 for (tmp = 0; tmp < max_low_pfn; tmp++) 88 for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -109,37 +107,16 @@ void __init mem_init(void)
109} 107}
110#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 108#endif /* !CONFIG_NEED_MULTIPLE_NODES */
111 109
112static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
113{
114 unsigned long pfn;
115
116 for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
117 struct page *page = pfn_to_page(pfn);
118 void *addr = phys_to_virt(PFN_PHYS(pfn));
119
120 ClearPageReserved(page);
121 init_page_count(page);
122 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
123 __free_page(page);
124 totalram_pages++;
125 }
126 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
127}
128
129#ifdef CONFIG_BLK_DEV_INITRD 110#ifdef CONFIG_BLK_DEV_INITRD
130void free_initrd_mem(unsigned long start, unsigned long end) 111void free_initrd_mem(unsigned long start, unsigned long end)
131{ 112{
132 free_init_pages("initrd memory", 113 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
133 virt_to_phys((void *) start),
134 virt_to_phys((void *) end));
135} 114}
136#endif 115#endif
137 116
138void __init_refok free_initmem(void) 117void __init_refok free_initmem(void)
139{ 118{
140 free_init_pages("unused kernel memory", 119 free_initmem_default(POISON_FREE_INITMEM);
141 __pa(&__init_begin),
142 __pa(&__init_end));
143} 120}
144 121
145unsigned long pgd_current; 122unsigned long pgd_current;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e859633ce69..78d8ace57272 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -33,6 +33,7 @@ config SUPERH
33 select GENERIC_ATOMIC64 33 select GENERIC_ATOMIC64
34 select GENERIC_IRQ_SHOW 34 select GENERIC_IRQ_SHOW
35 select GENERIC_SMP_IDLE_THREAD 35 select GENERIC_SMP_IDLE_THREAD
36 select GENERIC_IDLE_POLL_SETUP
36 select GENERIC_CLOCKEVENTS 37 select GENERIC_CLOCKEVENTS
37 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST 38 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
38 select GENERIC_STRNCPY_FROM_USER 39 select GENERIC_STRNCPY_FROM_USER
@@ -148,9 +149,6 @@ config ARCH_HAS_ILOG2_U32
148config ARCH_HAS_ILOG2_U64 149config ARCH_HAS_ILOG2_U64
149 def_bool n 150 def_bool n
150 151
151config ARCH_HAS_DEFAULT_IDLE
152 def_bool y
153
154config NO_IOPORT 152config NO_IOPORT
155 def_bool !PCI 153 def_bool !PCI
156 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \ 154 depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \
@@ -624,25 +622,7 @@ config SH_CLK_CPG_LEGACY
624endmenu 622endmenu
625 623
626menu "CPU Frequency scaling" 624menu "CPU Frequency scaling"
627
628source "drivers/cpufreq/Kconfig" 625source "drivers/cpufreq/Kconfig"
629
630config SH_CPU_FREQ
631 tristate "SuperH CPU Frequency driver"
632 depends on CPU_FREQ
633 select CPU_FREQ_TABLE
634 help
635 This adds the cpufreq driver for SuperH. Any CPU that supports
636 clock rate rounding through the clock framework can use this
637 driver. While it will make the kernel slightly larger, this is
638 harmless for CPUs that don't support rate rounding. The driver
639 will also generate a notice in the boot log before disabling
640 itself if the CPU in question is not capable of rate rounding.
641
642 For details, take a look at <file:Documentation/cpu-freq>.
643
644 If unsure, say N.
645
646endmenu 626endmenu
647 627
648source "arch/sh/drivers/Kconfig" 628source "arch/sh/drivers/Kconfig"
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index aaff7671101b..764530c85aa9 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -254,11 +254,13 @@ static int usbhs_get_id(struct platform_device *pdev)
254 return gpio_get_value(GPIO_PTB3); 254 return gpio_get_value(GPIO_PTB3);
255} 255}
256 256
257static void usbhs_phy_reset(struct platform_device *pdev) 257static int usbhs_phy_reset(struct platform_device *pdev)
258{ 258{
259 /* enable vbus if HOST */ 259 /* enable vbus if HOST */
260 if (!gpio_get_value(GPIO_PTB3)) 260 if (!gpio_get_value(GPIO_PTB3))
261 gpio_set_value(GPIO_PTB5, 1); 261 gpio_set_value(GPIO_PTB5, 1);
262
263 return 0;
262} 264}
263 265
264static struct renesas_usbhs_platform_info usbhs_info = { 266static struct renesas_usbhs_platform_info usbhs_info = {
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index c2c85f6cd738..a162a7f86b2e 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -35,7 +35,7 @@ static unsigned int nr_ports;
35 35
36static struct sh7786_pcie_hwops { 36static struct sh7786_pcie_hwops {
37 int (*core_init)(void); 37 int (*core_init)(void);
38 async_func_ptr *port_init_hw; 38 async_func_t port_init_hw;
39} *sh7786_pcie_hwops; 39} *sh7786_pcie_hwops;
40 40
41static struct resource sh7786_pci0_resources[] = { 41static struct resource sh7786_pci0_resources[] = {
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index b3808c7d67b2..699255d6d1c6 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
3 3
4#include <asm/cacheflush.h> 4#include <asm/cacheflush.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm-generic/hugetlb.h>
6 7
7 8
8static inline int is_hugepage_only_range(struct mm_struct *mm, 9static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index e14567a7e9a1..70ae0b2888ab 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -14,9 +14,9 @@ struct swsusp_arch_regs {
14void sh_mobile_call_standby(unsigned long mode); 14void sh_mobile_call_standby(unsigned long mode);
15 15
16#ifdef CONFIG_CPU_IDLE 16#ifdef CONFIG_CPU_IDLE
17void sh_mobile_setup_cpuidle(void); 17int sh_mobile_setup_cpuidle(void);
18#else 18#else
19static inline void sh_mobile_setup_cpuidle(void) {} 19static inline int sh_mobile_setup_cpuidle(void) { return 0; }
20#endif 20#endif
21 21
22/* notifier chains for pre/post sleep hooks */ 22/* notifier chains for pre/post sleep hooks */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 7d5ac4e48485..45a93669289d 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -207,8 +207,6 @@ static inline bool test_and_clear_restore_sigmask(void)
207 return true; 207 return true;
208} 208}
209 209
210#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
211
212#endif /* !__ASSEMBLY__ */ 210#endif /* !__ASSEMBLY__ */
213 211
214#endif /* __KERNEL__ */ 212#endif /* __KERNEL__ */
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 5e90fa2b7eed..e77816c4b9bc 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -30,12 +30,4 @@
30# define __ARCH_WANT_SYS_VFORK 30# define __ARCH_WANT_SYS_VFORK
31# define __ARCH_WANT_SYS_CLONE 31# define __ARCH_WANT_SYS_CLONE
32 32
33/*
34 * "Conditional" syscalls
35 *
36 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
37 * but it doesn't work on all toolchains, so we just do it by hand
38 */
39# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
40
41#include <uapi/asm/unistd.h> 33#include <uapi/asm/unistd.h>
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index f259b37874e9..261c8bfd75ce 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_VSYSCALL) += vsyscall/
31obj-$(CONFIG_SMP) += smp.o 31obj-$(CONFIG_SMP) += smp.o
32obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o 32obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
33obj-$(CONFIG_KGDB) += kgdb.o 33obj-$(CONFIG_KGDB) += kgdb.o
34obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
35obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o 34obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
36obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 35obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
37obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 36obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 1ddc876d3b26..d30622592116 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -51,70 +51,53 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
51 return k; 51 return k;
52} 52}
53 53
54static struct cpuidle_device cpuidle_dev;
55static struct cpuidle_driver cpuidle_driver = { 54static struct cpuidle_driver cpuidle_driver = {
56 .name = "sh_idle", 55 .name = "sh_idle",
57 .owner = THIS_MODULE, 56 .owner = THIS_MODULE,
58 .en_core_tk_irqen = 1, 57 .states = {
58 {
59 .exit_latency = 1,
60 .target_residency = 1 * 2,
61 .power_usage = 3,
62 .flags = CPUIDLE_FLAG_TIME_VALID,
63 .enter = cpuidle_sleep_enter,
64 .name = "C1",
65 .desc = "SuperH Sleep Mode",
66 },
67 {
68 .exit_latency = 100,
69 .target_residency = 1 * 2,
70 .power_usage = 1,
71 .flags = CPUIDLE_FLAG_TIME_VALID,
72 .enter = cpuidle_sleep_enter,
73 .name = "C2",
74 .desc = "SuperH Sleep Mode [SF]",
75 .disabled = true,
76 },
77 {
78 .exit_latency = 2300,
79 .target_residency = 1 * 2,
80 .power_usage = 1,
81 .flags = CPUIDLE_FLAG_TIME_VALID,
82 .enter = cpuidle_sleep_enter,
83 .name = "C3",
84 .desc = "SuperH Mobile Standby Mode [SF]",
85 .disabled = true,
86 },
87 },
88 .safe_state_index = 0,
89 .state_count = 3,
59}; 90};
60 91
61void sh_mobile_setup_cpuidle(void) 92int __init sh_mobile_setup_cpuidle(void)
62{ 93{
63 struct cpuidle_device *dev = &cpuidle_dev; 94 int ret;
64 struct cpuidle_driver *drv = &cpuidle_driver;
65 struct cpuidle_state *state;
66 int i;
67 95
96 if (sh_mobile_sleep_supported & SUSP_SH_SF)
97 cpuidle_driver.states[1].disabled = false;
68 98
69 for (i = 0; i < CPUIDLE_STATE_MAX; i++) { 99 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
70 drv->states[i].name[0] = '\0'; 100 cpuidle_driver.states[2].disabled = false;
71 drv->states[i].desc[0] = '\0';
72 }
73 101
74 i = CPUIDLE_DRIVER_STATE_START; 102 return cpuidle_register(&cpuidle_driver);
75
76 state = &drv->states[i++];
77 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
78 strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
79 state->exit_latency = 1;
80 state->target_residency = 1 * 2;
81 state->power_usage = 3;
82 state->flags = 0;
83 state->flags |= CPUIDLE_FLAG_TIME_VALID;
84 state->enter = cpuidle_sleep_enter;
85
86 drv->safe_state_index = i-1;
87
88 if (sh_mobile_sleep_supported & SUSP_SH_SF) {
89 state = &drv->states[i++];
90 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
91 strncpy(state->desc, "SuperH Sleep Mode [SF]",
92 CPUIDLE_DESC_LEN);
93 state->exit_latency = 100;
94 state->target_residency = 1 * 2;
95 state->power_usage = 1;
96 state->flags = 0;
97 state->flags |= CPUIDLE_FLAG_TIME_VALID;
98 state->enter = cpuidle_sleep_enter;
99 }
100
101 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
102 state = &drv->states[i++];
103 snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
104 strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
105 CPUIDLE_DESC_LEN);
106 state->exit_latency = 2300;
107 state->target_residency = 1 * 2;
108 state->power_usage = 1;
109 state->flags = 0;
110 state->flags |= CPUIDLE_FLAG_TIME_VALID;
111 state->enter = cpuidle_sleep_enter;
112 }
113
114 drv->state_count = i;
115 dev->state_count = i;
116
117 cpuidle_register_driver(&cpuidle_driver);
118
119 cpuidle_register_device(dev);
120} 103}
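
The shmobile cpuidle rework replaces the hand-built state descriptions with a table that is fully initialized at compile time; setup then only clears the "disabled" flag on the deeper states the hardware actually supports and registers the driver in one call. A rough, non-kernel sketch of that pattern follows; the state names, support flags and the register_idle_states() stand-in are invented.

    #include <stdbool.h>
    #include <stdio.h>

    #define SUPPORT_SLEEP_SF   (1 << 0)
    #define SUPPORT_STANDBY_SF (1 << 1)

    struct idle_state {
        const char *name;
        unsigned int exit_latency_us;
        bool disabled;
    };

    /* Everything known at build time lives in the table; only
     * availability is decided at setup time. */
    static struct idle_state states[] = {
        { .name = "C1", .exit_latency_us = 1,    .disabled = false },
        { .name = "C2", .exit_latency_us = 100,  .disabled = true  },
        { .name = "C3", .exit_latency_us = 2300, .disabled = true  },
    };

    static int register_idle_states(const struct idle_state *s, int n)
    {
        int i, enabled = 0;

        for (i = 0; i < n; i++) {
            if (s[i].disabled)
                continue;
            printf("registered %s (%u us)\n", s[i].name, s[i].exit_latency_us);
            enabled++;
        }
        return enabled ? 0 : -1;
    }

    int main(void)
    {
        unsigned int supported = SUPPORT_SLEEP_SF;  /* pretend probe result */

        if (supported & SUPPORT_SLEEP_SF)
            states[1].disabled = false;
        if (supported & SUPPORT_STANDBY_SF)
            states[2].disabled = false;

        return register_idle_states(states, 3);
    }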
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 08d27fac8d08..ac37b7234f85 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -150,8 +150,7 @@ static const struct platform_suspend_ops sh_pm_ops = {
150static int __init sh_pm_init(void) 150static int __init sh_pm_init(void)
151{ 151{
152 suspend_set_ops(&sh_pm_ops); 152 suspend_set_ops(&sh_pm_ops);
153 sh_mobile_setup_cpuidle(); 153 return sh_mobile_setup_cpuidle();
154 return 0;
155} 154}
156 155
157late_initcall(sh_pm_init); 156late_initcall(sh_pm_init);
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
deleted file mode 100644
index e68b45b6f3f9..000000000000
--- a/arch/sh/kernel/cpufreq.c
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * arch/sh/kernel/cpufreq.c
3 *
4 * cpufreq driver for the SuperH processors.
5 *
6 * Copyright (C) 2002 - 2012 Paul Mundt
7 * Copyright (C) 2002 M. R. Brown
8 *
9 * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
10 *
11 * Copyright (C) 2004-2007 Atmel Corporation
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#define pr_fmt(fmt) "cpufreq: " fmt
18
19#include <linux/types.h>
20#include <linux/cpufreq.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/cpumask.h>
26#include <linux/cpu.h>
27#include <linux/smp.h>
28#include <linux/sched.h> /* set_cpus_allowed() */
29#include <linux/clk.h>
30#include <linux/percpu.h>
31#include <linux/sh_clk.h>
32
33static DEFINE_PER_CPU(struct clk, sh_cpuclk);
34
35static unsigned int sh_cpufreq_get(unsigned int cpu)
36{
37 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
38}
39
40/*
41 * Here we notify other drivers of the proposed change and the final change.
42 */
43static int sh_cpufreq_target(struct cpufreq_policy *policy,
44 unsigned int target_freq,
45 unsigned int relation)
46{
47 unsigned int cpu = policy->cpu;
48 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
49 cpumask_t cpus_allowed;
50 struct cpufreq_freqs freqs;
51 struct device *dev;
52 long freq;
53
54 if (!cpu_online(cpu))
55 return -ENODEV;
56
57 cpus_allowed = current->cpus_allowed;
58 set_cpus_allowed_ptr(current, cpumask_of(cpu));
59
60 BUG_ON(smp_processor_id() != cpu);
61
62 dev = get_cpu_device(cpu);
63
64 /* Convert target_freq from kHz to Hz */
65 freq = clk_round_rate(cpuclk, target_freq * 1000);
66
67 if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
68 return -EINVAL;
69
70 dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
71
72 freqs.cpu = cpu;
73 freqs.old = sh_cpufreq_get(cpu);
74 freqs.new = (freq + 500) / 1000;
75 freqs.flags = 0;
76
77 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
78 set_cpus_allowed_ptr(current, &cpus_allowed);
79 clk_set_rate(cpuclk, freq);
80 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
81
82 dev_dbg(dev, "set frequency %lu Hz\n", freq);
83
84 return 0;
85}
86
87static int sh_cpufreq_verify(struct cpufreq_policy *policy)
88{
89 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
90 struct cpufreq_frequency_table *freq_table;
91
92 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
93 if (freq_table)
94 return cpufreq_frequency_table_verify(policy, freq_table);
95
96 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
97 policy->cpuinfo.max_freq);
98
99 policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
100 policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
101
102 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
103 policy->cpuinfo.max_freq);
104
105 return 0;
106}
107
108static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
109{
110 unsigned int cpu = policy->cpu;
111 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
112 struct cpufreq_frequency_table *freq_table;
113 struct device *dev;
114
115 if (!cpu_online(cpu))
116 return -ENODEV;
117
118 dev = get_cpu_device(cpu);
119
120 cpuclk = clk_get(dev, "cpu_clk");
121 if (IS_ERR(cpuclk)) {
122 dev_err(dev, "couldn't get CPU clk\n");
123 return PTR_ERR(cpuclk);
124 }
125
126 policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu);
127
128 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
129 if (freq_table) {
130 int result;
131
132 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
133 if (!result)
134 cpufreq_frequency_table_get_attr(freq_table, cpu);
135 } else {
136 dev_notice(dev, "no frequency table found, falling back "
137 "to rate rounding.\n");
138
139 policy->cpuinfo.min_freq =
140 (clk_round_rate(cpuclk, 1) + 500) / 1000;
141 policy->cpuinfo.max_freq =
142 (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
143 }
144
145 policy->min = policy->cpuinfo.min_freq;
146 policy->max = policy->cpuinfo.max_freq;
147
148 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
149
150 dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
151 "Maximum %u.%03u MHz.\n",
152 policy->min / 1000, policy->min % 1000,
153 policy->max / 1000, policy->max % 1000);
154
155 return 0;
156}
157
158static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
159{
160 unsigned int cpu = policy->cpu;
161 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
162
163 cpufreq_frequency_table_put_attr(cpu);
164 clk_put(cpuclk);
165
166 return 0;
167}
168
169static struct freq_attr *sh_freq_attr[] = {
170 &cpufreq_freq_attr_scaling_available_freqs,
171 NULL,
172};
173
174static struct cpufreq_driver sh_cpufreq_driver = {
175 .owner = THIS_MODULE,
176 .name = "sh",
177 .get = sh_cpufreq_get,
178 .target = sh_cpufreq_target,
179 .verify = sh_cpufreq_verify,
180 .init = sh_cpufreq_cpu_init,
181 .exit = sh_cpufreq_cpu_exit,
182 .attr = sh_freq_attr,
183};
184
185static int __init sh_cpufreq_module_init(void)
186{
187 pr_notice("SuperH CPU frequency driver.\n");
188 return cpufreq_register_driver(&sh_cpufreq_driver);
189}
190
191static void __exit sh_cpufreq_module_exit(void)
192{
193 cpufreq_unregister_driver(&sh_cpufreq_driver);
194}
195
196module_init(sh_cpufreq_module_init);
197module_exit(sh_cpufreq_module_exit);
198
199MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
200MODULE_DESCRIPTION("cpufreq driver for SuperH");
201MODULE_LICENSE("GPL");
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 7617dc4129ac..b959f5592604 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -158,9 +158,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
158 (unsigned long)task_stack_page(tsk)); 158 (unsigned long)task_stack_page(tsk));
159 show_trace(tsk, sp, NULL); 159 show_trace(tsk, sp, NULL);
160} 160}
161
162void dump_stack(void)
163{
164 show_stack(NULL, NULL);
165}
166EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc0..2ea4483fd722 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
24 24
25static void (*sh_idle)(void); 25static void (*sh_idle)(void);
26 26
27static int hlt_counter; 27void default_idle(void)
28
29static int __init nohlt_setup(char *__unused)
30{
31 hlt_counter = 1;
32 return 1;
33}
34__setup("nohlt", nohlt_setup);
35
36static int __init hlt_setup(char *__unused)
37{
38 hlt_counter = 0;
39 return 1;
40}
41__setup("hlt", hlt_setup);
42
43static inline int hlt_works(void)
44{
45 return !hlt_counter;
46}
47
48/*
49 * On SMP it's slightly faster (but much more power-consuming!)
50 * to poll the ->work.need_resched flag instead of waiting for the
51 * cross-CPU IPI to arrive. Use this option with caution.
52 */
53static void poll_idle(void)
54{ 28{
29 set_bl_bit();
55 local_irq_enable(); 30 local_irq_enable();
56 while (!need_resched()) 31 /* Isn't this racy ? */
57 cpu_relax(); 32 cpu_sleep();
33 clear_bl_bit();
58} 34}
59 35
60void default_idle(void) 36void arch_cpu_idle_dead(void)
61{ 37{
62 if (hlt_works()) { 38 play_dead();
63 clear_thread_flag(TIF_POLLING_NRFLAG);
64 smp_mb__after_clear_bit();
65
66 set_bl_bit();
67 if (!need_resched()) {
68 local_irq_enable();
69 cpu_sleep();
70 } else
71 local_irq_enable();
72
73 set_thread_flag(TIF_POLLING_NRFLAG);
74 clear_bl_bit();
75 } else
76 poll_idle();
77} 39}
78 40
79/* 41void arch_cpu_idle(void)
80 * The idle thread. There's no useful work to be done, so just try to conserve
81 * power and have a low exit latency (ie sit in a loop waiting for somebody to
82 * say that they'd like to reschedule)
83 */
84void cpu_idle(void)
85{ 42{
86 unsigned int cpu = smp_processor_id(); 43 if (cpuidle_idle_call())
87 44 sh_idle();
88 set_thread_flag(TIF_POLLING_NRFLAG);
89
90 /* endless idle loop with no priority at all */
91 while (1) {
92 tick_nohz_idle_enter();
93 rcu_idle_enter();
94
95 while (!need_resched()) {
96 check_pgt_cache();
97 rmb();
98
99 if (cpu_is_offline(cpu))
100 play_dead();
101
102 local_irq_disable();
103 /* Don't trace irqs off for idle */
104 stop_critical_timings();
105 if (cpuidle_idle_call())
106 sh_idle();
107 /*
108 * Sanity check to ensure that sh_idle() returns
109 * with IRQs enabled
110 */
111 WARN_ON(irqs_disabled());
112 start_critical_timings();
113 }
114
115 rcu_idle_exit();
116 tick_nohz_idle_exit();
117 schedule_preempt_disabled();
118 }
119} 45}
120 46
121void __init select_idle_routine(void) 47void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
123 /* 49 /*
124 * If a platform has set its own idle routine, leave it alone. 50 * If a platform has set its own idle routine, leave it alone.
125 */ 51 */
126 if (sh_idle) 52 if (!sh_idle)
127 return;
128
129 if (hlt_works())
130 sh_idle = default_idle; 53 sh_idle = default_idle;
131 else
132 sh_idle = poll_idle;
133} 54}
134 55
135void stop_this_cpu(void *unused) 56void stop_this_cpu(void *unused)
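
After this change the SH idle loop itself lives in generic code; the architecture only supplies the low-level callbacks, and select_idle_routine() merely installs default_idle when a platform has not provided its own routine. A tiny sketch of that "fill in the hook only if unset" pattern (the function names here are placeholders, not the kernel's API):

    #include <stdio.h>

    static void (*idle_hook)(void);

    static void default_hook(void)
    {
        puts("default idle: wait for an interrupt");
    }

    /* Leave a platform-provided hook alone; otherwise install the default. */
    static void select_idle_hook(void)
    {
        if (!idle_hook)
            idle_hook = default_hook;
    }

    /* Generic caller: it owns the loop bookkeeping elsewhere and simply
     * invokes whatever hook was selected. */
    static void do_idle_once(void)
    {
        select_idle_hook();
        idle_hook();
    }

    int main(void)
    {
        do_idle_once();
        return 0;
    }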
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 73eb66fc6253..ebd3933005b4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -32,11 +32,7 @@
32void show_regs(struct pt_regs * regs) 32void show_regs(struct pt_regs * regs)
33{ 33{
34 printk("\n"); 34 printk("\n");
35 printk("Pid : %d, Comm: \t\t%s\n", task_pid_nr(current), current->comm); 35 show_regs_print_info(KERN_DEFAULT);
36 printk("CPU : %d \t\t%s (%s %.*s)\n\n",
37 smp_processor_id(), print_tainted(), init_utsname()->release,
38 (int)strcspn(init_utsname()->version, " "),
39 init_utsname()->version);
40 36
41 print_symbol("PC is at %s\n", instruction_pointer(regs)); 37 print_symbol("PC is at %s\n", instruction_pointer(regs));
42 print_symbol("PR is at %s\n", regs->pr); 38 print_symbol("PR is at %s\n", regs->pr);
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index e611c85144b1..174d124b419e 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -40,6 +40,7 @@ void show_regs(struct pt_regs *regs)
40 unsigned long long ah, al, bh, bl, ch, cl; 40 unsigned long long ah, al, bh, bl, ch, cl;
41 41
42 printk("\n"); 42 printk("\n");
43 show_regs_print_info(KERN_DEFAULT);
43 44
44 ah = (regs->pc) >> 32; 45 ah = (regs->pc) >> 32;
45 al = (regs->pc) & 0xffffffff; 46 al = (regs->pc) & 0xffffffff;
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 47475cca068a..fe584e516964 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -104,6 +104,7 @@ void sh_bios_vbr_reload(void)
104 ); 104 );
105} 105}
106 106
107#ifdef CONFIG_EARLY_PRINTK
107/* 108/*
108 * Print a string through the BIOS 109 * Print a string through the BIOS
109 */ 110 */
@@ -144,8 +145,6 @@ static struct console bios_console = {
144 .index = -1, 145 .index = -1,
145}; 146};
146 147
147static struct console *early_console;
148
149static int __init setup_early_printk(char *buf) 148static int __init setup_early_printk(char *buf)
150{ 149{
151 int keep_early = 0; 150 int keep_early = 0;
@@ -170,3 +169,4 @@ static int __init setup_early_printk(char *buf)
170 return 0; 169 return 0;
171} 170}
172early_param("earlyprintk", setup_early_printk); 171early_param("earlyprintk", setup_early_printk);
172#endif
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2062aa88af41..45696451f0ea 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
203 set_cpu_online(cpu, true); 203 set_cpu_online(cpu, true);
204 per_cpu(cpu_state, cpu) = CPU_ONLINE; 204 per_cpu(cpu_state, cpu) = CPU_ONLINE;
205 205
206 cpu_idle(); 206 cpu_startup_entry(CPUHP_ONLINE);
207} 207}
208 208
209extern struct { 209extern struct {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 105794037143..20f9ead650d3 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -417,15 +417,13 @@ void __init mem_init(void)
417 417
418 for_each_online_node(nid) { 418 for_each_online_node(nid) {
419 pg_data_t *pgdat = NODE_DATA(nid); 419 pg_data_t *pgdat = NODE_DATA(nid);
420 unsigned long node_pages = 0;
421 void *node_high_memory; 420 void *node_high_memory;
422 421
423 num_physpages += pgdat->node_present_pages; 422 num_physpages += pgdat->node_present_pages;
424 423
425 if (pgdat->node_spanned_pages) 424 if (pgdat->node_spanned_pages)
426 node_pages = free_all_bootmem_node(pgdat); 425 totalram_pages += free_all_bootmem_node(pgdat);
427 426
428 totalram_pages += node_pages;
429 427
430 node_high_memory = (void *)__va((pgdat->node_start_pfn + 428 node_high_memory = (void *)__va((pgdat->node_start_pfn +
431 pgdat->node_spanned_pages) << 429 pgdat->node_spanned_pages) <<
@@ -501,31 +499,13 @@ void __init mem_init(void)
501 499
502void free_initmem(void) 500void free_initmem(void)
503{ 501{
504 unsigned long addr; 502 free_initmem_default(0);
505
506 addr = (unsigned long)(&__init_begin);
507 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
508 ClearPageReserved(virt_to_page(addr));
509 init_page_count(virt_to_page(addr));
510 free_page(addr);
511 totalram_pages++;
512 }
513 printk("Freeing unused kernel memory: %ldk freed\n",
514 ((unsigned long)&__init_end -
515 (unsigned long)&__init_begin) >> 10);
516} 503}
517 504
518#ifdef CONFIG_BLK_DEV_INITRD 505#ifdef CONFIG_BLK_DEV_INITRD
519void free_initrd_mem(unsigned long start, unsigned long end) 506void free_initrd_mem(unsigned long start, unsigned long end)
520{ 507{
521 unsigned long p; 508 free_reserved_area(start, end, 0, "initrd");
522 for (p = start; p < end; p += PAGE_SIZE) {
523 ClearPageReserved(virt_to_page(p));
524 init_page_count(virt_to_page(p));
525 free_page(p);
526 totalram_pages++;
527 }
528 printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
529} 509}
530#endif 510#endif
531 511
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3d361f236308..f5041d741dea 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -62,7 +62,6 @@ config SPARC64
62 select HAVE_RCU_TABLE_FREE if SMP 62 select HAVE_RCU_TABLE_FREE if SMP
63 select HAVE_MEMBLOCK 63 select HAVE_MEMBLOCK
64 select HAVE_MEMBLOCK_NODE_MAP 64 select HAVE_MEMBLOCK_NODE_MAP
65 select HAVE_SYSCALL_WRAPPERS
66 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 65 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
67 select HAVE_DYNAMIC_FTRACE 66 select HAVE_DYNAMIC_FTRACE
68 select HAVE_FTRACE_MCOUNT_RECORD 67 select HAVE_FTRACE_MCOUNT_RECORD
@@ -254,29 +253,6 @@ config HOTPLUG_CPU
254 253
255if SPARC64 254if SPARC64
256source "drivers/cpufreq/Kconfig" 255source "drivers/cpufreq/Kconfig"
257
258config US3_FREQ
259 tristate "UltraSPARC-III CPU Frequency driver"
260 depends on CPU_FREQ
261 select CPU_FREQ_TABLE
262 help
263 This adds the CPUFreq driver for UltraSPARC-III processors.
264
265 For details, take a look at <file:Documentation/cpu-freq>.
266
267 If in doubt, say N.
268
269config US2E_FREQ
270 tristate "UltraSPARC-IIe CPU Frequency driver"
271 depends on CPU_FREQ
272 select CPU_FREQ_TABLE
273 help
274 This adds the CPUFreq driver for UltraSPARC-IIe processors.
275
276 For details, take a look at <file:Documentation/cpu-freq>.
277
278 If in doubt, say N.
279
280endif 256endif
281 257
282config US3_MC 258config US3_MC
@@ -407,6 +383,8 @@ config SERIAL_CONSOLE
407config SPARC_LEON 383config SPARC_LEON
408 bool "Sparc Leon processor family" 384 bool "Sparc Leon processor family"
409 depends on SPARC32 385 depends on SPARC32
386 select USB_EHCI_BIG_ENDIAN_MMIO
387 select USB_EHCI_BIG_ENDIAN_DESC
410 ---help--- 388 ---help---
411 If you say Y here if you are running on a SPARC-LEON processor. 389 If you say Y here if you are running on a SPARC-LEON processor.
412 The LEON processor is a synthesizable VHDL model of the 390 The LEON processor is a synthesizable VHDL model of the
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 7eb57d245044..e4cab465b81f 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_SPARC64_HUGETLB_H 2#define _ASM_SPARC64_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 25849ae3e900..dd3807599bb9 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -132,8 +132,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
132#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ 132#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
133 _TIF_SIGPENDING) 133 _TIF_SIGPENDING)
134 134
135#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
136
137#endif /* __KERNEL__ */ 135#endif /* __KERNEL__ */
138 136
139#endif /* _ASM_THREAD_INFO_H */ 137#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 269bd92313df..d5e504251079 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -256,8 +256,6 @@ static inline bool test_and_clear_restore_sigmask(void)
256 return true; 256 return true;
257} 257}
258 258
259#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
260
261#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) 259#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
262#define test_thread_64bit_stack(__SP) \ 260#define test_thread_64bit_stack(__SP) \
263 ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ 261 ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 5356810bd7e7..dfa53fdd5cbc 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -45,12 +45,4 @@
45#define __ARCH_WANT_COMPAT_SYS_SENDFILE 45#define __ARCH_WANT_COMPAT_SYS_SENDFILE
46#endif 46#endif
47 47
48/*
49 * "Conditional" syscalls
50 *
51 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
52 * but it doesn't work on all toolchains, so we just do it by hand
53 */
54#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
55
56#endif /* _SPARC_UNISTD_H */ 48#endif /* _SPARC_UNISTD_H */
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 6cf591b7e1c6..5276fd4e9d03 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -102,9 +102,6 @@ obj-$(CONFIG_PCI_MSI) += pci_msi.o
102 102
103obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o 103obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
104 104
105# sparc64 cpufreq
106obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
107obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
108obj-$(CONFIG_US3_MC) += chmc.o 105obj-$(CONFIG_US3_MC) += chmc.o
109 106
110obj-$(CONFIG_KPROBES) += kprobes.o 107obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 9365432904d6..605c960b2fa6 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -128,8 +128,7 @@ hv_cpu_startup:
128 128
129 call smp_callin 129 call smp_callin
130 nop 130 nop
131 call cpu_idle 131
132 mov 0, %o0
133 call cpu_panic 132 call cpu_panic
134 nop 133 nop
135 134
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 62eede13831a..fdd819dfdacf 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
64struct task_struct *last_task_used_math = NULL; 64struct task_struct *last_task_used_math = NULL;
65struct thread_info *current_set[NR_CPUS]; 65struct thread_info *current_set[NR_CPUS];
66 66
67/* 67/* Idle loop support. */
68 * the idle loop on a Sparc... ;) 68void arch_cpu_idle(void)
69 */
70void cpu_idle(void)
71{ 69{
72 set_thread_flag(TIF_POLLING_NRFLAG); 70 if (sparc_idle)
73 71 (*sparc_idle)();
74 /* endless idle loop with no priority at all */ 72 local_irq_enable();
75 for (;;) {
76 while (!need_resched()) {
77 if (sparc_idle)
78 (*sparc_idle)();
79 else
80 cpu_relax();
81 }
82 schedule_preempt_disabled();
83 }
84} 73}
85 74
86/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ 75/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
@@ -123,6 +112,8 @@ void show_regs(struct pt_regs *r)
123{ 112{
124 struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; 113 struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];
125 114
115 show_regs_print_info(KERN_DEFAULT);
116
126 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", 117 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
127 r->psr, r->pc, r->npc, r->y, print_tainted()); 118 r->psr, r->pc, r->npc, r->y, print_tainted());
128 printk("PC: <%pS>\n", (void *) r->pc); 119 printk("PC: <%pS>\n", (void *) r->pc);
@@ -153,11 +144,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 	struct reg_window32 *rw;
 	int count = 0;
 
-	if (tsk != NULL)
-		task_base = (unsigned long) task_stack_page(tsk);
-	else
-		task_base = (unsigned long) current_thread_info();
+	if (!tsk)
+		tsk = current;
 
+	if (tsk == current && !_ksp)
+		__asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp));
+
+	task_base = (unsigned long) task_stack_page(tsk);
 	fp = (unsigned long) _ksp;
 	do {
 		/* Bogus frame pointer? */
@@ -173,17 +166,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 	printk("\n");
 }
 
-void dump_stack(void)
-{
-	unsigned long *ksp;
-
-	__asm__ __volatile__("mov %%fp, %0"
-			     : "=r" (ksp));
-	show_stack(current, ksp);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
 /*
  * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
  */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cdb80b2adbe0..baebab215492 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -52,20 +52,17 @@
 
 #include "kstack.h"
 
-static void sparc64_yield(int cpu)
+/* Idle loop support on sparc64. */
+void arch_cpu_idle(void)
 {
 	if (tlb_type != hypervisor) {
 		touch_nmi_watchdog();
-		return;
-	}
-
-	clear_thread_flag(TIF_POLLING_NRFLAG);
-	smp_mb__after_clear_bit();
-
-	while (!need_resched() && !cpu_is_offline(cpu)) {
+	} else {
 		unsigned long pstate;
 
-		/* Disable interrupts. */
+		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+		 * the cpu sleep hypervisor call.
+		 */
 		__asm__ __volatile__(
 			"rdpr %%pstate, %0\n\t"
 			"andn %0, %1, %0\n\t"
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 
-		if (!need_resched() && !cpu_is_offline(cpu))
+		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
 			sun4v_cpu_yield();
 
 		/* Re-enable interrupts. */
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 	}
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	local_irq_enable();
 }
 
-/* The idle loop on sparc64. */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	while(1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched() && !cpu_is_offline(cpu))
-			sparc64_yield(cpu);
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(cpu)) {
-			sched_preempt_enable_no_resched();
-			cpu_play_dead();
-		}
-#endif
-		schedule_preempt_disabled();
-	}
-}
+void arch_cpu_idle_dead()
+{
+	sched_preempt_enable_no_resched();
+	cpu_play_dead();
+}
+#endif
 
 #ifdef CONFIG_COMPAT
 static void show_regwindow32(struct pt_regs *regs)
@@ -186,6 +163,8 @@ static void show_regwindow(struct pt_regs *regs)
 
 void show_regs(struct pt_regs *regs)
 {
+	show_regs_print_info(KERN_DEFAULT);
+
 	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
 	       regs->tpc, regs->tnpc, regs->y, print_tainted());
 	printk("TPC: <%pS>\n", (void *) regs->tpc);
@@ -315,7 +294,7 @@ static void sysrq_handle_globreg(int key)
315 294
316static struct sysrq_key_op sparc_globalreg_op = { 295static struct sysrq_key_op sparc_globalreg_op = {
317 .handler = sysrq_handle_globreg, 296 .handler = sysrq_handle_globreg,
318 .help_msg = "global-regs(Y)", 297 .help_msg = "global-regs(y)",
319 .action_msg = "Show Global CPU Regs", 298 .action_msg = "Show Global CPU Regs",
320}; 299};
321 300
@@ -385,7 +364,7 @@ static void sysrq_handle_globpmu(int key)
385 364
386static struct sysrq_key_op sparc_globalpmu_op = { 365static struct sysrq_key_op sparc_globalpmu_op = {
387 .handler = sysrq_handle_globpmu, 366 .handler = sysrq_handle_globpmu,
388 .help_msg = "global-pmu(X)", 367 .help_msg = "global-pmu(x)",
389 .action_msg = "Show Global PMU Regs", 368 .action_msg = "Show Global PMU Regs",
390}; 369};
391 370
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 9e7e6d718367..e3f2b81c23f1 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
 	local_irq_enable();
 
 	wmb();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 
 	/* We should never reach here! */
 	BUG();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index ca64d2a86ec0..77539eda928c 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void)
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
+
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void cpu_panic(void)
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 240a3cecc11e..2e680b5245c9 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -36,7 +36,6 @@ STUB: sra REG1, 0, REG1; \
36 jmpl %g1 + %lo(SYSCALL), %g0; \ 36 jmpl %g1 + %lo(SYSCALL), %g0; \
37 sra REG3, 0, REG3 37 sra REG3, 0, REG3
38 38
39SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
40SIGN1(sys32_readahead, compat_sys_readahead, %o0) 39SIGN1(sys32_readahead, compat_sys_readahead, %o0)
41SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4) 40SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
42SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5) 41SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
@@ -46,12 +45,9 @@ SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
46SIGN1(sys32_mq_open, compat_sys_mq_open, %o1) 45SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
47SIGN1(sys32_select, compat_sys_select, %o0) 46SIGN1(sys32_select, compat_sys_select, %o0)
48SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5) 47SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
49SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
50SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) 48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
51SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) 49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
52SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) 50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
53SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
54SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
55 51
56 .globl sys32_mmap2 52 .globl sys32_mmap2
57sys32_mmap2: 53sys32_mmap2:
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index f38f2280fade..3d0ddbc005fe 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -49,71 +49,6 @@
49#include <asm/mmu_context.h> 49#include <asm/mmu_context.h>
50#include <asm/compat_signal.h> 50#include <asm/compat_signal.h>
51 51
52#ifdef CONFIG_SYSVIPC
53asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
54{
55 int version;
56
57 version = call >> 16; /* hack for backward compatibility */
58 call &= 0xffff;
59
60 switch (call) {
61 case SEMTIMEDOP:
62 if (fifth)
63 /* sign extend semid */
64 return compat_sys_semtimedop((int)first,
65 compat_ptr(ptr), second,
66 compat_ptr(fifth));
67 /* else fall through for normal semop() */
68 case SEMOP:
69 /* struct sembuf is the same on 32 and 64bit :)) */
70 /* sign extend semid */
71 return sys_semtimedop((int)first, compat_ptr(ptr), second,
72 NULL);
73 case SEMGET:
74 /* sign extend key, nsems */
75 return sys_semget((int)first, (int)second, third);
76 case SEMCTL:
77 /* sign extend semid, semnum */
78 return compat_sys_semctl((int)first, (int)second, third,
79 compat_ptr(ptr));
80
81 case MSGSND:
82 /* sign extend msqid */
83 return compat_sys_msgsnd((int)first, (int)second, third,
84 compat_ptr(ptr));
85 case MSGRCV:
86 /* sign extend msqid, msgtyp */
87 return compat_sys_msgrcv((int)first, second, (int)fifth,
88 third, version, compat_ptr(ptr));
89 case MSGGET:
90 /* sign extend key */
91 return sys_msgget((int)first, second);
92 case MSGCTL:
93 /* sign extend msqid */
94 return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
95
96 case SHMAT:
97 /* sign extend shmid */
98 return compat_sys_shmat((int)first, second, third, version,
99 compat_ptr(ptr));
100 case SHMDT:
101 return sys_shmdt(compat_ptr(ptr));
102 case SHMGET:
103 /* sign extend key_t */
104 return sys_shmget((int)first, second, third);
105 case SHMCTL:
106 /* sign extend shmid */
107 return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
108
109 default:
110 return -ENOSYS;
111 }
112
113 return -ENOSYS;
114}
115#endif
116
117asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) 52asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
118{ 53{
119 if ((int)high < 0) 54 if ((int)high < 0)
@@ -303,15 +238,7 @@ long compat_sys_fadvise64_64(int fd,
303 advice); 238 advice);
304} 239}
305 240
306long sys32_lookup_dcookie(unsigned long cookie_high, 241long sys32_sync_file_range(unsigned int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, unsigned int flags)
307 unsigned long cookie_low,
308 char __user *buf, size_t len)
309{
310 return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
311 buf, len);
312}
313
314long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags)
315{ 242{
316 return sys_sync_file_range(fd, 243 return sys_sync_file_range(fd,
317 (off_high << 32) | off_low, 244 (off_high << 32) | off_low,
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 708bc29d36a8..2daaaa6eda23 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -353,7 +353,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
353 case SEMCTL: { 353 case SEMCTL: {
354 err = sys_semctl(first, second, 354 err = sys_semctl(first, second,
355 (int)third | IPC_64, 355 (int)third | IPC_64,
356 (union semun) ptr); 356 (unsigned long) ptr);
357 goto out; 357 goto out;
358 } 358 }
359 default: 359 default:
@@ -470,10 +470,6 @@ SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
470 470
471 return vm_munmap(addr, len); 471 return vm_munmap(addr, len);
472} 472}
473
474extern unsigned long do_mremap(unsigned long addr,
475 unsigned long old_len, unsigned long new_len,
476 unsigned long flags, unsigned long new_addr);
477 473
478SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, 474SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
479 unsigned long, new_len, unsigned long, flags, 475 unsigned long, new_len, unsigned long, flags,
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 088134834dab..8fd932080215 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -23,9 +23,9 @@ sys_call_table32:
23/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod 23/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
24/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek 24/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
25/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 25/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
26/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause 26/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
27/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice 27/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
28 .word sys_chown, sys_sync, sys_kill, compat_sys_newstat, sys32_sendfile 28 .word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile
29/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid 29/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
30 .word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16 30 .word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
31/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl 31/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
@@ -41,7 +41,7 @@ sys_call_table32:
41/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending 41/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
42 .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid 42 .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
43/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall 43/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
44 .word sys_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd 44 .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd
45/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod 45/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
46 .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate 46 .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
47/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall 47/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
@@ -59,7 +59,7 @@ sys_call_table32:
59/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl 59/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
60 .word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask 60 .word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask
61/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir 61/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
62 .word sys32_readahead, sys32_socketcall, sys_syslog, sys32_lookup_dcookie, sys32_fadvise64 62 .word sys32_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, sys32_fadvise64
63/*210*/ .word sys32_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo 63/*210*/ .word sys32_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo
64 .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex 64 .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex
65/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid 65/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index da1b781b5e65..2e973a26fbda 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -407,8 +407,7 @@ after_lock_tlb:
 
 	call		smp_callin
 	 nop
-	call		cpu_idle
-	 mov		0, %o0
+
 	call		cpu_panic
 	 nop
 1:	b,a,pt		%xcc, 1b
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 8d38ca97aa23..b3f833ab90eb 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2350,13 +2350,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2350 } while (++count < 16); 2350 } while (++count < 16);
2351} 2351}
2352 2352
2353void dump_stack(void)
2354{
2355 show_stack(current, NULL);
2356}
2357
2358EXPORT_SYMBOL(dump_stack);
2359
2360static inline struct reg_window *kernel_stack_up(struct reg_window *rw) 2353static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2361{ 2354{
2362 unsigned long fp = rw->ins[6]; 2355 unsigned long fp = rw->ins[6];
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
deleted file mode 100644
index 489fc15f3194..000000000000
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ /dev/null
@@ -1,413 +0,0 @@
1/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18
19#include <asm/asi.h>
20#include <asm/timer.h>
21
22static struct cpufreq_driver *cpufreq_us2e_driver;
23
24struct us2e_freq_percpu_info {
25 struct cpufreq_frequency_table table[6];
26};
27
28/* Indexed by cpu number. */
29static struct us2e_freq_percpu_info *us2e_freq_table;
30
31#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
32#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
33
34/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
35 * in the ESTAR mode control register.
36 */
37#define ESTAR_MODE_DIV_1 0x0000000000000000UL
38#define ESTAR_MODE_DIV_2 0x0000000000000001UL
39#define ESTAR_MODE_DIV_4 0x0000000000000003UL
40#define ESTAR_MODE_DIV_6 0x0000000000000002UL
41#define ESTAR_MODE_DIV_8 0x0000000000000004UL
42#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
43
44#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
45#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
46#define MCTRL0_REFR_COUNT_SHIFT 8
47#define MCTRL0_REFR_INTERVAL 7800
48#define MCTRL0_REFR_CLKS_P_CNT 64
49
50static unsigned long read_hbreg(unsigned long addr)
51{
52 unsigned long ret;
53
54 __asm__ __volatile__("ldxa [%1] %2, %0"
55 : "=&r" (ret)
56 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
57 return ret;
58}
59
60static void write_hbreg(unsigned long addr, unsigned long val)
61{
62 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
63 "membar #Sync"
64 : /* no outputs */
65 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
66 : "memory");
67 if (addr == HBIRD_ESTAR_MODE_ADDR) {
68 /* Need to wait 16 clock cycles for the PLL to lock. */
69 udelay(1);
70 }
71}
72
73static void self_refresh_ctl(int enable)
74{
75 unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
76
77 if (enable)
78 mctrl |= MCTRL0_SREFRESH_ENAB;
79 else
80 mctrl &= ~MCTRL0_SREFRESH_ENAB;
81 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
82 (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
83}
84
85static void frob_mem_refresh(int cpu_slowing_down,
86 unsigned long clock_tick,
87 unsigned long old_divisor, unsigned long divisor)
88{
89 unsigned long old_refr_count, refr_count, mctrl;
90
91 refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
92 refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
93
94 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
95 old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
96 >> MCTRL0_REFR_COUNT_SHIFT;
97
98 mctrl &= ~MCTRL0_REFR_COUNT_MASK;
99 mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
100 write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
101 mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
102
103 if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
104 unsigned long usecs;
105
106 /* We have to wait for both refresh counts (old
107 * and new) to go to zero.
108 */
109 usecs = (MCTRL0_REFR_CLKS_P_CNT *
110 (refr_count + old_refr_count) *
111 1000000UL *
112 old_divisor) / clock_tick;
113 udelay(usecs + 1UL);
114 }
115}
116
117static void us2e_transition(unsigned long estar, unsigned long new_bits,
118 unsigned long clock_tick,
119 unsigned long old_divisor, unsigned long divisor)
120{
121 unsigned long flags;
122
123 local_irq_save(flags);
124
125 estar &= ~ESTAR_MODE_DIV_MASK;
126
127 /* This is based upon the state transition diagram in the IIe manual. */
128 if (old_divisor == 2 && divisor == 1) {
129 self_refresh_ctl(0);
130 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
131 frob_mem_refresh(0, clock_tick, old_divisor, divisor);
132 } else if (old_divisor == 1 && divisor == 2) {
133 frob_mem_refresh(1, clock_tick, old_divisor, divisor);
134 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
135 self_refresh_ctl(1);
136 } else if (old_divisor == 1 && divisor > 2) {
137 us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
138 1, 2);
139 us2e_transition(estar, new_bits, clock_tick,
140 2, divisor);
141 } else if (old_divisor > 2 && divisor == 1) {
142 us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
143 old_divisor, 2);
144 us2e_transition(estar, new_bits, clock_tick,
145 2, divisor);
146 } else if (old_divisor < divisor) {
147 frob_mem_refresh(0, clock_tick, old_divisor, divisor);
148 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
149 } else if (old_divisor > divisor) {
150 write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
151 frob_mem_refresh(1, clock_tick, old_divisor, divisor);
152 } else {
153 BUG();
154 }
155
156 local_irq_restore(flags);
157}
158
159static unsigned long index_to_estar_mode(unsigned int index)
160{
161 switch (index) {
162 case 0:
163 return ESTAR_MODE_DIV_1;
164
165 case 1:
166 return ESTAR_MODE_DIV_2;
167
168 case 2:
169 return ESTAR_MODE_DIV_4;
170
171 case 3:
172 return ESTAR_MODE_DIV_6;
173
174 case 4:
175 return ESTAR_MODE_DIV_8;
176
177 default:
178 BUG();
179 }
180}
181
182static unsigned long index_to_divisor(unsigned int index)
183{
184 switch (index) {
185 case 0:
186 return 1;
187
188 case 1:
189 return 2;
190
191 case 2:
192 return 4;
193
194 case 3:
195 return 6;
196
197 case 4:
198 return 8;
199
200 default:
201 BUG();
202 }
203}
204
205static unsigned long estar_to_divisor(unsigned long estar)
206{
207 unsigned long ret;
208
209 switch (estar & ESTAR_MODE_DIV_MASK) {
210 case ESTAR_MODE_DIV_1:
211 ret = 1;
212 break;
213 case ESTAR_MODE_DIV_2:
214 ret = 2;
215 break;
216 case ESTAR_MODE_DIV_4:
217 ret = 4;
218 break;
219 case ESTAR_MODE_DIV_6:
220 ret = 6;
221 break;
222 case ESTAR_MODE_DIV_8:
223 ret = 8;
224 break;
225 default:
226 BUG();
227 }
228
229 return ret;
230}
231
232static unsigned int us2e_freq_get(unsigned int cpu)
233{
234 cpumask_t cpus_allowed;
235 unsigned long clock_tick, estar;
236
237 if (!cpu_online(cpu))
238 return 0;
239
240 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
241 set_cpus_allowed_ptr(current, cpumask_of(cpu));
242
243 clock_tick = sparc64_get_clock_tick(cpu) / 1000;
244 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
245
246 set_cpus_allowed_ptr(current, &cpus_allowed);
247
248 return clock_tick / estar_to_divisor(estar);
249}
250
251static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
252{
253 unsigned long new_bits, new_freq;
254 unsigned long clock_tick, divisor, old_divisor, estar;
255 cpumask_t cpus_allowed;
256 struct cpufreq_freqs freqs;
257
258 if (!cpu_online(cpu))
259 return;
260
261 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
262 set_cpus_allowed_ptr(current, cpumask_of(cpu));
263
264 new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
265 new_bits = index_to_estar_mode(index);
266 divisor = index_to_divisor(index);
267 new_freq /= divisor;
268
269 estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
270
271 old_divisor = estar_to_divisor(estar);
272
273 freqs.old = clock_tick / old_divisor;
274 freqs.new = new_freq;
275 freqs.cpu = cpu;
276 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
277
278 if (old_divisor != divisor)
279 us2e_transition(estar, new_bits, clock_tick * 1000,
280 old_divisor, divisor);
281
282 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
283
284 set_cpus_allowed_ptr(current, &cpus_allowed);
285}
286
287static int us2e_freq_target(struct cpufreq_policy *policy,
288 unsigned int target_freq,
289 unsigned int relation)
290{
291 unsigned int new_index = 0;
292
293 if (cpufreq_frequency_table_target(policy,
294 &us2e_freq_table[policy->cpu].table[0],
295 target_freq, relation, &new_index))
296 return -EINVAL;
297
298 us2e_set_cpu_divider_index(policy->cpu, new_index);
299
300 return 0;
301}
302
303static int us2e_freq_verify(struct cpufreq_policy *policy)
304{
305 return cpufreq_frequency_table_verify(policy,
306 &us2e_freq_table[policy->cpu].table[0]);
307}
308
309static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
310{
311 unsigned int cpu = policy->cpu;
312 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
313 struct cpufreq_frequency_table *table =
314 &us2e_freq_table[cpu].table[0];
315
316 table[0].index = 0;
317 table[0].frequency = clock_tick / 1;
318 table[1].index = 1;
319 table[1].frequency = clock_tick / 2;
320 table[2].index = 2;
321 table[2].frequency = clock_tick / 4;
322 table[2].index = 3;
323 table[2].frequency = clock_tick / 6;
324 table[2].index = 4;
325 table[2].frequency = clock_tick / 8;
326 table[2].index = 5;
327 table[3].frequency = CPUFREQ_TABLE_END;
328
329 policy->cpuinfo.transition_latency = 0;
330 policy->cur = clock_tick;
331
332 return cpufreq_frequency_table_cpuinfo(policy, table);
333}
334
335static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
336{
337 if (cpufreq_us2e_driver)
338 us2e_set_cpu_divider_index(policy->cpu, 0);
339
340 return 0;
341}
342
343static int __init us2e_freq_init(void)
344{
345 unsigned long manuf, impl, ver;
346 int ret;
347
348 if (tlb_type != spitfire)
349 return -ENODEV;
350
351 __asm__("rdpr %%ver, %0" : "=r" (ver));
352 manuf = ((ver >> 48) & 0xffff);
353 impl = ((ver >> 32) & 0xffff);
354
355 if (manuf == 0x17 && impl == 0x13) {
356 struct cpufreq_driver *driver;
357
358 ret = -ENOMEM;
359 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
360 if (!driver)
361 goto err_out;
362
363 us2e_freq_table = kzalloc(
364 (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
365 GFP_KERNEL);
366 if (!us2e_freq_table)
367 goto err_out;
368
369 driver->init = us2e_freq_cpu_init;
370 driver->verify = us2e_freq_verify;
371 driver->target = us2e_freq_target;
372 driver->get = us2e_freq_get;
373 driver->exit = us2e_freq_cpu_exit;
374 driver->owner = THIS_MODULE,
375 strcpy(driver->name, "UltraSPARC-IIe");
376
377 cpufreq_us2e_driver = driver;
378 ret = cpufreq_register_driver(driver);
379 if (ret)
380 goto err_out;
381
382 return 0;
383
384err_out:
385 if (driver) {
386 kfree(driver);
387 cpufreq_us2e_driver = NULL;
388 }
389 kfree(us2e_freq_table);
390 us2e_freq_table = NULL;
391 return ret;
392 }
393
394 return -ENODEV;
395}
396
397static void __exit us2e_freq_exit(void)
398{
399 if (cpufreq_us2e_driver) {
400 cpufreq_unregister_driver(cpufreq_us2e_driver);
401 kfree(cpufreq_us2e_driver);
402 cpufreq_us2e_driver = NULL;
403 kfree(us2e_freq_table);
404 us2e_freq_table = NULL;
405 }
406}
407
408MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
409MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
410MODULE_LICENSE("GPL");
411
412module_init(us2e_freq_init);
413module_exit(us2e_freq_exit);
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
deleted file mode 100644
index eb1624b931d9..000000000000
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ /dev/null
@@ -1,274 +0,0 @@
1/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
4 *
5 * Many thanks to Dominik Brodowski for fixing up the cpufreq
6 * infrastructure in order to make this driver easier to implement.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/cpufreq.h>
14#include <linux/threads.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17
18#include <asm/head.h>
19#include <asm/timer.h>
20
21static struct cpufreq_driver *cpufreq_us3_driver;
22
23struct us3_freq_percpu_info {
24 struct cpufreq_frequency_table table[4];
25};
26
27/* Indexed by cpu number. */
28static struct us3_freq_percpu_info *us3_freq_table;
29
30/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
31 * in the Safari config register.
32 */
33#define SAFARI_CFG_DIV_1 0x0000000000000000UL
34#define SAFARI_CFG_DIV_2 0x0000000040000000UL
35#define SAFARI_CFG_DIV_32 0x0000000080000000UL
36#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
37
38static unsigned long read_safari_cfg(void)
39{
40 unsigned long ret;
41
42 __asm__ __volatile__("ldxa [%%g0] %1, %0"
43 : "=&r" (ret)
44 : "i" (ASI_SAFARI_CONFIG));
45 return ret;
46}
47
48static void write_safari_cfg(unsigned long val)
49{
50 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
51 "membar #Sync"
52 : /* no outputs */
53 : "r" (val), "i" (ASI_SAFARI_CONFIG)
54 : "memory");
55}
56
57static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
58{
59 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
60 unsigned long ret;
61
62 switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
63 case SAFARI_CFG_DIV_1:
64 ret = clock_tick / 1;
65 break;
66 case SAFARI_CFG_DIV_2:
67 ret = clock_tick / 2;
68 break;
69 case SAFARI_CFG_DIV_32:
70 ret = clock_tick / 32;
71 break;
72 default:
73 BUG();
74 }
75
76 return ret;
77}
78
79static unsigned int us3_freq_get(unsigned int cpu)
80{
81 cpumask_t cpus_allowed;
82 unsigned long reg;
83 unsigned int ret;
84
85 if (!cpu_online(cpu))
86 return 0;
87
88 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
89 set_cpus_allowed_ptr(current, cpumask_of(cpu));
90
91 reg = read_safari_cfg();
92 ret = get_current_freq(cpu, reg);
93
94 set_cpus_allowed_ptr(current, &cpus_allowed);
95
96 return ret;
97}
98
99static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
100{
101 unsigned long new_bits, new_freq, reg;
102 cpumask_t cpus_allowed;
103 struct cpufreq_freqs freqs;
104
105 if (!cpu_online(cpu))
106 return;
107
108 cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
109 set_cpus_allowed_ptr(current, cpumask_of(cpu));
110
111 new_freq = sparc64_get_clock_tick(cpu) / 1000;
112 switch (index) {
113 case 0:
114 new_bits = SAFARI_CFG_DIV_1;
115 new_freq /= 1;
116 break;
117 case 1:
118 new_bits = SAFARI_CFG_DIV_2;
119 new_freq /= 2;
120 break;
121 case 2:
122 new_bits = SAFARI_CFG_DIV_32;
123 new_freq /= 32;
124 break;
125
126 default:
127 BUG();
128 }
129
130 reg = read_safari_cfg();
131
132 freqs.old = get_current_freq(cpu, reg);
133 freqs.new = new_freq;
134 freqs.cpu = cpu;
135 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
136
137 reg &= ~SAFARI_CFG_DIV_MASK;
138 reg |= new_bits;
139 write_safari_cfg(reg);
140
141 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
142
143 set_cpus_allowed_ptr(current, &cpus_allowed);
144}
145
146static int us3_freq_target(struct cpufreq_policy *policy,
147 unsigned int target_freq,
148 unsigned int relation)
149{
150 unsigned int new_index = 0;
151
152 if (cpufreq_frequency_table_target(policy,
153 &us3_freq_table[policy->cpu].table[0],
154 target_freq,
155 relation,
156 &new_index))
157 return -EINVAL;
158
159 us3_set_cpu_divider_index(policy->cpu, new_index);
160
161 return 0;
162}
163
164static int us3_freq_verify(struct cpufreq_policy *policy)
165{
166 return cpufreq_frequency_table_verify(policy,
167 &us3_freq_table[policy->cpu].table[0]);
168}
169
170static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
171{
172 unsigned int cpu = policy->cpu;
173 unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
174 struct cpufreq_frequency_table *table =
175 &us3_freq_table[cpu].table[0];
176
177 table[0].index = 0;
178 table[0].frequency = clock_tick / 1;
179 table[1].index = 1;
180 table[1].frequency = clock_tick / 2;
181 table[2].index = 2;
182 table[2].frequency = clock_tick / 32;
183 table[3].index = 0;
184 table[3].frequency = CPUFREQ_TABLE_END;
185
186 policy->cpuinfo.transition_latency = 0;
187 policy->cur = clock_tick;
188
189 return cpufreq_frequency_table_cpuinfo(policy, table);
190}
191
192static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
193{
194 if (cpufreq_us3_driver)
195 us3_set_cpu_divider_index(policy->cpu, 0);
196
197 return 0;
198}
199
200static int __init us3_freq_init(void)
201{
202 unsigned long manuf, impl, ver;
203 int ret;
204
205 if (tlb_type != cheetah && tlb_type != cheetah_plus)
206 return -ENODEV;
207
208 __asm__("rdpr %%ver, %0" : "=r" (ver));
209 manuf = ((ver >> 48) & 0xffff);
210 impl = ((ver >> 32) & 0xffff);
211
212 if (manuf == CHEETAH_MANUF &&
213 (impl == CHEETAH_IMPL ||
214 impl == CHEETAH_PLUS_IMPL ||
215 impl == JAGUAR_IMPL ||
216 impl == PANTHER_IMPL)) {
217 struct cpufreq_driver *driver;
218
219 ret = -ENOMEM;
220 driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
221 if (!driver)
222 goto err_out;
223
224 us3_freq_table = kzalloc(
225 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
226 GFP_KERNEL);
227 if (!us3_freq_table)
228 goto err_out;
229
230 driver->init = us3_freq_cpu_init;
231 driver->verify = us3_freq_verify;
232 driver->target = us3_freq_target;
233 driver->get = us3_freq_get;
234 driver->exit = us3_freq_cpu_exit;
235 driver->owner = THIS_MODULE,
236 strcpy(driver->name, "UltraSPARC-III");
237
238 cpufreq_us3_driver = driver;
239 ret = cpufreq_register_driver(driver);
240 if (ret)
241 goto err_out;
242
243 return 0;
244
245err_out:
246 if (driver) {
247 kfree(driver);
248 cpufreq_us3_driver = NULL;
249 }
250 kfree(us3_freq_table);
251 us3_freq_table = NULL;
252 return ret;
253 }
254
255 return -ENODEV;
256}
257
258static void __exit us3_freq_exit(void)
259{
260 if (cpufreq_us3_driver) {
261 cpufreq_unregister_driver(cpufreq_us3_driver);
262 kfree(cpufreq_us3_driver);
263 cpufreq_us3_driver = NULL;
264 kfree(us3_freq_table);
265 us3_freq_table = NULL;
266 }
267}
268
269MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
270MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
271MODULE_LICENSE("GPL");
272
273module_init(us3_freq_init);
274module_exit(us3_freq_exit);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 8410065f2862..dbe119b63b48 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -45,4 +45,3 @@ obj-y += iomap.o
45obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o 45obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
46obj-y += ksyms.o 46obj-y += ksyms.o
47obj-$(CONFIG_SPARC64) += PeeCeeI.o 47obj-$(CONFIG_SPARC64) += PeeCeeI.o
48obj-y += usercopy.o
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
deleted file mode 100644
index 5c4284ce1c03..000000000000
--- a/arch/sparc/lib/usercopy.c
+++ /dev/null
@@ -1,9 +0,0 @@
1#include <linux/module.h>
2#include <linux/kernel.h>
3#include <linux/bug.h>
4
5void copy_from_user_overflow(void)
6{
7 WARN(1, "Buffer overflow detected!\n");
8}
9EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 48e0c030e8f5..4490c397bb5b 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -282,14 +282,8 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
 	printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
 #endif
 
-	for (tmp = start_pfn; tmp < end_pfn; tmp++) {
-		struct page *page = pfn_to_page(tmp);
-
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-		totalhigh_pages++;
-	}
+	for (tmp = start_pfn; tmp < end_pfn; tmp++)
+		free_highmem_page(pfn_to_page(tmp));
 }
 
 void __init mem_init(void)
@@ -347,8 +341,6 @@ void __init mem_init(void)
 		map_high_region(start_pfn, end_pfn);
 	}
 
-	totalram_pages += totalhigh_pages;
-
 	codepages = (((unsigned long) &_etext) - ((unsigned long)&_start));
 	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
 	datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext));
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1588d33d5492..6ac99d64a13c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2181,10 +2181,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE];
2181static long __meminitdata addr_start, addr_end; 2181static long __meminitdata addr_start, addr_end;
2182static int __meminitdata node_start; 2182static int __meminitdata node_start;
2183 2183
2184int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 2184int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2185 int node)
2185{ 2186{
2186 unsigned long vstart = (unsigned long) start;
2187 unsigned long vend = (unsigned long) (start + nr);
2188 unsigned long phys_start = (vstart - VMEMMAP_BASE); 2187 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2189 unsigned long phys_end = (vend - VMEMMAP_BASE); 2188 unsigned long phys_end = (vend - VMEMMAP_BASE);
2190 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; 2189 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
@@ -2236,7 +2235,7 @@ void __meminit vmemmap_populate_print_last(void)
2236 } 2235 }
2237} 2236}
2238 2237
2239void vmemmap_free(struct page *memmap, unsigned long nr_pages) 2238void vmemmap_free(unsigned long start, unsigned long end)
2240{ 2239{
2241} 2240}
2242 2241
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 25877aebc685..0e5343902363 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -16,9 +16,9 @@ config TILE
16 select GENERIC_PENDING_IRQ if SMP 16 select GENERIC_PENDING_IRQ if SMP
17 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
18 select HAVE_DEBUG_BUGVERBOSE 18 select HAVE_DEBUG_BUGVERBOSE
19 select HAVE_SYSCALL_WRAPPERS if TILEGX
20 select VIRT_TO_BUS 19 select VIRT_TO_BUS
21 select SYS_HYPERVISOR 20 select SYS_HYPERVISOR
21 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
22 select ARCH_HAVE_NMI_SAFE_CMPXCHG 22 select ARCH_HAVE_NMI_SAFE_CMPXCHG
23 select GENERIC_CLOCKEVENTS 23 select GENERIC_CLOCKEVENTS
24 select MODULES_USE_ELF_RELA 24 select MODULES_USE_ELF_RELA
@@ -114,13 +114,6 @@ config STRICT_DEVMEM
114config SMP 114config SMP
115 def_bool y 115 def_bool y
116 116
117# Allow checking for compile-time determined overflow errors in
118# copy_from_user(). There are still unprovable places in the
119# generic code as of 2.6.34, so this option is not really compatible
120# with -Werror, which is more useful in general.
121config DEBUG_COPY_FROM_USER
122 def_bool n
123
124config HVC_TILE 117config HVC_TILE
125 depends on TTY 118 depends on TTY
126 select HVC_DRIVER 119 select HVC_DRIVER
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index 0f885af2b621..3257733003f8 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -16,6 +16,7 @@
16#define _ASM_TILE_HUGETLB_H 16#define _ASM_TILE_HUGETLB_H
17 17
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm-generic/hugetlb.h>
19 20
20 21
21static inline int is_hugepage_only_range(struct mm_struct *mm, 22static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index e9c670d7a7fe..ccc8ef37235c 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -153,8 +153,6 @@ extern void _cpu_idle(void);
153#define TS_POLLING 0x0004 /* in idle loop but not sleeping */ 153#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
154#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ 154#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
155 155
156#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
157
158#ifndef __ASSEMBLY__ 156#ifndef __ASSEMBLY__
159#define HAVE_SET_RESTORE_SIGMASK 1 157#define HAVE_SET_RESTORE_SIGMASK 1
160static inline void set_restore_sigmask(void) 158static inline void set_restore_sigmask(void)
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 9ab078a4605d..8a082bc6bca5 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -395,7 +395,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
395 return n; 395 return n;
396} 396}
397 397
398#ifdef CONFIG_DEBUG_COPY_FROM_USER 398#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
399/*
400 * There are still unprovable places in the generic code as of 2.6.34, so this
401 * option is not really compatible with -Werror, which is more useful in
402 * general.
403 */
399extern void copy_from_user_overflow(void) 404extern void copy_from_user_overflow(void)
400 __compiletime_warning("copy_from_user() size is not provably correct"); 405 __compiletime_warning("copy_from_user() size is not provably correct");
401 406
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 6ea4cdb3c6a0..ed378416b86a 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -56,12 +56,6 @@ COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
56 return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low); 56 return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
57} 57}
58 58
59COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
60 char __user *, buf, size_t, len)
61{
62 return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
63}
64
65COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags, 59COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
66 u32, offset_lo, u32, offset_hi, 60 u32, offset_lo, u32, offset_hi,
67 u32, nbytes_lo, u32, nbytes_hi) 61 u32, nbytes_lo, u32, nbytes_hi)
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index afb9c9a0d887..34d72a151bf3 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/irqflags.h> 19#include <linux/irqflags.h>
20#include <linux/printk.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
21#include <hv/hypervisor.h> 22#include <hv/hypervisor.h>
22 23
@@ -33,25 +34,8 @@ static struct console early_hv_console = {
33}; 34};
34 35
35/* Direct interface for emergencies */ 36/* Direct interface for emergencies */
36static struct console *early_console = &early_hv_console;
37static int early_console_initialized;
38static int early_console_complete; 37static int early_console_complete;
39 38
40static void early_vprintk(const char *fmt, va_list ap)
41{
42 char buf[512];
43 int n = vscnprintf(buf, sizeof(buf), fmt, ap);
44 early_console->write(early_console, buf, n);
45}
46
47void early_printk(const char *fmt, ...)
48{
49 va_list ap;
50 va_start(ap, fmt);
51 early_vprintk(fmt, ap);
52 va_end(ap);
53}
54
55void early_panic(const char *fmt, ...) 39void early_panic(const char *fmt, ...)
56{ 40{
57 va_list ap; 41 va_list ap;
@@ -69,14 +53,13 @@ static int __initdata keep_early;
69 53
70static int __init setup_early_printk(char *str) 54static int __init setup_early_printk(char *str)
71{ 55{
72 if (early_console_initialized) 56 if (early_console)
73 return 1; 57 return 1;
74 58
75 if (str != NULL && strncmp(str, "keep", 4) == 0) 59 if (str != NULL && strncmp(str, "keep", 4) == 0)
76 keep_early = 1; 60 keep_early = 1;
77 61
78 early_console = &early_hv_console; 62 early_console = &early_hv_console;
79 early_console_initialized = 1;
80 register_console(early_console); 63 register_console(early_console);
81 64
82 return 0; 65 return 0;
@@ -85,12 +68,12 @@ static int __init setup_early_printk(char *str)
85void __init disable_early_printk(void) 68void __init disable_early_printk(void)
86{ 69{
87 early_console_complete = 1; 70 early_console_complete = 1;
88 if (!early_console_initialized || !early_console) 71 if (!early_console)
89 return; 72 return;
90 if (!keep_early) { 73 if (!keep_early) {
91 early_printk("disabling early console\n"); 74 early_printk("disabling early console\n");
92 unregister_console(early_console); 75 unregister_console(early_console);
93 early_console_initialized = 0; 76 early_console = NULL;
94 } else { 77 } else {
95 early_printk("keeping early console\n"); 78 early_printk("keeping early console\n");
96 } 79 }
@@ -98,7 +81,7 @@ void __init disable_early_printk(void)
98 81
99void warn_early_printk(void) 82void warn_early_printk(void)
100{ 83{
101 if (early_console_complete || early_console_initialized) 84 if (early_console_complete || early_console)
102 return; 85 return;
103 early_printk("\ 86 early_printk("\
104Machine shutting down before console output is fully initialized.\n\ 87Machine shutting down before console output is fully initialized.\n\
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index caf93ae11793..8ac304484f98 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -40,13 +40,11 @@
 #include <arch/abi.h>
 #include <arch/sim_def.h>
 
-
 /*
  * Use the (x86) "idle=poll" option to prefer low latency when leaving the
  * idle loop over low power while in the idle loop, e.g. if we have
  * one thread per core and we want to get threads out of futex waits fast.
  */
-static int no_idle_nap;
 static int __init idle_setup(char *str)
 {
 	if (!str)
@@ -54,64 +52,19 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads.\n");
-		no_idle_nap = 1;
-	} else if (!strcmp(str, "halt"))
-		no_idle_nap = 0;
-	else
-		return -1;
-
-	return 0;
+		cpu_idle_poll_ctrl(true);
+		return 0;
+	} else if (!strcmp(str, "halt")) {
+		return 0;
+	}
+	return -1;
 }
 early_param("idle", idle_setup);
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	int cpu = smp_processor_id();
-
-
-	current_thread_info()->status |= TS_POLLING;
-
-	if (no_idle_nap) {
-		while (1) {
-			while (!need_resched())
-				cpu_relax();
-			schedule();
-		}
-	}
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched()) {
-			if (cpu_is_offline(cpu))
-				BUG();  /* no HOTPLUG_CPU */
-
-			local_irq_disable();
-			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-			current_thread_info()->status &= ~TS_POLLING;
-			/*
-			 * TS_POLLING-cleared state must be visible before we
-			 * test NEED_RESCHED:
-			 */
-			smp_mb();
-
-			if (!need_resched())
-				_cpu_idle();
-			else
-				local_irq_enable();
-			current_thread_info()->status |= TS_POLLING;
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+	__get_cpu_var(irq_stat).idle_timestamp = jiffies;
+	_cpu_idle();
 }
 
 /*
@@ -620,8 +573,7 @@ void show_regs(struct pt_regs *regs)
 	int i;
 
 	pr_err("\n");
-	pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
-	       tsk->pid, tsk->comm, smp_processor_id());
+	show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
 	for (i = 0; i < 51; i += 3)
 		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index e686c5ac90be..44bab29bf2f3 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -207,9 +207,7 @@ void __cpuinit online_secondary(void)
 	/* Set up tile-timer clock-event device on this cpu */
 	setup_tile_timer();
 
-	preempt_enable();
-
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c
index f8d398c9ee7f..030abe3ee4f1 100644
--- a/arch/tile/lib/uaccess.c
+++ b/arch/tile/lib/uaccess.c
@@ -22,11 +22,3 @@ int __range_ok(unsigned long addr, unsigned long size)
22 is_arch_mappable_range(addr, size)); 22 is_arch_mappable_range(addr, size));
23} 23}
24EXPORT_SYMBOL(__range_ok); 24EXPORT_SYMBOL(__range_ok);
25
26#ifdef CONFIG_DEBUG_COPY_FROM_USER
27void copy_from_user_overflow(void)
28{
29 WARN(1, "Buffer overflow detected!\n");
30}
31EXPORT_SYMBOL(copy_from_user_overflow);
32#endif
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index b3b4972c2451..dfd63ce87327 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
592 in parallel. Reuse of the virtual address is prevented by 592 in parallel. Reuse of the virtual address is prevented by
593 leaving it in the global lists until we're done with it. 593 leaving it in the global lists until we're done with it.
594 cpa takes care of the direct mappings. */ 594 cpa takes care of the direct mappings. */
595 read_lock(&vmlist_lock); 595 p = find_vm_area((void *)addr);
596 for (p = vmlist; p; p = p->next) {
597 if (p->addr == addr)
598 break;
599 }
600 read_unlock(&vmlist_lock);
601 596
602 if (!p) { 597 if (!p) {
603 pr_err("iounmap: bad address %p\n", addr); 598 pr_err("iounmap: bad address %p\n", addr);
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 80b47cb71e0a..acbe6c67afba 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -568,11 +568,7 @@ void chan_interrupt(struct line *line, int irq)
 		reactivate_fd(chan->fd, irq);
 	if (err == -EIO) {
 		if (chan->primary) {
-			struct tty_struct *tty = tty_port_tty_get(&line->port);
-			if (tty != NULL) {
-				tty_hangup(tty);
-				tty_kref_put(tty);
-			}
+			tty_port_tty_hangup(&line->port, false);
 			if (line->chan_out != chan)
 				close_one_chan(line->chan_out, 1);
 		}
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index be541cf69fd2..8035145f043b 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -248,7 +248,6 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
 {
 	struct chan *chan = data;
 	struct line *line = chan->line;
-	struct tty_struct *tty;
 	int err;
 
 	/*
@@ -267,12 +266,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
 	}
 	spin_unlock(&line->lock);
 
-	tty = tty_port_tty_get(&line->port);
-	if (tty == NULL)
-		return IRQ_NONE;
-
-	tty_wakeup(tty);
-	tty_kref_put(tty);
+	tty_port_tty_wakeup(&line->port);
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
index 49480f092456..4a0800bc37b2 100644
--- a/arch/um/kernel/early_printk.c
+++ b/arch/um/kernel/early_printk.c
@@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int
 	um_early_printk(s, n);
 }
 
-static struct console early_console = {
+static struct console early_console_dev = {
 	.name = "earlycon",
 	.write = early_console_write,
 	.flags = CON_BOOT,
@@ -25,8 +25,10 @@ static struct console early_console = {
 
 static int __init setup_early_printk(char *buf)
 {
-	register_console(&early_console);
-
+	if (!early_console) {
+		early_console = &early_console_dev;
+		register_console(&early_console_dev);
+	}
 	return 0;
 }
 
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 5abcbfbe7e25..9df292b270a8 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -42,17 +42,12 @@ static unsigned long brk_end;
 static void setup_highmem(unsigned long highmem_start,
 			  unsigned long highmem_len)
 {
-	struct page *page;
 	unsigned long highmem_pfn;
 	int i;
 
 	highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
-	for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
-		page = &mem_map[highmem_pfn + i];
-		ClearPageReserved(page);
-		init_page_count(page);
-		__free_page(page);
-	}
+	for (i = 0; i < highmem_len >> PAGE_SHIFT; i++)
+		free_highmem_page(&mem_map[highmem_pfn + i]);
 }
 #endif
 
@@ -73,18 +68,13 @@ void __init mem_init(void)
 	totalram_pages = free_all_bootmem();
 	max_low_pfn = totalram_pages;
 #ifdef CONFIG_HIGHMEM
-	totalhigh_pages = highmem >> PAGE_SHIFT;
-	totalram_pages += totalhigh_pages;
+	setup_highmem(end_iomem, highmem);
 #endif
 	num_physpages = totalram_pages;
 	max_pfn = totalram_pages;
 	printk(KERN_INFO "Memory: %luk available\n",
 	       nr_free_pages() << (PAGE_SHIFT-10));
 	kmalloc_ok = 1;
-
-#ifdef CONFIG_HIGHMEM
-	setup_highmem(end_iomem, highmem);
-#endif
 }
 
 /*
@@ -254,15 +244,7 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start < end)
-		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
-		       (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
+	free_reserved_area(start, end, 0, "initrd");
 }
 #endif
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index b462b13c5bae..bbcef522bcb1 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -210,33 +210,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
210 kmalloc_ok = save_kmalloc_ok; 210 kmalloc_ok = save_kmalloc_ok;
211} 211}
212 212
213void default_idle(void) 213void arch_cpu_idle(void)
214{ 214{
215 unsigned long long nsecs; 215 unsigned long long nsecs;
216 216
217 while (1) {
218 /* endless idle loop with no priority at all */
219
220 /*
221 * although we are an idle CPU, we do not want to
222 * get into the scheduler unnecessarily.
223 */
224 if (need_resched())
225 schedule();
226
227 tick_nohz_idle_enter();
228 rcu_idle_enter();
229 nsecs = disable_timer();
230 idle_sleep(nsecs);
231 rcu_idle_exit();
232 tick_nohz_idle_exit();
233 }
234}
235
236void cpu_idle(void)
237{
238 cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); 217 cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
239 default_idle(); 218 nsecs = disable_timer();
219 idle_sleep(nsecs);
220 local_irq_enable();
240} 221}
241 222
242int __cant_sleep(void) { 223int __cant_sleep(void) {
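This is part of the tree-wide move from per-arch cpu_idle() loops to the generic idle loop: the architecture no longer owns the need_resched()/tick_nohz/rcu bookkeeping and only provides arch_cpu_idle(), which is entered with interrupts disabled and must return with them enabled (hence the trailing local_irq_enable() above). An approximation of the calling contract, not the actual core-kernel code:

	/* rough shape of the generic loop that now drives the arch hook */
	while (!need_resched()) {
		local_irq_disable();
		arch_cpu_idle();	/* sleep; must re-enable interrupts before returning */
	}
	schedule_preempt_disabled();
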
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index e562ff80409a..7d101a2a1541 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -35,18 +35,6 @@ void show_trace(struct task_struct *task, unsigned long * stack)
35} 35}
36#endif 36#endif
37 37
38/*
39 * stack dumps generator - this is used by arch-independent code.
40 * And this is identical to i386 currently.
41 */
42void dump_stack(void)
43{
44 unsigned long stack;
45
46 show_trace(current, &stack);
47}
48EXPORT_SYMBOL(dump_stack);
49
50/*Stolen from arch/i386/kernel/traps.c */ 38/*Stolen from arch/i386/kernel/traps.c */
51static const int kstack_depth_to_print = 24; 39static const int kstack_depth_to_print = 24;
52 40
diff --git a/arch/um/sys-ppc/sysrq.c b/arch/um/sys-ppc/sysrq.c
index f889449f9285..1ff1ad7f27da 100644
--- a/arch/um/sys-ppc/sysrq.c
+++ b/arch/um/sys-ppc/sysrq.c
@@ -11,6 +11,8 @@
11void show_regs(struct pt_regs_subarch *regs) 11void show_regs(struct pt_regs_subarch *regs)
12{ 12{
13 printk("\n"); 13 printk("\n");
14 show_regs_print_info(KERN_DEFAULT);
15
14 printk("show_regs(): insert regs here.\n"); 16 printk("show_regs(): insert regs here.\n");
15#if 0 17#if 0
16 printk("\n"); 18 printk("\n");
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
index fa497e0efe5a..607a72f2ae35 100644
--- a/arch/unicore32/kernel/Makefile
+++ b/arch/unicore32/kernel/Makefile
@@ -9,7 +9,6 @@ obj-y += setup.o signal.o sys.o stacktrace.o traps.o
9obj-$(CONFIG_MODULES) += ksyms.o module.o 9obj-$(CONFIG_MODULES) += ksyms.o module.o
10obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 10obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
11 11
12obj-$(CONFIG_CPU_FREQ) += cpu-ucv2.o
13obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o 12obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
14 13
15# obj-y for architecture PKUnity v3 14# obj-y for architecture PKUnity v3
diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/arch/unicore32/kernel/cpu-ucv2.c
deleted file mode 100644
index 4a99f62584c7..000000000000
--- a/arch/unicore32/kernel/cpu-ucv2.c
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * linux/arch/unicore32/kernel/cpu-ucv2.c: clock scaling for the UniCore-II
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
7 * Copyright (C) 2001-2010 Guan Xuetao
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/init.h>
17#include <linux/clk.h>
18#include <linux/cpufreq.h>
19
20#include <mach/hardware.h>
21
22static struct cpufreq_driver ucv2_driver;
23
24/* make sure that only the "userspace" governor is run
25 * -- anything else wouldn't make sense on this platform, anyway.
26 */
27int ucv2_verify_speed(struct cpufreq_policy *policy)
28{
29 if (policy->cpu)
30 return -EINVAL;
31
32 cpufreq_verify_within_limits(policy,
33 policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
34
35 return 0;
36}
37
38static unsigned int ucv2_getspeed(unsigned int cpu)
39{
40 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
41
42 if (cpu)
43 return 0;
44 return clk_get_rate(mclk)/1000;
45}
46
47static int ucv2_target(struct cpufreq_policy *policy,
48 unsigned int target_freq,
49 unsigned int relation)
50{
51 unsigned int cur = ucv2_getspeed(0);
52 struct cpufreq_freqs freqs;
53 struct clk *mclk = clk_get(NULL, "MAIN_CLK");
54
55 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
56
57 if (!clk_set_rate(mclk, target_freq * 1000)) {
58 freqs.old = cur;
59 freqs.new = target_freq;
60 freqs.cpu = 0;
61 }
62
63 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
64
65 return 0;
66}
67
68static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
69{
70 if (policy->cpu != 0)
71 return -EINVAL;
72 policy->cur = ucv2_getspeed(0);
73 policy->min = policy->cpuinfo.min_freq = 250000;
74 policy->max = policy->cpuinfo.max_freq = 1000000;
75 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
76 return 0;
77}
78
79static struct cpufreq_driver ucv2_driver = {
80 .flags = CPUFREQ_STICKY,
81 .verify = ucv2_verify_speed,
82 .target = ucv2_target,
83 .get = ucv2_getspeed,
84 .init = ucv2_cpu_init,
85 .name = "UniCore-II",
86};
87
88static int __init ucv2_cpufreq_init(void)
89{
90 return cpufreq_register_driver(&ucv2_driver);
91}
92
93arch_initcall(ucv2_cpufreq_init);
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
index 3922255f1fa8..9be0d5d02a9a 100644
--- a/arch/unicore32/kernel/early_printk.c
+++ b/arch/unicore32/kernel/early_printk.c
@@ -33,21 +33,17 @@ static struct console early_ocd_console = {
33 .index = -1, 33 .index = -1,
34}; 34};
35 35
36/* Direct interface for emergencies */
37static struct console *early_console = &early_ocd_console;
38
39static int __initdata keep_early;
40
41static int __init setup_early_printk(char *buf) 36static int __init setup_early_printk(char *buf)
42{ 37{
43 if (!buf) 38 int keep_early;
39
40 if (!buf || early_console)
44 return 0; 41 return 0;
45 42
46 if (strstr(buf, "keep")) 43 if (strstr(buf, "keep"))
47 keep_early = 1; 44 keep_early = 1;
48 45
49 if (!strncmp(buf, "ocd", 3)) 46 early_console = &early_ocd_console;
50 early_console = &early_ocd_console;
51 47
52 if (keep_early) 48 if (keep_early)
53 early_console->flags &= ~CON_BOOT; 49 early_console->flags &= ~CON_BOOT;
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 872d7e22d847..c9447691bdac 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,25 +45,10 @@ static const char * const processor_modes[] = {
45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" 45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
46}; 46};
47 47
48void cpu_idle(void) 48void arch_cpu_idle(void)
49{ 49{
50 /* endless idle loop with no priority at all */ 50 cpu_do_idle();
51 while (1) { 51 local_irq_enable();
52 tick_nohz_idle_enter();
53 rcu_idle_enter();
54 while (!need_resched()) {
55 local_irq_disable();
56 stop_critical_timings();
57 cpu_do_idle();
58 local_irq_enable();
59 start_critical_timings();
60 }
61 rcu_idle_exit();
62 tick_nohz_idle_exit();
63 preempt_enable_no_resched();
64 schedule();
65 preempt_disable();
66 }
67} 52}
68 53
69static char reboot_mode = 'h'; 54static char reboot_mode = 'h';
@@ -159,11 +144,7 @@ void __show_regs(struct pt_regs *regs)
159 unsigned long flags; 144 unsigned long flags;
160 char buf[64]; 145 char buf[64];
161 146
162 printk(KERN_DEFAULT "CPU: %d %s (%s %.*s)\n", 147 show_regs_print_info(KERN_DEFAULT);
163 raw_smp_processor_id(), print_tainted(),
164 init_utsname()->release,
165 (int)strcspn(init_utsname()->version, " "),
166 init_utsname()->version);
167 print_symbol("PC is at %s\n", instruction_pointer(regs)); 148 print_symbol("PC is at %s\n", instruction_pointer(regs));
168 print_symbol("LR is at %s\n", regs->UCreg_lr); 149 print_symbol("LR is at %s\n", regs->UCreg_lr);
169 printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" 150 printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index 0870b68d2ad9..c54e32410ead 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -170,12 +170,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
170 c_backtrace(fp, mode); 170 c_backtrace(fp, mode);
171} 171}
172 172
173void dump_stack(void)
174{
175 dump_backtrace(NULL, NULL);
176}
177EXPORT_SYMBOL(dump_stack);
178
179void show_stack(struct task_struct *tsk, unsigned long *sp) 173void show_stack(struct task_struct *tsk, unsigned long *sp)
180{ 174{
181 dump_backtrace(NULL, tsk); 175 dump_backtrace(NULL, tsk);
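Together with the arch/um/kernel/sysrq.c hunk above, this removes another per-arch copy of dump_stack(); the architectures now rely on a consolidated generic version. Its approximate shape, hedged since the shared implementation is not part of this diff:

	void dump_stack(void)
	{
		dump_stack_print_info(KERN_DEFAULT);	/* version, taint, hardware name */
		show_stack(NULL, NULL);			/* arch-provided stack walker */
	}
	EXPORT_SYMBOL(dump_stack);
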
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index de186bde8975..63df12d71ce3 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -66,6 +66,9 @@ void show_mem(unsigned int filter)
66 printk(KERN_DEFAULT "Mem-info:\n"); 66 printk(KERN_DEFAULT "Mem-info:\n");
67 show_free_areas(filter); 67 show_free_areas(filter);
68 68
69 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
70 return;
71
69 for_each_bank(i, mi) { 72 for_each_bank(i, mi) {
70 struct membank *bank = &mi->bank[i]; 73 struct membank *bank = &mi->bank[i];
71 unsigned int pfn1, pfn2; 74 unsigned int pfn1, pfn2;
@@ -313,24 +316,6 @@ void __init bootmem_init(void)
313 max_pfn = max_high - PHYS_PFN_OFFSET; 316 max_pfn = max_high - PHYS_PFN_OFFSET;
314} 317}
315 318
316static inline int free_area(unsigned long pfn, unsigned long end, char *s)
317{
318 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
319
320 for (; pfn < end; pfn++) {
321 struct page *page = pfn_to_page(pfn);
322 ClearPageReserved(page);
323 init_page_count(page);
324 __free_page(page);
325 pages++;
326 }
327
328 if (size && s)
329 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
330
331 return pages;
332}
333
334static inline void 319static inline void
335free_memmap(unsigned long start_pfn, unsigned long end_pfn) 320free_memmap(unsigned long start_pfn, unsigned long end_pfn)
336{ 321{
@@ -404,9 +389,9 @@ void __init mem_init(void)
404 389
405 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; 390 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
406 391
407 /* this will put all unused low memory onto the freelists */
408 free_unused_memmap(&meminfo); 392 free_unused_memmap(&meminfo);
409 393
394 /* this will put all unused low memory onto the freelists */
410 totalram_pages += free_all_bootmem(); 395 totalram_pages += free_all_bootmem();
411 396
412 reserved_pages = free_pages = 0; 397 reserved_pages = free_pages = 0;
@@ -491,9 +476,7 @@ void __init mem_init(void)
491 476
492void free_initmem(void) 477void free_initmem(void)
493{ 478{
494 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 479 free_initmem_default(0);
495 __phys_to_pfn(__pa(__init_end)),
496 "init");
497} 480}
498 481
499#ifdef CONFIG_BLK_DEV_INITRD 482#ifdef CONFIG_BLK_DEV_INITRD
@@ -503,9 +486,7 @@ static int keep_initrd;
503void free_initrd_mem(unsigned long start, unsigned long end) 486void free_initrd_mem(unsigned long start, unsigned long end)
504{ 487{
505 if (!keep_initrd) 488 if (!keep_initrd)
506 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 489 free_reserved_area(start, end, 0, "initrd");
507 __phys_to_pfn(__pa(end)),
508 "initrd");
509} 490}
510 491
511static int __init keepinitrd_setup(char *__unused) 492static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a605597b08..13068ee22f33 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
235void __uc32_iounmap(volatile void __iomem *io_addr) 235void __uc32_iounmap(volatile void __iomem *io_addr)
236{ 236{
237 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); 237 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
238 struct vm_struct **p, *tmp; 238 struct vm_struct *vm;
239 239
240 /* 240 /*
241 * If this is a section based mapping we need to handle it 241 * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
244 * all the mappings before the area can be reclaimed 244 * all the mappings before the area can be reclaimed
245 * by someone else. 245 * by someone else.
246 */ 246 */
247 write_lock(&vmlist_lock); 247 vm = find_vm_area(addr);
248 for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { 248 if (vm && (vm->flags & VM_IOREMAP) &&
249 if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { 249 (vm->flags & VM_UNICORE_SECTION_MAPPING))
250 if (tmp->flags & VM_UNICORE_SECTION_MAPPING) { 250 unmap_area_sections((unsigned long)vm->addr, vm->size);
251 unmap_area_sections((unsigned long)tmp->addr,
252 tmp->size);
253 }
254 break;
255 }
256 }
257 write_unlock(&vmlist_lock);
258 251
259 vunmap(addr); 252 vunmap(addr);
260} 253}
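The __uc32_iounmap() rewrite stops walking vmlist under vmlist_lock and instead asks the vmalloc core for the area descriptor via find_vm_area(), which returns the struct vm_struct covering a vmalloc/ioremap address or NULL. Minimal usage sketch (teardown_mapping is an illustrative name):

	#include <linux/vmalloc.h>
	#include <linux/printk.h>
	#include <linux/io.h>

	static void teardown_mapping(volatile void __iomem *io_addr)
	{
		void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
		struct vm_struct *vm = find_vm_area(addr);	/* NULL if not a vmalloc area */

		if (vm && (vm->flags & VM_IOREMAP))
			pr_debug("releasing %lu bytes at %p\n", vm->size, vm->addr);

		vunmap(addr);
	}
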
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 15b5cef4aa38..5db2117ae288 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -20,6 +20,7 @@ config X86_64
20### Arch settings 20### Arch settings
21config X86 21config X86
22 def_bool y 22 def_bool y
23 select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
23 select HAVE_AOUT if X86_32 24 select HAVE_AOUT if X86_32
24 select HAVE_UNSTABLE_SCHED_CLOCK 25 select HAVE_UNSTABLE_SCHED_CLOCK
25 select ARCH_SUPPORTS_NUMA_BALANCING 26 select ARCH_SUPPORTS_NUMA_BALANCING
@@ -120,6 +121,7 @@ config X86
120 select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION 121 select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
121 select OLD_SIGACTION if X86_32 122 select OLD_SIGACTION if X86_32
122 select COMPAT_OLD_SIGACTION if IA32_EMULATION 123 select COMPAT_OLD_SIGACTION if IA32_EMULATION
124 select RTC_LIB
123 125
124config INSTRUCTION_DECODER 126config INSTRUCTION_DECODER
125 def_bool y 127 def_bool y
@@ -188,9 +190,6 @@ config GENERIC_CALIBRATE_DELAY
188config ARCH_HAS_CPU_RELAX 190config ARCH_HAS_CPU_RELAX
189 def_bool y 191 def_bool y
190 192
191config ARCH_HAS_DEFAULT_IDLE
192 def_bool y
193
194config ARCH_HAS_CACHE_LINE_SIZE 193config ARCH_HAS_CACHE_LINE_SIZE
195 def_bool y 194 def_bool y
196 195
@@ -389,7 +388,7 @@ config X86_NUMACHIP
389 388
390config X86_VSMP 389config X86_VSMP
391 bool "ScaleMP vSMP" 390 bool "ScaleMP vSMP"
392 select PARAVIRT_GUEST 391 select HYPERVISOR_GUEST
393 select PARAVIRT 392 select PARAVIRT
394 depends on X86_64 && PCI 393 depends on X86_64 && PCI
395 depends on X86_EXTENDED_PLATFORM 394 depends on X86_EXTENDED_PLATFORM
@@ -596,44 +595,17 @@ config SCHED_OMIT_FRAME_POINTER
596 595
597 If in doubt, say "Y". 596 If in doubt, say "Y".
598 597
599menuconfig PARAVIRT_GUEST 598menuconfig HYPERVISOR_GUEST
600 bool "Paravirtualized guest support" 599 bool "Linux guest support"
601 ---help---
602 Say Y here to get to see options related to running Linux under
603 various hypervisors. This option alone does not add any kernel code.
604
605 If you say N, all options in this submenu will be skipped and disabled.
606
607if PARAVIRT_GUEST
608
609config PARAVIRT_TIME_ACCOUNTING
610 bool "Paravirtual steal time accounting"
611 select PARAVIRT
612 default n
613 ---help--- 600 ---help---
614 Select this option to enable fine granularity task steal time 601 Say Y here to enable options for running Linux under various hyper-
615 accounting. Time spent executing other tasks in parallel with 602 visors. This option enables basic hypervisor detection and platform
616 the current vCPU is discounted from the vCPU power. To account for 603 setup.
617 that, there can be a small performance impact.
618
619 If in doubt, say N here.
620
621source "arch/x86/xen/Kconfig"
622 604
623config KVM_GUEST 605 If you say N, all options in this submenu will be skipped and
624 bool "KVM Guest support (including kvmclock)" 606 disabled, and Linux guest support won't be built in.
625 select PARAVIRT
626 select PARAVIRT
627 select PARAVIRT_CLOCK
628 default y if PARAVIRT_GUEST
629 ---help---
630 This option enables various optimizations for running under the KVM
631 hypervisor. It includes a paravirtualized clock, so that instead
632 of relying on a PIT (or probably other) emulation by the
633 underlying device model, the host provides the guest with
634 timing infrastructure such as time of day, and system time
635 607
636source "arch/x86/lguest/Kconfig" 608if HYPERVISOR_GUEST
637 609
638config PARAVIRT 610config PARAVIRT
639 bool "Enable paravirtualization code" 611 bool "Enable paravirtualization code"
@@ -643,6 +615,13 @@ config PARAVIRT
643 over full virtualization. However, when run without a hypervisor 615 over full virtualization. However, when run without a hypervisor
644 the kernel is theoretically slower and slightly larger. 616 the kernel is theoretically slower and slightly larger.
645 617
618config PARAVIRT_DEBUG
619 bool "paravirt-ops debugging"
620 depends on PARAVIRT && DEBUG_KERNEL
621 ---help---
622 Enable to debug paravirt_ops internals. Specifically, BUG if
623 a paravirt_op is missing when it is called.
624
646config PARAVIRT_SPINLOCKS 625config PARAVIRT_SPINLOCKS
647 bool "Paravirtualization layer for spinlocks" 626 bool "Paravirtualization layer for spinlocks"
648 depends on PARAVIRT && SMP 627 depends on PARAVIRT && SMP
@@ -656,17 +635,38 @@ config PARAVIRT_SPINLOCKS
656 635
657 If you are unsure how to answer this question, answer N. 636 If you are unsure how to answer this question, answer N.
658 637
659config PARAVIRT_CLOCK 638source "arch/x86/xen/Kconfig"
660 bool
661 639
662endif 640config KVM_GUEST
641 bool "KVM Guest support (including kvmclock)"
642 depends on PARAVIRT
643 select PARAVIRT_CLOCK
644 default y
645 ---help---
646 This option enables various optimizations for running under the KVM
647 hypervisor. It includes a paravirtualized clock, so that instead
648 of relying on a PIT (or probably other) emulation by the
649 underlying device model, the host provides the guest with
650 timing infrastructure such as time of day, and system time
663 651
664config PARAVIRT_DEBUG 652source "arch/x86/lguest/Kconfig"
665 bool "paravirt-ops debugging" 653
666 depends on PARAVIRT && DEBUG_KERNEL 654config PARAVIRT_TIME_ACCOUNTING
655 bool "Paravirtual steal time accounting"
656 depends on PARAVIRT
657 default n
667 ---help--- 658 ---help---
668 Enable to debug paravirt_ops internals. Specifically, BUG if 659 Select this option to enable fine granularity task steal time
669 a paravirt_op is missing when it is called. 660 accounting. Time spent executing other tasks in parallel with
661 the current vCPU is discounted from the vCPU power. To account for
662 that, there can be a small performance impact.
663
664 If in doubt, say N here.
665
666config PARAVIRT_CLOCK
667 bool
668
669endif #HYPERVISOR_GUEST
670 670
671config NO_BOOTMEM 671config NO_BOOTMEM
672 def_bool y 672 def_bool y
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index b322f124ee3c..c198b7e13e7b 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -131,7 +131,7 @@ config DOUBLEFAULT
131 131
132config DEBUG_TLBFLUSH 132config DEBUG_TLBFLUSH
133 bool "Set upper limit of TLB entries to flush one-by-one" 133 bool "Set upper limit of TLB entries to flush one-by-one"
134 depends on DEBUG_KERNEL && (X86_64 || X86_INVLPG) 134 depends on DEBUG_KERNEL
135 ---help--- 135 ---help---
136 136
137 X86-only for now. 137 X86-only for now.
@@ -292,20 +292,6 @@ config OPTIMIZE_INLINING
292 292
293 If unsure, say N. 293 If unsure, say N.
294 294
295config DEBUG_STRICT_USER_COPY_CHECKS
296 bool "Strict copy size checks"
297 depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
298 ---help---
299 Enabling this option turns a certain set of sanity checks for user
300 copy operations into compile time failures.
301
302 The copy_from_user() etc checks are there to help test if there
303 are sufficient security checks on the length argument of
304 the copy operation, by having gcc prove that the argument is
305 within bounds.
306
307 If unsure, or if you run an older (pre 4.4) gcc, say N.
308
309config DEBUG_NMI_SELFTEST 295config DEBUG_NMI_SELFTEST
310 bool "NMI Selftest" 296 bool "NMI Selftest"
311 depends on DEBUG_KERNEL && X86_LOCAL_APIC 297 depends on DEBUG_KERNEL && X86_LOCAL_APIC
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c1d383d1fb7e..16f24e6dad79 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -52,7 +52,7 @@ ENTRY(startup_32)
52 jnz 1f 52 jnz 1f
53 53
54 cli 54 cli
55 movl $(__KERNEL_DS), %eax 55 movl $(__BOOT_DS), %eax
56 movl %eax, %ds 56 movl %eax, %ds
57 movl %eax, %es 57 movl %eax, %es
58 movl %eax, %ss 58 movl %eax, %ss
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index 455646e0e532..e785b422b766 100644
--- a/arch/x86/ia32/Makefile
+++ b/arch/x86/ia32/Makefile
@@ -5,9 +5,6 @@
5obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o 5obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
6obj-$(CONFIG_IA32_EMULATION) += nosyscall.o syscall_ia32.o 6obj-$(CONFIG_IA32_EMULATION) += nosyscall.o syscall_ia32.o
7 7
8sysv-$(CONFIG_SYSVIPC) := ipc32.o
9obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
10
11obj-$(CONFIG_IA32_AOUT) += ia32_aout.o 8obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
12 9
13audit-class-$(CONFIG_AUDIT) := audit.o 10audit-class-$(CONFIG_AUDIT) := audit.o
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 03abf9b70011..81e94d972f1b 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -162,7 +162,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
162 fs = get_fs(); 162 fs = get_fs();
163 set_fs(KERNEL_DS); 163 set_fs(KERNEL_DS);
164 has_dumped = 1; 164 has_dumped = 1;
165 current->flags |= PF_DUMPCORE;
166 strncpy(dump.u_comm, current->comm, sizeof(current->comm)); 165 strncpy(dump.u_comm, current->comm, sizeof(current->comm));
167 dump.u_ar0 = offsetof(struct user32, regs); 166 dump.u_ar0 = offsetof(struct user32, regs);
168 dump.signal = signr; 167 dump.signal = signr;
diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c
deleted file mode 100644
index 29cdcd02ead3..000000000000
--- a/arch/x86/ia32/ipc32.c
+++ /dev/null
@@ -1,54 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/spinlock.h>
3#include <linux/list.h>
4#include <linux/syscalls.h>
5#include <linux/time.h>
6#include <linux/sem.h>
7#include <linux/msg.h>
8#include <linux/shm.h>
9#include <linux/ipc.h>
10#include <linux/compat.h>
11#include <asm/sys_ia32.h>
12
13asmlinkage long sys32_ipc(u32 call, int first, int second, int third,
14 compat_uptr_t ptr, u32 fifth)
15{
16 int version;
17
18 version = call >> 16; /* hack for backward compatibility */
19 call &= 0xffff;
20
21 switch (call) {
22 case SEMOP:
23 /* struct sembuf is the same on 32 and 64bit :)) */
24 return sys_semtimedop(first, compat_ptr(ptr), second, NULL);
25 case SEMTIMEDOP:
26 return compat_sys_semtimedop(first, compat_ptr(ptr), second,
27 compat_ptr(fifth));
28 case SEMGET:
29 return sys_semget(first, second, third);
30 case SEMCTL:
31 return compat_sys_semctl(first, second, third, compat_ptr(ptr));
32
33 case MSGSND:
34 return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
35 case MSGRCV:
36 return compat_sys_msgrcv(first, second, fifth, third,
37 version, compat_ptr(ptr));
38 case MSGGET:
39 return sys_msgget((key_t) first, second);
40 case MSGCTL:
41 return compat_sys_msgctl(first, second, compat_ptr(ptr));
42
43 case SHMAT:
44 return compat_sys_shmat(first, second, third, version,
45 compat_ptr(ptr));
46 case SHMDT:
47 return sys_shmdt(compat_ptr(ptr));
48 case SHMGET:
49 return sys_shmget(first, (unsigned)second, third);
50 case SHMCTL:
51 return compat_sys_shmctl(first, second, compat_ptr(ptr));
52 }
53 return -ENOSYS;
54}
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index ad7a20cbc699..4e4907c67d92 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -166,12 +166,6 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
166 a.offset>>PAGE_SHIFT); 166 a.offset>>PAGE_SHIFT);
167} 167}
168 168
169asmlinkage long sys32_mprotect(unsigned long start, size_t len,
170 unsigned long prot)
171{
172 return sys_mprotect(start, len, prot);
173}
174
175asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr, 169asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
176 int options) 170 int options)
177{ 171{
@@ -194,35 +188,10 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
194} 188}
195 189
196 190
197asmlinkage long sys32_sendfile(int out_fd, int in_fd,
198 compat_off_t __user *offset, s32 count)
199{
200 mm_segment_t old_fs = get_fs();
201 int ret;
202 off_t of;
203
204 if (offset && get_user(of, offset))
205 return -EFAULT;
206
207 set_fs(KERNEL_DS);
208 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
209 count);
210 set_fs(old_fs);
211
212 if (offset && put_user(of, offset))
213 return -EFAULT;
214 return ret;
215}
216
217/* 191/*
218 * Some system calls that need sign extended arguments. This could be 192 * Some system calls that need sign extended arguments. This could be
219 * done by a generic wrapper. 193 * done by a generic wrapper.
220 */ 194 */
221long sys32_kill(int pid, int sig)
222{
223 return sys_kill(pid, sig);
224}
225
226long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, 195long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
227 __u32 len_low, __u32 len_high, int advice) 196 __u32 len_low, __u32 len_high, int advice)
228{ 197{
@@ -246,12 +215,6 @@ long sys32_vm86_warning(void)
246 return -ENOSYS; 215 return -ENOSYS;
247} 216}
248 217
249long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
250 char __user *buf, size_t len)
251{
252 return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
253}
254
255asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, 218asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
256 size_t count) 219 size_t count)
257{ 220{
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 11e1152222d0..2f03ff018d36 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -37,7 +37,4 @@ do { \
37 37
38#include <asm-generic/bug.h> 38#include <asm-generic/bug.h>
39 39
40
41extern void show_regs_common(void);
42
43#endif /* _ASM_X86_BUG_H */ 40#endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 8d871eaddb66..d47786acb016 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -35,7 +35,7 @@ extern void __add_wrong_size(void)
35 35
36/* 36/*
37 * An exchange-type operation, which takes a value and a pointer, and 37 * An exchange-type operation, which takes a value and a pointer, and
38 * returns a the old value. 38 * returns the old value.
39 */ 39 */
40#define __xchg_op(ptr, arg, op, lock) \ 40#define __xchg_op(ptr, arg, op, lock) \
41 ({ \ 41 ({ \
diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h
index 1616562683e9..1fe49704b146 100644
--- a/arch/x86/include/asm/context_tracking.h
+++ b/arch/x86/include/asm/context_tracking.h
@@ -1,31 +1,10 @@
1#ifndef _ASM_X86_CONTEXT_TRACKING_H 1#ifndef _ASM_X86_CONTEXT_TRACKING_H
2#define _ASM_X86_CONTEXT_TRACKING_H 2#define _ASM_X86_CONTEXT_TRACKING_H
3 3
4#ifndef __ASSEMBLY__
5#include <linux/context_tracking.h>
6#include <asm/ptrace.h>
7
8static inline void exception_enter(struct pt_regs *regs)
9{
10 user_exit();
11}
12
13static inline void exception_exit(struct pt_regs *regs)
14{
15#ifdef CONFIG_CONTEXT_TRACKING
16 if (user_mode(regs))
17 user_enter();
18#endif
19}
20
21#else /* __ASSEMBLY__ */
22
23#ifdef CONFIG_CONTEXT_TRACKING 4#ifdef CONFIG_CONTEXT_TRACKING
24# define SCHEDULE_USER call schedule_user 5# define SCHEDULE_USER call schedule_user
25#else 6#else
26# define SCHEDULE_USER call schedule 7# define SCHEDULE_USER call schedule
27#endif 8#endif
28 9
29#endif /* !__ASSEMBLY__ */
30
31#endif 10#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 93fe929d1cee..8010ebc5705f 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -9,6 +9,7 @@
9#endif 9#endif
10 10
11#define NCAPINTS 10 /* N 32-bit words worth of info */ 11#define NCAPINTS 10 /* N 32-bit words worth of info */
12#define NBUGINTS 1 /* N 32-bit bug flags */
12 13
13/* 14/*
14 * Note: If the comment begins with a quoted string, that string is used 15 * Note: If the comment begins with a quoted string, that string is used
@@ -100,6 +101,7 @@
100#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ 101#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
101#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ 102#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
102#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ 103#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */
104#define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */
103 105
104/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 106/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
105#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ 107#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
@@ -168,6 +170,7 @@
168#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ 170#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
169#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ 171#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
170#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ 172#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
173#define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */
171 174
172/* 175/*
173 * Auxiliary flags: Linux defined - For features scattered in various 176 * Auxiliary flags: Linux defined - For features scattered in various
@@ -182,6 +185,7 @@
182#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ 185#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
183#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ 186#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
184#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ 187#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
188#define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */
185 189
186/* Virtualization flags: Linux defined, word 8 */ 190/* Virtualization flags: Linux defined, word 8 */
187#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ 191#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -216,6 +220,17 @@
216#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ 220#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
217#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ 221#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
218 222
223/*
224 * BUG word(s)
225 */
226#define X86_BUG(x) (NCAPINTS*32 + (x))
227
228#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
229#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
230#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
231#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* AMD Erratum 383 */
232#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* AMD Erratum 400 */
233
219#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 234#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
220 235
221#include <asm/asm.h> 236#include <asm/asm.h>
@@ -311,6 +326,7 @@ extern const char * const x86_power_flags[32];
311#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 326#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
312#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 327#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
313#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) 328#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
329#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
314#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) 330#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
315#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 331#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
316#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) 332#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
@@ -401,6 +417,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
401#define static_cpu_has(bit) boot_cpu_has(bit) 417#define static_cpu_has(bit) boot_cpu_has(bit)
402#endif 418#endif
403 419
420#define cpu_has_bug(c, bit) cpu_has(c, (bit))
421#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
422#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit));
423
424#define static_cpu_has_bug(bit) static_cpu_has((bit))
425#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
426
404#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 427#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
405 428
406#endif /* _ASM_X86_CPUFEATURE_H */ 429#endif /* _ASM_X86_CPUFEATURE_H */
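The new X86_BUG_* bits live past NCAPINTS in x86_capability[] (the processor.h hunk below grows the array to NCAPINTS + NBUGINTS), so CPU defects are recorded and tested with the same machinery as feature flags instead of ad-hoc fdiv_bug/f00f_bug fields. A small illustration using the macros added above; the detection condition and handler name are made up for the example:

	/* in a cpu init path: record the erratum on affected parts (condition illustrative) */
	if (c->x86 == 5)
		set_cpu_bug(c, X86_BUG_F00F);

	/* elsewhere: branch on it exactly like a feature bit */
	if (static_cpu_has_bug(X86_BUG_F00F))
		setup_f00f_workaround();	/* hypothetical helper */
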
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index a09c28571064..0dc7d9e21c34 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -104,12 +104,7 @@ enum fixed_addresses {
104 FIX_LI_PCIA, /* Lithium PCI Bridge A */ 104 FIX_LI_PCIA, /* Lithium PCI Bridge A */
105 FIX_LI_PCIB, /* Lithium PCI Bridge B */ 105 FIX_LI_PCIB, /* Lithium PCI Bridge B */
106#endif 106#endif
107#ifdef CONFIG_X86_F00F_BUG 107 FIX_RO_IDT, /* Virtual mapping for read-only IDT */
108 FIX_F00F_IDT, /* Virtual mapping for IDT */
109#endif
110#ifdef CONFIG_X86_CYCLONE_TIMER
111 FIX_CYCLONE_TIMER, /*cyclone timer register*/
112#endif
113#ifdef CONFIG_X86_32 108#ifdef CONFIG_X86_32
114 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 109 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
115 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 110 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index bdd35dbd0605..a8091216963b 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_X86_HUGETLB_H 2#define _ASM_X86_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7static inline int is_hugepage_only_range(struct mm_struct *mm, 8static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 86095ed14135..2d4b5e6107cd 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,13 +20,11 @@
20#ifndef _ASM_X86_HYPERVISOR_H 20#ifndef _ASM_X86_HYPERVISOR_H
21#define _ASM_X86_HYPERVISOR_H 21#define _ASM_X86_HYPERVISOR_H
22 22
23#ifdef CONFIG_HYPERVISOR_GUEST
24
23#include <asm/kvm_para.h> 25#include <asm/kvm_para.h>
24#include <asm/xen/hypervisor.h> 26#include <asm/xen/hypervisor.h>
25 27
26extern void init_hypervisor(struct cpuinfo_x86 *c);
27extern void init_hypervisor_platform(void);
28extern bool hypervisor_x2apic_available(void);
29
30/* 28/*
31 * x86 hypervisor information 29 * x86 hypervisor information
32 */ 30 */
@@ -55,4 +53,12 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
55extern const struct hypervisor_x86 x86_hyper_xen_hvm; 53extern const struct hypervisor_x86 x86_hyper_xen_hvm;
56extern const struct hypervisor_x86 x86_hyper_kvm; 54extern const struct hypervisor_x86 x86_hyper_kvm;
57 55
58#endif 56extern void init_hypervisor(struct cpuinfo_x86 *c);
57extern void init_hypervisor_platform(void);
58extern bool hypervisor_x2apic_available(void);
59#else
60static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
61static inline void init_hypervisor_platform(void) { }
62static inline bool hypervisor_x2apic_available(void) { return false; }
63#endif /* CONFIG_HYPERVISOR_GUEST */
64#endif /* _ASM_X86_HYPERVISOR_H */
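Guarding the declarations with CONFIG_HYPERVISOR_GUEST and adding empty inline stubs means callers never need an #ifdef of their own; with the option disabled the calls compile away. Sketch of an unconditional caller (the function name is illustrative):

	#include <asm/hypervisor.h>

	void __init example_platform_setup(void)
	{
		init_hypervisor_platform();	/* no-op stub when !CONFIG_HYPERVISOR_GUEST */

		if (hypervisor_x2apic_available())
			pr_info("hypervisor exposes x2apic\n");
	}
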
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f4076af1f4ed..fa5f71e021d5 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -146,13 +146,13 @@ DECLARE_PER_CPU(struct device *, mce_device);
146void mce_intel_feature_init(struct cpuinfo_x86 *c); 146void mce_intel_feature_init(struct cpuinfo_x86 *c);
147void cmci_clear(void); 147void cmci_clear(void);
148void cmci_reenable(void); 148void cmci_reenable(void);
149void cmci_rediscover(int dying); 149void cmci_rediscover(void);
150void cmci_recheck(void); 150void cmci_recheck(void);
151#else 151#else
152static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } 152static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
153static inline void cmci_clear(void) {} 153static inline void cmci_clear(void) {}
154static inline void cmci_reenable(void) {} 154static inline void cmci_reenable(void) {}
155static inline void cmci_rediscover(int dying) {} 155static inline void cmci_rediscover(void) {}
156static inline void cmci_recheck(void) {} 156static inline void cmci_recheck(void) {}
157#endif 157#endif
158 158
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9264802e2824..cb7502852acb 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
137 * pointer indirection), this allows gcc to optimize better 137 * pointer indirection), this allows gcc to optimize better
138 */ 138 */
139 139
140#define rdmsr(msr, val1, val2) \ 140#define rdmsr(msr, low, high) \
141do { \ 141do { \
142 u64 __val = native_read_msr((msr)); \ 142 u64 __val = native_read_msr((msr)); \
143 (void)((val1) = (u32)__val); \ 143 (void)((low) = (u32)__val); \
144 (void)((val2) = (u32)(__val >> 32)); \ 144 (void)((high) = (u32)(__val >> 32)); \
145} while (0) 145} while (0)
146 146
147static inline void wrmsr(unsigned msr, unsigned low, unsigned high) 147static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
162} 162}
163 163
164/* rdmsr with exception handling */ 164/* rdmsr with exception handling */
165#define rdmsr_safe(msr, p1, p2) \ 165#define rdmsr_safe(msr, low, high) \
166({ \ 166({ \
167 int __err; \ 167 int __err; \
168 u64 __val = native_read_msr_safe((msr), &__err); \ 168 u64 __val = native_read_msr_safe((msr), &__err); \
169 (*p1) = (u32)__val; \ 169 (*low) = (u32)__val; \
170 (*p2) = (u32)(__val >> 32); \ 170 (*high) = (u32)(__val >> 32); \
171 __err; \ 171 __err; \
172}) 172})
173 173
@@ -208,7 +208,7 @@ do { \
208#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ 208#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
209 (u32)((val) >> 32)) 209 (u32)((val) >> 32))
210 210
211#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2)) 211#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
212 212
213#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) 213#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
214 214
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8b491e66eaa8..6c896fbe21db 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -48,6 +48,5 @@
48 * arch/x86/kernel/head_64.S), and it is mapped here: 48 * arch/x86/kernel/head_64.S), and it is mapped here:
49 */ 49 */
50#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 50#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
51#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
52 51
53#endif /* _ASM_X86_PAGE_64_DEFS_H */ 52#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7361e47db79f..cfdc9ee4c900 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -262,10 +262,6 @@ static inline void set_ldt(const void *addr, unsigned entries)
262{ 262{
263 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); 263 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
264} 264}
265static inline void store_gdt(struct desc_ptr *dtr)
266{
267 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
268}
269static inline void store_idt(struct desc_ptr *dtr) 265static inline void store_idt(struct desc_ptr *dtr)
270{ 266{
271 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); 267 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b3b0ec1dac86..0db1fcac668c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -123,7 +123,7 @@ struct pv_cpu_ops {
123 void (*load_tr_desc)(void); 123 void (*load_tr_desc)(void);
124 void (*load_gdt)(const struct desc_ptr *); 124 void (*load_gdt)(const struct desc_ptr *);
125 void (*load_idt)(const struct desc_ptr *); 125 void (*load_idt)(const struct desc_ptr *);
126 void (*store_gdt)(struct desc_ptr *); 126 /* store_gdt has been removed. */
127 void (*store_idt)(struct desc_ptr *); 127 void (*store_idt)(struct desc_ptr *);
128 void (*set_ldt)(const void *desc, unsigned entries); 128 void (*set_ldt)(const void *desc, unsigned entries);
129 unsigned long (*store_tr)(void); 129 unsigned long (*store_tr)(void);
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 4f7e67e2345e..85e13ccf15c4 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -24,45 +24,45 @@
24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) 24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
25#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) 25#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
26 26
27#define P4_ESCR_EVENT_MASK 0x7e000000U 27#define P4_ESCR_EVENT_MASK 0x7e000000ULL
28#define P4_ESCR_EVENT_SHIFT 25 28#define P4_ESCR_EVENT_SHIFT 25
29#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U 29#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
30#define P4_ESCR_EVENTMASK_SHIFT 9 30#define P4_ESCR_EVENTMASK_SHIFT 9
31#define P4_ESCR_TAG_MASK 0x000001e0U 31#define P4_ESCR_TAG_MASK 0x000001e0ULL
32#define P4_ESCR_TAG_SHIFT 5 32#define P4_ESCR_TAG_SHIFT 5
33#define P4_ESCR_TAG_ENABLE 0x00000010U 33#define P4_ESCR_TAG_ENABLE 0x00000010ULL
34#define P4_ESCR_T0_OS 0x00000008U 34#define P4_ESCR_T0_OS 0x00000008ULL
35#define P4_ESCR_T0_USR 0x00000004U 35#define P4_ESCR_T0_USR 0x00000004ULL
36#define P4_ESCR_T1_OS 0x00000002U 36#define P4_ESCR_T1_OS 0x00000002ULL
37#define P4_ESCR_T1_USR 0x00000001U 37#define P4_ESCR_T1_USR 0x00000001ULL
38 38
39#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT) 39#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
40#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) 40#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
41#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) 41#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
42 42
43#define P4_CCCR_OVF 0x80000000U 43#define P4_CCCR_OVF 0x80000000ULL
44#define P4_CCCR_CASCADE 0x40000000U 44#define P4_CCCR_CASCADE 0x40000000ULL
45#define P4_CCCR_OVF_PMI_T0 0x04000000U 45#define P4_CCCR_OVF_PMI_T0 0x04000000ULL
46#define P4_CCCR_OVF_PMI_T1 0x08000000U 46#define P4_CCCR_OVF_PMI_T1 0x08000000ULL
47#define P4_CCCR_FORCE_OVF 0x02000000U 47#define P4_CCCR_FORCE_OVF 0x02000000ULL
48#define P4_CCCR_EDGE 0x01000000U 48#define P4_CCCR_EDGE 0x01000000ULL
49#define P4_CCCR_THRESHOLD_MASK 0x00f00000U 49#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
50#define P4_CCCR_THRESHOLD_SHIFT 20 50#define P4_CCCR_THRESHOLD_SHIFT 20
51#define P4_CCCR_COMPLEMENT 0x00080000U 51#define P4_CCCR_COMPLEMENT 0x00080000ULL
52#define P4_CCCR_COMPARE 0x00040000U 52#define P4_CCCR_COMPARE 0x00040000ULL
53#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U 53#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
54#define P4_CCCR_ESCR_SELECT_SHIFT 13 54#define P4_CCCR_ESCR_SELECT_SHIFT 13
55#define P4_CCCR_ENABLE 0x00001000U 55#define P4_CCCR_ENABLE 0x00001000ULL
56#define P4_CCCR_THREAD_SINGLE 0x00010000U 56#define P4_CCCR_THREAD_SINGLE 0x00010000ULL
57#define P4_CCCR_THREAD_BOTH 0x00020000U 57#define P4_CCCR_THREAD_BOTH 0x00020000ULL
58#define P4_CCCR_THREAD_ANY 0x00030000U 58#define P4_CCCR_THREAD_ANY 0x00030000ULL
59#define P4_CCCR_RESERVED 0x00000fffU 59#define P4_CCCR_RESERVED 0x00000fffULL
60 60
61#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) 61#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
62#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) 62#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
63 63
64#define P4_GEN_ESCR_EMASK(class, name, bit) \ 64#define P4_GEN_ESCR_EMASK(class, name, bit) \
65 class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) 65 class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
66#define P4_ESCR_EMASK_BIT(class, name) class##__##name 66#define P4_ESCR_EMASK_BIT(class, name) class##__##name
67 67
68/* 68/*
@@ -107,7 +107,7 @@
107 * P4_PEBS_CONFIG_MASK and related bits on 107 * P4_PEBS_CONFIG_MASK and related bits on
108 * modification.) 108 * modification.)
109 */ 109 */
110#define P4_CONFIG_ALIASABLE (1 << 9) 110#define P4_CONFIG_ALIASABLE (1ULL << 9)
111 111
112/* 112/*
113 * The bits we allow to pass for RAW events 113 * The bits we allow to pass for RAW events
@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS {
784 * Note we have UOP and PEBS bits reserved for now 784 * Note we have UOP and PEBS bits reserved for now
785 * just in case if we will need them once 785 * just in case if we will need them once
786 */ 786 */
787#define P4_PEBS_CONFIG_ENABLE (1 << 7) 787#define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
788#define P4_PEBS_CONFIG_UOP_TAG (1 << 8) 788#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
789#define P4_PEBS_CONFIG_METRIC_MASK 0x3f 789#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
790#define P4_PEBS_CONFIG_MASK 0xff 790#define P4_PEBS_CONFIG_MASK 0xFFLL
791 791
792/* 792/*
793 * mem: Only counters MSR_IQ_COUNTER4 (16) and 793 * mem: Only counters MSR_IQ_COUNTER4 (16) and
794 * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling 794 * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
795 */ 795 */
796#define P4_PEBS_ENABLE 0x02000000U 796#define P4_PEBS_ENABLE 0x02000000ULL
797#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U 797#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
798 798
799#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK) 799#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
800#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK) 800#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
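Widening these P4 constants from U to ULL matters because they are masked against 64-bit event config words; a 32-bit constant silently truncates once it is complemented. One plausible failure mode the ULL suffix avoids (standalone illustration, not a real call site):

	#include <stdint.h>

	#define OVF_32	0x80000000U	/* old 32-bit constant */
	#define OVF_64	0x80000000ULL	/* new 64-bit constant */

	uint64_t clear_ovf(uint64_t config)
	{
		/* config & ~OVF_32 also wipes bits 32-63, because ~OVF_32 is just 0x7fffffff */
		return config & ~OVF_64;	/* ~OVF_64 keeps the upper 32 bits intact */
	}
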
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 567b5d0632b2..e6423002c10b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -351,7 +351,6 @@ static inline void update_page_count(int level, unsigned long pages) { }
351 * as a pte too. 351 * as a pte too.
352 */ 352 */
353extern pte_t *lookup_address(unsigned long address, unsigned int *level); 353extern pte_t *lookup_address(unsigned long address, unsigned int *level);
354extern int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase);
355extern phys_addr_t slow_virt_to_phys(void *__address); 354extern phys_addr_t slow_virt_to_phys(void *__address);
356 355
357#endif /* !__ASSEMBLY__ */ 356#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3270116b1488..22224b3b43bb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,9 +91,6 @@ struct cpuinfo_x86 {
91 /* Problems on some 486Dx4's and old 386's: */ 91 /* Problems on some 486Dx4's and old 386's: */
92 char hard_math; 92 char hard_math;
93 char rfu; 93 char rfu;
94 char fdiv_bug;
95 char f00f_bug;
96 char coma_bug;
97 char pad0; 94 char pad0;
98#else 95#else
99 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 96 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
@@ -107,7 +104,7 @@ struct cpuinfo_x86 {
107 __u32 extended_cpuid_level; 104 __u32 extended_cpuid_level;
108 /* Maximum supported CPUID level, -1=no CPUID: */ 105 /* Maximum supported CPUID level, -1=no CPUID: */
109 int cpuid_level; 106 int cpuid_level;
110 __u32 x86_capability[NCAPINTS]; 107 __u32 x86_capability[NCAPINTS + NBUGINTS];
111 char x86_vendor_id[16]; 108 char x86_vendor_id[16];
112 char x86_model_id[64]; 109 char x86_model_id[64];
113 /* in KB - valid for CPUS which support this call: */ 110 /* in KB - valid for CPUS which support this call: */
@@ -973,26 +970,6 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
973 return ratio; 970 return ratio;
974} 971}
975 972
976/*
977 * AMD errata checking
978 */
979#ifdef CONFIG_CPU_SUP_AMD
980extern const int amd_erratum_383[];
981extern const int amd_erratum_400[];
982extern bool cpu_has_amd_erratum(const int *);
983
984#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
985#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
986#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
987 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
988#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
989#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
990#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
991
992#else
993#define cpu_has_amd_erratum(x) (false)
994#endif /* CONFIG_CPU_SUP_AMD */
995
996extern unsigned long arch_align_stack(unsigned long sp); 973extern unsigned long arch_align_stack(unsigned long sp);
997extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 974extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
998 975
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 487055c8c1aa..f6064b7385b0 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,7 +15,6 @@ struct saved_context {
15 unsigned long cr0, cr2, cr3, cr4; 15 unsigned long cr0, cr2, cr3, cr4;
16 u64 misc_enable; 16 u64 misc_enable;
17 bool misc_enable_saved; 17 bool misc_enable_saved;
18 struct desc_ptr gdt;
19 struct desc_ptr idt; 18 struct desc_ptr idt;
20 u16 ldt; 19 u16 ldt;
21 u16 tss; 20 u16 tss;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 09b0bf104156..97b84e08a211 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -25,9 +25,6 @@ struct saved_context {
25 u64 misc_enable; 25 u64 misc_enable;
26 bool misc_enable_saved; 26 bool misc_enable_saved;
27 unsigned long efer; 27 unsigned long efer;
28 u16 gdt_pad;
29 u16 gdt_limit;
30 unsigned long gdt_base;
31 u16 idt_pad; 28 u16 idt_pad;
32 u16 idt_limit; 29 u16 idt_limit;
33 unsigned long idt_base; 30 unsigned long idt_base;
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 8459efc39686..0ef202e232d6 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -30,23 +30,14 @@ asmlinkage long sys32_fstatat(unsigned int, const char __user *,
30 struct stat64 __user *, int); 30 struct stat64 __user *, int);
31struct mmap_arg_struct32; 31struct mmap_arg_struct32;
32asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *); 32asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
33asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long);
34
35asmlinkage long sys32_alarm(unsigned int);
36 33
37asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); 34asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
38asmlinkage long sys32_sysfs(int, u32, u32);
39 35
40asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); 36asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
41asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32); 37asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
42 38
43asmlinkage long sys32_personality(unsigned long);
44asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
45
46long sys32_kill(int, int);
47long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); 39long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
48long sys32_vm86_warning(void); 40long sys32_vm86_warning(void);
49long sys32_lookup_dcookie(u32, u32, char __user *, size_t);
50 41
51asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); 42asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
52asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, 43asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
@@ -59,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
59asmlinkage long sys32_sigreturn(void); 50asmlinkage long sys32_sigreturn(void);
60asmlinkage long sys32_rt_sigreturn(void); 51asmlinkage long sys32_rt_sigreturn(void);
61 52
62/* ia32/ipc32.c */
63asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
64
65asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, 53asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
66 const char __user *); 54 const char __user *);
67 55
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 6cf0a9cc60cd..5f87b35fd2ef 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -27,8 +27,8 @@ asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
27long sys_rt_sigreturn(void); 27long sys_rt_sigreturn(void);
28 28
29/* kernel/tls.c */ 29/* kernel/tls.c */
30asmlinkage int sys_set_thread_area(struct user_desc __user *); 30asmlinkage long sys_set_thread_area(struct user_desc __user *);
31asmlinkage int sys_get_thread_area(struct user_desc __user *); 31asmlinkage long sys_get_thread_area(struct user_desc __user *);
32 32
33/* X86_32 only */ 33/* X86_32 only */
34#ifdef CONFIG_X86_32 34#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2cd056e3ada3..a1df6e84691f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -241,8 +241,6 @@ static inline struct thread_info *current_thread_info(void)
241 skip sending interrupt */ 241 skip sending interrupt */
242#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ 242#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
243 243
244#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
245
246#ifndef __ASSEMBLY__ 244#ifndef __ASSEMBLY__
247#define HAVE_SET_RESTORE_SIGMASK 1 245#define HAVE_SET_RESTORE_SIGMASK 1
248static inline void set_restore_sigmask(void) 246static inline void set_restore_sigmask(void)
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 3d5df1c4447f..c2a48139c340 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -50,12 +50,4 @@
50# define __ARCH_WANT_SYS_VFORK 50# define __ARCH_WANT_SYS_VFORK
51# define __ARCH_WANT_SYS_CLONE 51# define __ARCH_WANT_SYS_CLONE
52 52
53/*
54 * "Conditional" syscalls
55 *
56 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
57 * but it doesn't work on all toolchains, so we just do it by hand
58 */
59# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
60
61#endif /* _ASM_X86_UNISTD_H */ 53#endif /* _ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 8ff8be7835ab..6e5197910fd8 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -55,4 +55,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
55extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); 55extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
56extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 56extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
57extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 57extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
58extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
58#endif /* _ASM_UPROBES_H */ 59#endif /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 7a060f4b411f..b5757885d7a4 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -72,6 +72,7 @@
72#define MSR_IA32_PEBS_ENABLE 0x000003f1 72#define MSR_IA32_PEBS_ENABLE 0x000003f1
73#define MSR_IA32_DS_AREA 0x00000600 73#define MSR_IA32_DS_AREA 0x00000600
74#define MSR_IA32_PERF_CAPABILITIES 0x00000345 74#define MSR_IA32_PERF_CAPABILITIES 0x00000345
75#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
75 76
76#define MSR_MTRRfix64K_00000 0x00000250 77#define MSR_MTRRfix64K_00000 0x00000250
77#define MSR_MTRRfix16K_80000 0x00000258 78#define MSR_MTRRfix16K_80000 0x00000258
@@ -195,6 +196,10 @@
195#define MSR_AMD64_IBSBRTARGET 0xc001103b 196#define MSR_AMD64_IBSBRTARGET 0xc001103b
196#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ 197#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
197 198
199/* Fam 16h MSRs */
200#define MSR_F16H_L2I_PERF_CTL 0xc0010230
201#define MSR_F16H_L2I_PERF_CTR 0xc0010231
202
198/* Fam 15h MSRs */ 203/* Fam 15h MSRs */
199#define MSR_F15H_PERF_CTL 0xc0010200 204#define MSR_F15H_PERF_CTL 0xc0010200
200#define MSR_F15H_PERF_CTR 0xc0010201 205#define MSR_F15H_PERF_CTR 0xc0010201
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 0532f5d6e4ef..b44577bc9744 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -46,7 +46,7 @@ int acpi_suspend_lowlevel(void)
 	header->pmode_behavior = 0;

 #ifndef CONFIG_64BIT
-	store_gdt((struct desc_ptr *)&header->pmode_gdt);
+	native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

 	if (!rdmsr_safe(MSR_EFER,
 			&header->pmode_efer_low,
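Side note on the replacement call: native_store_gdt() writes the GDT register straight into a struct desc_ptr via the SGDT instruction, with no paravirt indirection, which is all the suspend header needs here. A minimal sketch of that idea (simplified types, not the kernel's exact definitions in <asm/desc.h>):

struct desc_ptr {
	unsigned short size;
	unsigned long  address;
} __attribute__((packed));

static inline void store_gdt_sketch(struct desc_ptr *dtr)
{
	/* SGDT stores the limit and base of the current GDT */
	asm volatile("sgdt %0" : "=m" (*dtr));
}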
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e3..d1daa66ab162 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
1 .section .text..page_aligned 1 .text
2#include <linux/linkage.h> 2#include <linux/linkage.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/page_types.h> 4#include <asm/page_types.h>
@@ -18,7 +18,6 @@ wakeup_pmode_return:
18 movw %ax, %gs 18 movw %ax, %gs
19 19
20 # reload the gdt, as we need the full 32 bit address 20 # reload the gdt, as we need the full 32 bit address
21 lgdt saved_gdt
22 lidt saved_idt 21 lidt saved_idt
23 lldt saved_ldt 22 lldt saved_ldt
24 ljmp $(__KERNEL_CS), $1f 23 ljmp $(__KERNEL_CS), $1f
@@ -44,7 +43,6 @@ bogus_magic:
44 43
45 44
46save_registers: 45save_registers:
47 sgdt saved_gdt
48 sidt saved_idt 46 sidt saved_idt
49 sldt saved_ldt 47 sldt saved_ldt
50 str saved_tss 48 str saved_tss
@@ -93,7 +91,6 @@ ENTRY(saved_magic) .long 0
93ENTRY(saved_eip) .long 0 91ENTRY(saved_eip) .long 0
94 92
95# saved registers 93# saved registers
96saved_gdt: .long 0,0
97saved_idt: .long 0,0 94saved_idt: .long 0,0
98saved_ldt: .long 0 95saved_ldt: .long 0
99saved_tss: .long 0 96saved_tss: .long 0
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ef5ccca79a6c..c15cf9a25e27 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -271,7 +271,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 		BUG_ON(a->replacementlen > a->instrlen);
 		BUG_ON(a->instrlen > sizeof(insnbuf));
-		BUG_ON(a->cpuid >= NCAPINTS*32);
+		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
 		if (!boot_cpu_has(a->cpuid))
 			continue;

diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index aadf3359e2a7..3048ded1b598 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,12 +20,14 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
 	{}
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);

-static struct pci_device_id amd_nb_link_ids[] = {
+static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 	{}
 };

@@ -81,7 +83,6 @@ int amd_cache_northbridges(void)
81 next_northbridge(link, amd_nb_link_ids); 83 next_northbridge(link, amd_nb_link_ids);
82 } 84 }
83 85
84 /* some CPU families (e.g. family 0x11) do not support GART */
85 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 86 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
86 boot_cpu_data.x86 == 0x15) 87 boot_cpu_data.x86 == 0x15)
87 amd_northbridges.flags |= AMD_NB_GART; 88 amd_northbridges.flags |= AMD_NB_GART;
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index d5fd66f0d4cd..fd972a3e4cbb 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -87,7 +87,7 @@ static u32 __init allocate_aperture(void)
 	 */
 	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
 				      aper_size, aper_size);
-	if (!addr || addr + aper_size > GART_MAX_ADDR) {
+	if (!addr) {
 		printk(KERN_ERR
 			"Cannot allocate aperture memory hole (%lx,%uK)\n",
 			addr, aper_size>>10);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 66b5faffe14a..53a4e2744846 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -373,7 +373,6 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
373static struct cpuidle_driver apm_idle_driver = { 373static struct cpuidle_driver apm_idle_driver = {
374 .name = "apm_idle", 374 .name = "apm_idle",
375 .owner = THIS_MODULE, 375 .owner = THIS_MODULE,
376 .en_core_tk_irqen = 1,
377 .states = { 376 .states = {
378 { /* entry 0 is for polling */ }, 377 { /* entry 0 is for polling */ },
379 { /* entry 1 is for APM idle */ 378 { /* entry 1 is for APM idle */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0e067d3d96c..b0684e4a73aa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,6 @@ CFLAGS_common.o := $(nostackp)
14 14
15obj-y := intel_cacheinfo.o scattered.o topology.o 15obj-y := intel_cacheinfo.o scattered.o topology.o
16obj-y += proc.o capflags.o powerflags.o common.o 16obj-y += proc.o capflags.o powerflags.o common.o
17obj-y += vmware.o hypervisor.o mshyperv.o
18obj-y += rdrand.o 17obj-y += rdrand.o
19obj-y += match.o 18obj-y += match.o
20 19
@@ -31,7 +30,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
31obj-$(CONFIG_PERF_EVENTS) += perf_event.o 30obj-$(CONFIG_PERF_EVENTS) += perf_event.o
32 31
33ifdef CONFIG_PERF_EVENTS 32ifdef CONFIG_PERF_EVENTS
34obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o 33obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o
35obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o 34obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
36obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o 35obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
37obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o 36obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
@@ -42,11 +41,13 @@ obj-$(CONFIG_MTRR) += mtrr/
42 41
43obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o 42obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
44 43
44obj-$(CONFIG_HYPERVISOR_GUEST) += vmware.o hypervisor.o mshyperv.o
45
45quiet_cmd_mkcapflags = MKCAP $@ 46quiet_cmd_mkcapflags = MKCAP $@
46 cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ 47 cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
47 48
48cpufeature = $(src)/../../include/asm/cpufeature.h 49cpufeature = $(src)/../../include/asm/cpufeature.h
49 50
50targets += capflags.c 51targets += capflags.c
51$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE 52$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
52 $(call if_changed,mkcapflags) 53 $(call if_changed,mkcapflags)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fa96eb0d02fb..5013a48d1aff 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -20,11 +20,11 @@

 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
 	u32 gprs[8] = { 0 };
 	int err;

-	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+	WARN_ONCE((boot_cpu_data.x86 != 0xf),
+		  "%s should only be used on K8!\n", __func__);

 	gprs[1] = msr;
 	gprs[7] = 0x9c5a203a;
@@ -38,10 +38,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
38 38
39static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) 39static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
40{ 40{
41 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
42 u32 gprs[8] = { 0 }; 41 u32 gprs[8] = { 0 };
43 42
44 WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); 43 WARN_ONCE((boot_cpu_data.x86 != 0xf),
44 "%s should only be used on K8!\n", __func__);
45 45
46 gprs[0] = (u32)val; 46 gprs[0] = (u32)val;
47 gprs[1] = msr; 47 gprs[1] = msr;
@@ -192,11 +192,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
192 /* Athlon 660/661 is valid. */ 192 /* Athlon 660/661 is valid. */
193 if ((c->x86_model == 6) && ((c->x86_mask == 0) || 193 if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
194 (c->x86_mask == 1))) 194 (c->x86_mask == 1)))
195 goto valid_k7; 195 return;
196 196
197 /* Duron 670 is valid */ 197 /* Duron 670 is valid */
198 if ((c->x86_model == 7) && (c->x86_mask == 0)) 198 if ((c->x86_model == 7) && (c->x86_mask == 0))
199 goto valid_k7; 199 return;
200 200
201 /* 201 /*
202 * Athlon 662, Duron 671, and Athlon >model 7 have capability 202 * Athlon 662, Duron 671, and Athlon >model 7 have capability
@@ -209,7 +209,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
209 ((c->x86_model == 7) && (c->x86_mask >= 1)) || 209 ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
210 (c->x86_model > 7)) 210 (c->x86_model > 7))
211 if (cpu_has_mp) 211 if (cpu_has_mp)
212 goto valid_k7; 212 return;
213 213
214 /* If we get here, not a certified SMP capable AMD system. */ 214 /* If we get here, not a certified SMP capable AMD system. */
215 215
@@ -220,9 +220,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
220 WARN_ONCE(1, "WARNING: This combination of AMD" 220 WARN_ONCE(1, "WARNING: This combination of AMD"
221 " processors is not suitable for SMP.\n"); 221 " processors is not suitable for SMP.\n");
222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); 222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
223
224valid_k7:
225 ;
226} 223}
227 224
228static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) 225static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -513,6 +510,10 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
513#endif 510#endif
514} 511}
515 512
513static const int amd_erratum_383[];
514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum);
516
516static void __cpuinit init_amd(struct cpuinfo_x86 *c) 517static void __cpuinit init_amd(struct cpuinfo_x86 *c)
517{ 518{
518 u32 dummy; 519 u32 dummy;
@@ -727,8 +728,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
727 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value); 728 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
728 value &= ~(1ULL << 24); 729 value &= ~(1ULL << 24);
729 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
731
732 if (cpu_has_amd_erratum(amd_erratum_383))
733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
730 } 734 }
731 735
736 if (cpu_has_amd_erratum(amd_erratum_400))
737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
738
732 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
733} 740}
734 741
@@ -847,8 +854,7 @@ cpu_dev_register(amd_cpu_dev);
847 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that 854 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
848 * have an OSVW id assigned, which it takes as first argument. Both take a 855 * have an OSVW id assigned, which it takes as first argument. Both take a
849 * variable number of family-specific model-stepping ranges created by 856 * variable number of family-specific model-stepping ranges created by
850 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const 857 * AMD_MODEL_RANGE().
851 * int[] in arch/x86/include/asm/processor.h.
852 * 858 *
853 * Example: 859 * Example:
854 * 860 *
@@ -858,16 +864,22 @@ cpu_dev_register(amd_cpu_dev);
858 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); 864 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
859 */ 865 */
860 866
861const int amd_erratum_400[] = 867#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
868#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
869#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
870 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
871#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
872#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
873#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
874
875static const int amd_erratum_400[] =
862 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), 876 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
863 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 877 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
864EXPORT_SYMBOL_GPL(amd_erratum_400);
865 878
866const int amd_erratum_383[] = 879static const int amd_erratum_383[] =
867 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 880 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
868EXPORT_SYMBOL_GPL(amd_erratum_383);
869 881
870bool cpu_has_amd_erratum(const int *erratum) 882static bool cpu_has_amd_erratum(const int *erratum)
871{ 883{
872 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); 884 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
873 int osvw_id = *erratum++; 885 int osvw_id = *erratum++;
@@ -908,5 +920,3 @@ bool cpu_has_amd_erratum(const int *erratum)
908 920
909 return false; 921 return false;
910} 922}
911
912EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
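To make the range encoding concrete, here is a worked check of one of the amd_erratum_400 entries using the macros introduced above (a standalone sketch; only the constants are taken from the hunk):

#include <assert.h>

#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

int main(void)
{
	/* family 0x10, model 0x2 stepping 0x1 through model 0xff stepping 0xf */
	int r = AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf);	/* 0x10021fff */

	assert(AMD_MODEL_RANGE_FAMILY(r) == 0x10);
	assert(AMD_MODEL_RANGE_START(r) == 0x021);	/* (model << 4) | stepping */
	assert(AMD_MODEL_RANGE_END(r) == 0xfff);
	return 0;
}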
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index af6455e3fcc9..4112be9a4659 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,7 @@ static void __init check_fpu(void)
59 * trap_init() enabled FXSR and company _before_ testing for FP 59 * trap_init() enabled FXSR and company _before_ testing for FP
60 * problems here. 60 * problems here.
61 * 61 *
62 * Test for the divl bug.. 62 * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
63 */ 63 */
64 __asm__("fninit\n\t" 64 __asm__("fninit\n\t"
65 "fldl %1\n\t" 65 "fldl %1\n\t"
@@ -75,26 +75,12 @@ static void __init check_fpu(void)
75 75
76 kernel_fpu_end(); 76 kernel_fpu_end();
77 77
78 boot_cpu_data.fdiv_bug = fdiv_bug; 78 if (fdiv_bug) {
79 if (boot_cpu_data.fdiv_bug) 79 set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
80 pr_warn("Hmm, FPU with FDIV bug\n"); 80 pr_warn("Hmm, FPU with FDIV bug\n");
81 }
81} 82}
82 83
83/*
84 * Check whether we are able to run this kernel safely on SMP.
85 *
86 * - i386 is no longer supported.
87 * - In order to run on anything without a TSC, we need to be
88 * compiled for a i486.
89 */
90
91static void __init check_config(void)
92{
93 if (boot_cpu_data.x86 < 4)
94 panic("Kernel requires i486+ for 'invlpg' and other features");
95}
96
97
98void __init check_bugs(void) 84void __init check_bugs(void)
99{ 85{
100 identify_boot_cpu(); 86 identify_boot_cpu();
@@ -102,7 +88,17 @@ void __init check_bugs(void)
102 pr_info("CPU: "); 88 pr_info("CPU: ");
103 print_cpu_info(&boot_cpu_data); 89 print_cpu_info(&boot_cpu_data);
104#endif 90#endif
105 check_config(); 91
92 /*
93 * Check whether we are able to run this kernel safely on SMP.
94 *
95 * - i386 is no longer supported.
96 * - In order to run on anything without a TSC, we need to be
97 * compiled for a i486.
98 */
99 if (boot_cpu_data.x86 < 4)
100 panic("Kernel requires i486+ for 'invlpg' and other features");
101
106 init_utsname()->machine[1] = 102 init_utsname()->machine[1] =
107 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 103 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
108 alternative_instructions(); 104 alternative_instructions();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d814772c5bed..22018f70a671 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -920,6 +920,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
920 /* AND the already accumulated flags with these */ 920 /* AND the already accumulated flags with these */
921 for (i = 0; i < NCAPINTS; i++) 921 for (i = 0; i < NCAPINTS; i++)
922 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 922 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
923
924 /* OR, i.e. replicate the bug flags */
925 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
926 c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
923 } 927 }
924 928
925 /* Init Machine Check Exception if available. */ 929 /* Init Machine Check Exception if available. */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 4fbd384fb645..d048d5ca43c1 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -249,7 +249,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
249 /* Emulate MTRRs using Cyrix's ARRs. */ 249 /* Emulate MTRRs using Cyrix's ARRs. */
250 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); 250 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
251 /* 6x86's contain this bug */ 251 /* 6x86's contain this bug */
252 c->coma_bug = 1; 252 set_cpu_bug(c, X86_BUG_COMA);
253 break; 253 break;
254 254
255 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 255 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
@@ -317,7 +317,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
317 /* Enable MMX extensions (App note 108) */ 317 /* Enable MMX extensions (App note 108) */
318 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); 318 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
319 } else { 319 } else {
320 c->coma_bug = 1; /* 6x86MX, it has the bug. */ 320 /* A 6x86MX - it has the bug. */
321 set_cpu_bug(c, X86_BUG_COMA);
321 } 322 }
322 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; 323 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
323 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; 324 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1905ce98bee0..9b0c441c03f5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -96,6 +96,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
96 sched_clock_stable = 1; 96 sched_clock_stable = 1;
97 } 97 }
98 98
99 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
100 if (c->x86 == 6) {
101 switch (c->x86_model) {
102 case 0x27: /* Penwell */
103 case 0x35: /* Cloverview */
104 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
105 break;
106 default:
107 break;
108 }
109 }
110
99 /* 111 /*
100 * There is a known erratum on Pentium III and Core Solo 112 * There is a known erratum on Pentium III and Core Solo
101 * and Core Duo CPUs. 113 * and Core Duo CPUs.
@@ -164,20 +176,6 @@ int __cpuinit ppro_with_ram_bug(void)
164 return 0; 176 return 0;
165} 177}
166 178
167#ifdef CONFIG_X86_F00F_BUG
168static void __cpuinit trap_init_f00f_bug(void)
169{
170 __set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
171
172 /*
173 * Update the IDT descriptor and reload the IDT so that
174 * it uses the read-only mapped virtual address.
175 */
176 idt_descr.address = fix_to_virt(FIX_F00F_IDT);
177 load_idt(&idt_descr);
178}
179#endif
180
181static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) 179static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
182{ 180{
183 /* calling is from identify_secondary_cpu() ? */ 181 /* calling is from identify_secondary_cpu() ? */
@@ -206,16 +204,14 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
206 /* 204 /*
207 * All current models of Pentium and Pentium with MMX technology CPUs 205 * All current models of Pentium and Pentium with MMX technology CPUs
208 * have the F0 0F bug, which lets nonprivileged users lock up the 206 * have the F0 0F bug, which lets nonprivileged users lock up the
209 * system. 207 * system. Announce that the fault handler will be checking for it.
210 * Note that the workaround only should be initialized once...
211 */ 208 */
212 c->f00f_bug = 0; 209 clear_cpu_bug(c, X86_BUG_F00F);
213 if (!paravirt_enabled() && c->x86 == 5) { 210 if (!paravirt_enabled() && c->x86 == 5) {
214 static int f00f_workaround_enabled; 211 static int f00f_workaround_enabled;
215 212
216 c->f00f_bug = 1; 213 set_cpu_bug(c, X86_BUG_F00F);
217 if (!f00f_workaround_enabled) { 214 if (!f00f_workaround_enabled) {
218 trap_init_f00f_bug();
219 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); 215 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
220 f00f_workaround_enabled = 1; 216 f00f_workaround_enabled = 1;
221 } 217 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7bc126346ace..9239504b41cb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2358,7 +2358,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)

 	if (action == CPU_POST_DEAD) {
 		/* intentionally ignoring frozen here */
-		cmci_rediscover(cpu);
+		cmci_rediscover();
 	}

 	return NOTIFY_OK;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1ac581f38dfa..9cb52767999a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -33,7 +33,6 @@
33#include <asm/mce.h> 33#include <asm/mce.h>
34#include <asm/msr.h> 34#include <asm/msr.h>
35 35
36#define NR_BANKS 6
37#define NR_BLOCKS 9 36#define NR_BLOCKS 9
38#define THRESHOLD_MAX 0xFFF 37#define THRESHOLD_MAX 0xFFF
39#define INT_TYPE_APIC 0x00020000 38#define INT_TYPE_APIC 0x00020000
@@ -57,12 +56,7 @@ static const char * const th_names[] = {
57 "execution_unit", 56 "execution_unit",
58}; 57};
59 58
60static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); 59static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
61
62static unsigned char shared_bank[NR_BANKS] = {
63 0, 0, 0, 0, 1
64};
65
66static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ 60static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
67 61
68static void amd_threshold_interrupt(void); 62static void amd_threshold_interrupt(void);
@@ -79,6 +73,12 @@ struct thresh_restart {
79 u16 old_limit; 73 u16 old_limit;
80}; 74};
81 75
76static inline bool is_shared_bank(int bank)
77{
78 /* Bank 4 is for northbridge reporting and is thus shared */
79 return (bank == 4);
80}
81
82static const char * const bank4_names(struct threshold_block *b) 82static const char * const bank4_names(struct threshold_block *b)
83{ 83{
84 switch (b->address) { 84 switch (b->address) {
@@ -214,7 +214,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
214 unsigned int bank, block; 214 unsigned int bank, block;
215 int offset = -1; 215 int offset = -1;
216 216
217 for (bank = 0; bank < NR_BANKS; ++bank) { 217 for (bank = 0; bank < mca_cfg.banks; ++bank) {
218 for (block = 0; block < NR_BLOCKS; ++block) { 218 for (block = 0; block < NR_BLOCKS; ++block) {
219 if (block == 0) 219 if (block == 0)
220 address = MSR_IA32_MC0_MISC + bank * 4; 220 address = MSR_IA32_MC0_MISC + bank * 4;
@@ -276,7 +276,7 @@ static void amd_threshold_interrupt(void)
276 mce_setup(&m); 276 mce_setup(&m);
277 277
278 /* assume first bank caused it */ 278 /* assume first bank caused it */
279 for (bank = 0; bank < NR_BANKS; ++bank) { 279 for (bank = 0; bank < mca_cfg.banks; ++bank) {
280 if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) 280 if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
281 continue; 281 continue;
282 for (block = 0; block < NR_BLOCKS; ++block) { 282 for (block = 0; block < NR_BLOCKS; ++block) {
@@ -467,7 +467,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
467 u32 low, high; 467 u32 low, high;
468 int err; 468 int err;
469 469
470 if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) 470 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
471 return 0; 471 return 0;
472 472
473 if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) 473 if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
@@ -575,7 +575,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
575 const char *name = th_names[bank]; 575 const char *name = th_names[bank];
576 int err = 0; 576 int err = 0;
577 577
578 if (shared_bank[bank]) { 578 if (is_shared_bank(bank)) {
579 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 579 nb = node_to_amd_nb(amd_get_nb_id(cpu));
580 580
581 /* threshold descriptor already initialized on this node? */ 581 /* threshold descriptor already initialized on this node? */
@@ -609,7 +609,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
609 609
610 per_cpu(threshold_banks, cpu)[bank] = b; 610 per_cpu(threshold_banks, cpu)[bank] = b;
611 611
612 if (shared_bank[bank]) { 612 if (is_shared_bank(bank)) {
613 atomic_set(&b->cpus, 1); 613 atomic_set(&b->cpus, 1);
614 614
615 /* nb is already initialized, see above */ 615 /* nb is already initialized, see above */
@@ -635,9 +635,17 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
635static __cpuinit int threshold_create_device(unsigned int cpu) 635static __cpuinit int threshold_create_device(unsigned int cpu)
636{ 636{
637 unsigned int bank; 637 unsigned int bank;
638 struct threshold_bank **bp;
638 int err = 0; 639 int err = 0;
639 640
640 for (bank = 0; bank < NR_BANKS; ++bank) { 641 bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
642 GFP_KERNEL);
643 if (!bp)
644 return -ENOMEM;
645
646 per_cpu(threshold_banks, cpu) = bp;
647
648 for (bank = 0; bank < mca_cfg.banks; ++bank) {
641 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 649 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
642 continue; 650 continue;
643 err = threshold_create_bank(cpu, bank); 651 err = threshold_create_bank(cpu, bank);
@@ -691,7 +699,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
691 if (!b->blocks) 699 if (!b->blocks)
692 goto free_out; 700 goto free_out;
693 701
694 if (shared_bank[bank]) { 702 if (is_shared_bank(bank)) {
695 if (!atomic_dec_and_test(&b->cpus)) { 703 if (!atomic_dec_and_test(&b->cpus)) {
696 __threshold_remove_blocks(b); 704 __threshold_remove_blocks(b);
697 per_cpu(threshold_banks, cpu)[bank] = NULL; 705 per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -719,11 +727,12 @@ static void threshold_remove_device(unsigned int cpu)
719{ 727{
720 unsigned int bank; 728 unsigned int bank;
721 729
722 for (bank = 0; bank < NR_BANKS; ++bank) { 730 for (bank = 0; bank < mca_cfg.banks; ++bank) {
723 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 731 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
724 continue; 732 continue;
725 threshold_remove_bank(cpu, bank); 733 threshold_remove_bank(cpu, bank);
726 } 734 }
735 kfree(per_cpu(threshold_banks, cpu));
727} 736}
728 737
729/* get notified when a cpu comes on/off */ 738/* get notified when a cpu comes on/off */
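With NR_BANKS gone, the per-CPU array of bank pointers is now sized by the detected mca_cfg.banks in threshold_create_device() and released again in threshold_remove_device(). A self-contained sketch of that allocate/free pairing (userspace stand-ins for kzalloc/kfree, names hypothetical):

#include <stdlib.h>

struct threshold_bank;				/* opaque here */

static struct threshold_bank **alloc_bank_slots(unsigned int banks)
{
	/* one zeroed pointer slot per detected MCA bank, like kzalloc() */
	return calloc(banks, sizeof(struct threshold_bank *));
}

static void free_bank_slots(struct threshold_bank **slots)
{
	free(slots);				/* mirrors the kfree() added above */
}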
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 402c454fbff0..ae1697c2afe3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,39 +285,24 @@ void cmci_clear(void)
285 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 285 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
286} 286}
287 287
288static long cmci_rediscover_work_func(void *arg) 288static void cmci_rediscover_work_func(void *arg)
289{ 289{
290 int banks; 290 int banks;
291 291
292 /* Recheck banks in case CPUs don't all have the same */ 292 /* Recheck banks in case CPUs don't all have the same */
293 if (cmci_supported(&banks)) 293 if (cmci_supported(&banks))
294 cmci_discover(banks); 294 cmci_discover(banks);
295
296 return 0;
297} 295}
298 296
299/* 297/* After a CPU went down cycle through all the others and rediscover */
300 * After a CPU went down cycle through all the others and rediscover 298void cmci_rediscover(void)
301 * Must run in process context.
302 */
303void cmci_rediscover(int dying)
304{ 299{
305 int cpu, banks; 300 int banks;
306 301
307 if (!cmci_supported(&banks)) 302 if (!cmci_supported(&banks))
308 return; 303 return;
309 304
310 for_each_online_cpu(cpu) { 305 on_each_cpu(cmci_rediscover_work_func, NULL, 1);
311 if (cpu == dying)
312 continue;
313
314 if (cpu == smp_processor_id()) {
315 cmci_rediscover_work_func(NULL);
316 continue;
317 }
318
319 work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
320 }
321} 306}
322 307
323/* 308/*
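The rework above relies on on_each_cpu(func, info, wait), which runs the callback directly on the calling CPU and via IPI on every other online CPU, waiting for completion when wait is non-zero, so the old per-CPU work_on_cpu() loop and the dying-CPU special case are no longer needed. A minimal sketch of the calling shape (function names here are illustrative, not from the diff):

static void rediscover_one(void *unused)
{
	/* runs on every online CPU, with interrupts disabled */
}

static void rediscover_all(void)
{
	on_each_cpu(rediscover_one, NULL, 1);	/* 1 == wait until all CPUs are done */
}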
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
deleted file mode 100644
index 091972ef49de..000000000000
--- a/arch/x86/kernel/cpu/mkcapflags.pl
+++ /dev/null
@@ -1,48 +0,0 @@
1#!/usr/bin/perl -w
2#
3# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
4#
5
6($in, $out) = @ARGV;
7
8open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
9open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
10
11print OUT "#ifndef _ASM_X86_CPUFEATURE_H\n";
12print OUT "#include <asm/cpufeature.h>\n";
13print OUT "#endif\n";
14print OUT "\n";
15print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
16
17%features = ();
18$err = 0;
19
20while (defined($line = <IN>)) {
21 if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
22 $macro = $1;
23 $feature = "\L$2";
24 $tail = $3;
25 if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
26 $feature = "\L$1";
27 }
28
29 next if ($feature eq '');
30
31 if ($features{$feature}++) {
32 print STDERR "$in: duplicate feature name: $feature\n";
33 $err++;
34 }
35 printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
36 }
37}
38print OUT "};\n";
39
40close(IN);
41close(OUT);
42
43if ($err) {
44 unlink($out);
45 exit(1);
46}
47
48exit(0);
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
new file mode 100644
index 000000000000..2bf616505499
--- /dev/null
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -0,0 +1,41 @@
1#!/bin/sh
2#
3# Generate the x86_cap_flags[] array from include/asm/cpufeature.h
4#
5
6IN=$1
7OUT=$2
8
9TABS="$(printf '\t\t\t\t\t')"
10trap 'rm "$OUT"' EXIT
11
12(
13 echo "#ifndef _ASM_X86_CPUFEATURE_H"
14 echo "#include <asm/cpufeature.h>"
15 echo "#endif"
16 echo ""
17 echo "const char * const x86_cap_flags[NCAPINTS*32] = {"
18
19 # Iterate through any input lines starting with #define X86_FEATURE_
20 sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN |
21 while read i
22 do
23 # Name is everything up to the first whitespace
24 NAME="$(echo "$i" | sed 's/ .*//')"
25
26 # If the /* comment */ starts with a quote string, grab that.
27 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
28 [ -z "$VALUE" ] && VALUE="\"$NAME\""
29 [ "$VALUE" == '""' ] && continue
30
31 # Name is uppercase, VALUE is all lowercase
32 VALUE="$(echo "$VALUE" | tr A-Z a-z)"
33
34 TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 ))
35 printf "\t[%s]%.*s = %s,\n" \
36 "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE"
37 done
38 echo "};"
39) > $OUT
40
41trap - EXIT
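For readers checking the TABCOUNT arithmetic in the new script: 5*8 is the 40-column field reserved for the bracketed name, and wc -c counts the trailing newline, so for a hypothetical NAME of FPU it yields (40 - 14 - 4) / 8 = 22 / 8 = 2, i.e. two tabs of padding before the '=' sign.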
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index bf0f01aea994..1025f3c99d20 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -180,8 +180,9 @@ static void release_pmc_hardware(void) {}
180 180
181static bool check_hw_exists(void) 181static bool check_hw_exists(void)
182{ 182{
183 u64 val, val_new = ~0; 183 u64 val, val_fail, val_new= ~0;
184 int i, reg, ret = 0; 184 int i, reg, reg_fail, ret = 0;
185 int bios_fail = 0;
185 186
186 /* 187 /*
187 * Check to see if the BIOS enabled any of the counters, if so 188 * Check to see if the BIOS enabled any of the counters, if so
@@ -192,8 +193,11 @@ static bool check_hw_exists(void)
192 ret = rdmsrl_safe(reg, &val); 193 ret = rdmsrl_safe(reg, &val);
193 if (ret) 194 if (ret)
194 goto msr_fail; 195 goto msr_fail;
195 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) 196 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
196 goto bios_fail; 197 bios_fail = 1;
198 val_fail = val;
199 reg_fail = reg;
200 }
197 } 201 }
198 202
199 if (x86_pmu.num_counters_fixed) { 203 if (x86_pmu.num_counters_fixed) {
@@ -202,8 +206,11 @@ static bool check_hw_exists(void)
202 if (ret) 206 if (ret)
203 goto msr_fail; 207 goto msr_fail;
204 for (i = 0; i < x86_pmu.num_counters_fixed; i++) { 208 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
205 if (val & (0x03 << i*4)) 209 if (val & (0x03 << i*4)) {
206 goto bios_fail; 210 bios_fail = 1;
211 val_fail = val;
212 reg_fail = reg;
213 }
207 } 214 }
208 } 215 }
209 216
@@ -221,14 +228,13 @@ static bool check_hw_exists(void)
221 if (ret || val != val_new) 228 if (ret || val != val_new)
222 goto msr_fail; 229 goto msr_fail;
223 230
224 return true;
225
226bios_fail:
227 /* 231 /*
228 * We still allow the PMU driver to operate: 232 * We still allow the PMU driver to operate:
229 */ 233 */
230 printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n"); 234 if (bios_fail) {
231 printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val); 235 printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
236 printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
237 }
232 238
233 return true; 239 return true;
234 240
@@ -1316,9 +1322,16 @@ static struct attribute_group x86_pmu_format_group = {
1316 */ 1322 */
1317static void __init filter_events(struct attribute **attrs) 1323static void __init filter_events(struct attribute **attrs)
1318{ 1324{
1325 struct device_attribute *d;
1326 struct perf_pmu_events_attr *pmu_attr;
1319 int i, j; 1327 int i, j;
1320 1328
1321 for (i = 0; attrs[i]; i++) { 1329 for (i = 0; attrs[i]; i++) {
1330 d = (struct device_attribute *)attrs[i];
1331 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1332 /* str trumps id */
1333 if (pmu_attr->event_str)
1334 continue;
1322 if (x86_pmu.event_map(i)) 1335 if (x86_pmu.event_map(i))
1323 continue; 1336 continue;
1324 1337
@@ -1330,22 +1343,45 @@ static void __init filter_events(struct attribute **attrs)
1330 } 1343 }
1331} 1344}
1332 1345
1333static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, 1346/* Merge two pointer arrays */
1347static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
1348{
1349 struct attribute **new;
1350 int j, i;
1351
1352 for (j = 0; a[j]; j++)
1353 ;
1354 for (i = 0; b[i]; i++)
1355 j++;
1356 j++;
1357
1358 new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1359 if (!new)
1360 return NULL;
1361
1362 j = 0;
1363 for (i = 0; a[i]; i++)
1364 new[j++] = a[i];
1365 for (i = 0; b[i]; i++)
1366 new[j++] = b[i];
1367 new[j] = NULL;
1368
1369 return new;
1370}
1371
1372ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1334 char *page) 1373 char *page)
1335{ 1374{
1336 struct perf_pmu_events_attr *pmu_attr = \ 1375 struct perf_pmu_events_attr *pmu_attr = \
1337 container_of(attr, struct perf_pmu_events_attr, attr); 1376 container_of(attr, struct perf_pmu_events_attr, attr);
1338
1339 u64 config = x86_pmu.event_map(pmu_attr->id); 1377 u64 config = x86_pmu.event_map(pmu_attr->id);
1340 return x86_pmu.events_sysfs_show(page, config);
1341}
1342 1378
1343#define EVENT_VAR(_id) event_attr_##_id 1379 /* string trumps id */
1344#define EVENT_PTR(_id) &event_attr_##_id.attr.attr 1380 if (pmu_attr->event_str)
1381 return sprintf(page, "%s", pmu_attr->event_str);
1345 1382
1346#define EVENT_ATTR(_name, _id) \ 1383 return x86_pmu.events_sysfs_show(page, config);
1347 PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \ 1384}
1348 events_sysfs_show)
1349 1385
1350EVENT_ATTR(cpu-cycles, CPU_CYCLES ); 1386EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1351EVENT_ATTR(instructions, INSTRUCTIONS ); 1387EVENT_ATTR(instructions, INSTRUCTIONS );
@@ -1459,16 +1495,27 @@ static int __init init_hw_perf_events(void)
1459 1495
1460 unconstrained = (struct event_constraint) 1496 unconstrained = (struct event_constraint)
1461 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 1497 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1462 0, x86_pmu.num_counters, 0); 1498 0, x86_pmu.num_counters, 0, 0);
1463 1499
1464 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1500 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1465 x86_pmu_format_group.attrs = x86_pmu.format_attrs; 1501 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1466 1502
1503 if (x86_pmu.event_attrs)
1504 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1505
1467 if (!x86_pmu.events_sysfs_show) 1506 if (!x86_pmu.events_sysfs_show)
1468 x86_pmu_events_group.attrs = &empty_attrs; 1507 x86_pmu_events_group.attrs = &empty_attrs;
1469 else 1508 else
1470 filter_events(x86_pmu_events_group.attrs); 1509 filter_events(x86_pmu_events_group.attrs);
1471 1510
1511 if (x86_pmu.cpu_events) {
1512 struct attribute **tmp;
1513
1514 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1515 if (!WARN_ON(!tmp))
1516 x86_pmu_events_group.attrs = tmp;
1517 }
1518
1472 pr_info("... version: %d\n", x86_pmu.version); 1519 pr_info("... version: %d\n", x86_pmu.version);
1473 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); 1520 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1474 pr_info("... generic registers: %d\n", x86_pmu.num_counters); 1521 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
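merge_attr() above simply concatenates two NULL-terminated pointer arrays into a newly allocated one. The same idea in a self-contained form (plain malloc instead of kmalloc, generic void pointers):

#include <stdlib.h>

static void **merge_ptr_arrays(void **a, void **b)
{
	size_t i, j, n = 0;
	void **out;

	for (i = 0; a[i]; i++)			/* count entries in a */
		n++;
	for (i = 0; b[i]; i++)			/* count entries in b */
		n++;

	out = malloc((n + 1) * sizeof(*out));	/* +1 for the NULL terminator */
	if (!out)
		return NULL;

	j = 0;
	for (i = 0; a[i]; i++)
		out[j++] = a[i];
	for (i = 0; b[i]; i++)
		out[j++] = b[i];
	out[j] = NULL;

	return out;
}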
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7f5c75c2afdd..ba9aadfa683b 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -46,6 +46,7 @@ enum extra_reg_type {
46 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ 46 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
47 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ 47 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
48 EXTRA_REG_LBR = 2, /* lbr_select */ 48 EXTRA_REG_LBR = 2, /* lbr_select */
49 EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */
49 50
50 EXTRA_REG_MAX /* number of entries needed */ 51 EXTRA_REG_MAX /* number of entries needed */
51}; 52};
@@ -59,7 +60,13 @@ struct event_constraint {
59 u64 cmask; 60 u64 cmask;
60 int weight; 61 int weight;
61 int overlap; 62 int overlap;
63 int flags;
62}; 64};
65/*
66 * struct event_constraint flags
67 */
68#define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */
69#define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */
63 70
64struct amd_nb { 71struct amd_nb {
65 int nb_id; /* NorthBridge id */ 72 int nb_id; /* NorthBridge id */
@@ -170,16 +177,17 @@ struct cpu_hw_events {
170 void *kfree_on_online; 177 void *kfree_on_online;
171}; 178};
172 179
173#define __EVENT_CONSTRAINT(c, n, m, w, o) {\ 180#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
174 { .idxmsk64 = (n) }, \ 181 { .idxmsk64 = (n) }, \
175 .code = (c), \ 182 .code = (c), \
176 .cmask = (m), \ 183 .cmask = (m), \
177 .weight = (w), \ 184 .weight = (w), \
178 .overlap = (o), \ 185 .overlap = (o), \
186 .flags = f, \
179} 187}
180 188
181#define EVENT_CONSTRAINT(c, n, m) \ 189#define EVENT_CONSTRAINT(c, n, m) \
182 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) 190 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
183 191
184/* 192/*
185 * The overlap flag marks event constraints with overlapping counter 193 * The overlap flag marks event constraints with overlapping counter
@@ -203,7 +211,7 @@ struct cpu_hw_events {
203 * and its counter masks must be kept at a minimum. 211 * and its counter masks must be kept at a minimum.
204 */ 212 */
205#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ 213#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
206 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) 214 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
207 215
208/* 216/*
209 * Constraint on the Event code. 217 * Constraint on the Event code.
@@ -231,6 +239,14 @@ struct cpu_hw_events {
231#define INTEL_UEVENT_CONSTRAINT(c, n) \ 239#define INTEL_UEVENT_CONSTRAINT(c, n) \
232 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) 240 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
233 241
242#define INTEL_PLD_CONSTRAINT(c, n) \
243 __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
244 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
245
246#define INTEL_PST_CONSTRAINT(c, n) \
247 __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
248 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
249
234#define EVENT_CONSTRAINT_END \ 250#define EVENT_CONSTRAINT_END \
235 EVENT_CONSTRAINT(0, 0, 0) 251 EVENT_CONSTRAINT(0, 0, 0)
236 252
@@ -260,12 +276,22 @@ struct extra_reg {
260 .msr = (ms), \ 276 .msr = (ms), \
261 .config_mask = (m), \ 277 .config_mask = (m), \
262 .valid_mask = (vm), \ 278 .valid_mask = (vm), \
263 .idx = EXTRA_REG_##i \ 279 .idx = EXTRA_REG_##i, \
264 } 280 }
265 281
266#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ 282#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
267 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) 283 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
268 284
285#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
286 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
287 ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
288
289#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
290 INTEL_UEVENT_EXTRA_REG(c, \
291 MSR_PEBS_LD_LAT_THRESHOLD, \
292 0xffff, \
293 LDLAT)
294
269#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) 295#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
270 296
271union perf_capabilities { 297union perf_capabilities {
@@ -355,8 +381,10 @@ struct x86_pmu {
355 */ 381 */
356 int attr_rdpmc; 382 int attr_rdpmc;
357 struct attribute **format_attrs; 383 struct attribute **format_attrs;
384 struct attribute **event_attrs;
358 385
359 ssize_t (*events_sysfs_show)(char *page, u64 config); 386 ssize_t (*events_sysfs_show)(char *page, u64 config);
387 struct attribute **cpu_events;
360 388
361 /* 389 /*
362 * CPU Hotplug hooks 390 * CPU Hotplug hooks
@@ -421,6 +449,23 @@ do { \
421#define ERF_NO_HT_SHARING 1 449#define ERF_NO_HT_SHARING 1
422#define ERF_HAS_RSP_1 2 450#define ERF_HAS_RSP_1 2
423 451
452#define EVENT_VAR(_id) event_attr_##_id
453#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
454
455#define EVENT_ATTR(_name, _id) \
456static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
457 .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
458 .id = PERF_COUNT_HW_##_id, \
459 .event_str = NULL, \
460};
461
462#define EVENT_ATTR_STR(_name, v, str) \
463static struct perf_pmu_events_attr event_attr_##v = { \
464 .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
465 .id = 0, \
466 .event_str = str, \
467};
468
424extern struct x86_pmu x86_pmu __read_mostly; 469extern struct x86_pmu x86_pmu __read_mostly;
425 470
426DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); 471DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -628,6 +673,9 @@ int p6_pmu_init(void);
628 673
629int knc_pmu_init(void); 674int knc_pmu_init(void);
630 675
676ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
677 char *page);
678
631#else /* CONFIG_CPU_SUP_INTEL */ 679#else /* CONFIG_CPU_SUP_INTEL */
632 680
633static inline void reserve_ds_buffers(void) 681static inline void reserve_ds_buffers(void)
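To see how the new .flags field threads through the constraint macros: a hypothetical INTEL_PLD_CONSTRAINT(0x01cd, 0x8) expands through __EVENT_CONSTRAINT to a constraint with .code = 0x01cd, .idxmsk64 = 0x8, .cmask = INTEL_ARCH_EVENT_MASK, .weight = HWEIGHT(0x8) = 1, .overlap = 0 and .flags = PERF_X86_EVENT_PEBS_LDLAT, which the PEBS code can then test to enable load-latency sampling; the event code and counter mask in this example are illustrative only.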
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dfdab42aed27..7e28d9467bb4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,14 +132,11 @@ static u64 amd_pmu_event_map(int hw_event)
132 return amd_perfmon_event_map[hw_event]; 132 return amd_perfmon_event_map[hw_event];
133} 133}
134 134
135static struct event_constraint *amd_nb_event_constraint;
136
137/* 135/*
138 * Previously calculated offsets 136 * Previously calculated offsets
139 */ 137 */
140static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; 138static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
141static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; 139static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
142static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
143 140
144/* 141/*
145 * Legacy CPUs: 142 * Legacy CPUs:
@@ -147,14 +144,10 @@ static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
147 * 144 *
148 * CPUs with core performance counter extensions: 145 * CPUs with core performance counter extensions:
149 * 6 counters starting at 0xc0010200 each offset by 2 146 * 6 counters starting at 0xc0010200 each offset by 2
150 *
151 * CPUs with north bridge performance counter extensions:
152 * 4 additional counters starting at 0xc0010240 each offset by 2
153 * (indexed right above either one of the above core counters)
154 */ 147 */
155static inline int amd_pmu_addr_offset(int index, bool eventsel) 148static inline int amd_pmu_addr_offset(int index, bool eventsel)
156{ 149{
157 int offset, first, base; 150 int offset;
158 151
159 if (!index) 152 if (!index)
160 return index; 153 return index;
@@ -167,23 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
167 if (offset) 160 if (offset)
168 return offset; 161 return offset;
169 162
170 if (amd_nb_event_constraint && 163 if (!cpu_has_perfctr_core)
171 test_bit(index, amd_nb_event_constraint->idxmsk)) {
172 /*
173 * calculate the offset of NB counters with respect to
174 * base eventsel or perfctr
175 */
176
177 first = find_first_bit(amd_nb_event_constraint->idxmsk,
178 X86_PMC_IDX_MAX);
179
180 if (eventsel)
181 base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
182 else
183 base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
184
185 offset = base + ((index - first) << 1);
186 } else if (!cpu_has_perfctr_core)
187 offset = index; 164 offset = index;
188 else 165 else
189 offset = index << 1; 166 offset = index << 1;
@@ -196,36 +173,6 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
196 return offset; 173 return offset;
197} 174}
198 175
199static inline int amd_pmu_rdpmc_index(int index)
200{
201 int ret, first;
202
203 if (!index)
204 return index;
205
206 ret = rdpmc_indexes[index];
207
208 if (ret)
209 return ret;
210
211 if (amd_nb_event_constraint &&
212 test_bit(index, amd_nb_event_constraint->idxmsk)) {
213 /*
214 * according to the mnual, ECX value of the NB counters is
215 * the index of the NB counter (0, 1, 2 or 3) plus 6
216 */
217
218 first = find_first_bit(amd_nb_event_constraint->idxmsk,
219 X86_PMC_IDX_MAX);
220 ret = index - first + 6;
221 } else
222 ret = index;
223
224 rdpmc_indexes[index] = ret;
225
226 return ret;
227}
228
229static int amd_core_hw_config(struct perf_event *event) 176static int amd_core_hw_config(struct perf_event *event)
230{ 177{
231 if (event->attr.exclude_host && event->attr.exclude_guest) 178 if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -245,34 +192,6 @@ static int amd_core_hw_config(struct perf_event *event)
245} 192}
246 193
247/* 194/*
248 * NB counters do not support the following event select bits:
249 * Host/Guest only
250 * Counter mask
251 * Invert counter mask
252 * Edge detect
253 * OS/User mode
254 */
255static int amd_nb_hw_config(struct perf_event *event)
256{
257 /* for NB, we only allow system wide counting mode */
258 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
259 return -EINVAL;
260
261 if (event->attr.exclude_user || event->attr.exclude_kernel ||
262 event->attr.exclude_host || event->attr.exclude_guest)
263 return -EINVAL;
264
265 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
266 ARCH_PERFMON_EVENTSEL_OS);
267
268 if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
269 ARCH_PERFMON_EVENTSEL_INT))
270 return -EINVAL;
271
272 return 0;
273}
274
275/*
276 * AMD64 events are detected based on their event codes. 195 * AMD64 events are detected based on their event codes.
277 */ 196 */
278static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) 197static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
@@ -285,11 +204,6 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
285 return (hwc->config & 0xe0) == 0xe0; 204 return (hwc->config & 0xe0) == 0xe0;
286} 205}
287 206
288static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
289{
290 return amd_nb_event_constraint && amd_is_nb_event(hwc);
291}
292
293static inline int amd_has_nb(struct cpu_hw_events *cpuc) 207static inline int amd_has_nb(struct cpu_hw_events *cpuc)
294{ 208{
295 struct amd_nb *nb = cpuc->amd_nb; 209 struct amd_nb *nb = cpuc->amd_nb;
@@ -315,9 +229,6 @@ static int amd_pmu_hw_config(struct perf_event *event)
315 if (event->attr.type == PERF_TYPE_RAW) 229 if (event->attr.type == PERF_TYPE_RAW)
316 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; 230 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
317 231
318 if (amd_is_perfctr_nb_event(&event->hw))
319 return amd_nb_hw_config(event);
320
321 return amd_core_hw_config(event); 232 return amd_core_hw_config(event);
322} 233}
323 234
@@ -341,19 +252,6 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
341 } 252 }
342} 253}
343 254
344static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
345{
346 int core_id = cpu_data(smp_processor_id()).cpu_core_id;
347
348 /* deliver interrupts only to this core */
349 if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
350 hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
351 hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
352 hwc->config |= (u64)(core_id) <<
353 AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
354 }
355}
356
357 /* 255 /*
358 * AMD64 NorthBridge events need special treatment because 256 * AMD64 NorthBridge events need special treatment because
359 * counter access needs to be synchronized across all cores 257 * counter access needs to be synchronized across all cores
@@ -441,9 +339,6 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
441 if (new == -1) 339 if (new == -1)
442 return &emptyconstraint; 340 return &emptyconstraint;
443 341
444 if (amd_is_perfctr_nb_event(hwc))
445 amd_nb_interrupt_hw_config(hwc);
446
447 return &nb->event_constraints[new]; 342 return &nb->event_constraints[new];
448} 343}
449 344
@@ -543,8 +438,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
543 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) 438 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
544 return &unconstrained; 439 return &unconstrained;
545 440
546 return __amd_get_nb_event_constraints(cpuc, event, 441 return __amd_get_nb_event_constraints(cpuc, event, NULL);
547 amd_nb_event_constraint);
548} 442}
549 443
550static void amd_put_event_constraints(struct cpu_hw_events *cpuc, 444static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
@@ -643,9 +537,6 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
643static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); 537static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
644static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); 538static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
645 539
646static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
647static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
648
649static struct event_constraint * 540static struct event_constraint *
650amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) 541amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
651{ 542{
@@ -711,8 +602,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
711 return &amd_f15_PMC20; 602 return &amd_f15_PMC20;
712 } 603 }
713 case AMD_EVENT_NB: 604 case AMD_EVENT_NB:
714 return __amd_get_nb_event_constraints(cpuc, event, 605 /* moved to perf_event_amd_uncore.c */
715 amd_nb_event_constraint); 606 return &emptyconstraint;
716 default: 607 default:
717 return &emptyconstraint; 608 return &emptyconstraint;
718 } 609 }
@@ -738,7 +629,6 @@ static __initconst const struct x86_pmu amd_pmu = {
738 .eventsel = MSR_K7_EVNTSEL0, 629 .eventsel = MSR_K7_EVNTSEL0,
739 .perfctr = MSR_K7_PERFCTR0, 630 .perfctr = MSR_K7_PERFCTR0,
740 .addr_offset = amd_pmu_addr_offset, 631 .addr_offset = amd_pmu_addr_offset,
741 .rdpmc_index = amd_pmu_rdpmc_index,
742 .event_map = amd_pmu_event_map, 632 .event_map = amd_pmu_event_map,
743 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 633 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
744 .num_counters = AMD64_NUM_COUNTERS, 634 .num_counters = AMD64_NUM_COUNTERS,
@@ -790,23 +680,6 @@ static int setup_perfctr_core(void)
790 return 0; 680 return 0;
791} 681}
792 682
793static int setup_perfctr_nb(void)
794{
795 if (!cpu_has_perfctr_nb)
796 return -ENODEV;
797
798 x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
799
800 if (cpu_has_perfctr_core)
801 amd_nb_event_constraint = &amd_NBPMC96;
802 else
803 amd_nb_event_constraint = &amd_NBPMC74;
804
805 printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
806
807 return 0;
808}
809
810__init int amd_pmu_init(void) 683__init int amd_pmu_init(void)
811{ 684{
812 /* Performance-monitoring supported from K7 and later: */ 685 /* Performance-monitoring supported from K7 and later: */
@@ -817,7 +690,6 @@ __init int amd_pmu_init(void)
817 690
818 setup_event_constraints(); 691 setup_event_constraints();
819 setup_perfctr_core(); 692 setup_perfctr_core();
820 setup_perfctr_nb();
821 693
822 /* Events are common for all AMDs */ 694 /* Events are common for all AMDs */
823 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 695 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
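amd_get_event_constraints_f15h() now hands AMD_EVENT_NB events an emptyconstraint because northbridge counting moves to the separate uncore driver added below, which exposes its own PMU instead of borrowing core counters. A sketch of the registration idiom such a driver relies on (structure contents elided, names illustrative):

static struct pmu amd_nb_pmu_sketch = {
	/* .event_init, .add, .del, .start, .stop, .read callbacks ... */
};

static int __init register_nb_pmu_sketch(void)
{
	/* a dynamically numbered PMU type, visible as "amd_nb" in sysfs */
	return perf_pmu_register(&amd_nb_pmu_sketch, "amd_nb", -1);
}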
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
new file mode 100644
index 000000000000..c0c661adf03e
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -0,0 +1,547 @@
1/*
2 * Copyright (C) 2013 Advanced Micro Devices, Inc.
3 *
4 * Author: Jacob Shin <jacob.shin@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/perf_event.h>
12#include <linux/percpu.h>
13#include <linux/types.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/cpu.h>
17#include <linux/cpumask.h>
18
19#include <asm/cpufeature.h>
20#include <asm/perf_event.h>
21#include <asm/msr.h>
22
23#define NUM_COUNTERS_NB 4
24#define NUM_COUNTERS_L2 4
25#define MAX_COUNTERS NUM_COUNTERS_NB
26
27#define RDPMC_BASE_NB 6
28#define RDPMC_BASE_L2 10
29
30#define COUNTER_SHIFT 16
31
32struct amd_uncore {
33 int id;
34 int refcnt;
35 int cpu;
36 int num_counters;
37 int rdpmc_base;
38 u32 msr_base;
39 cpumask_t *active_mask;
40 struct pmu *pmu;
41 struct perf_event *events[MAX_COUNTERS];
42 struct amd_uncore *free_when_cpu_online;
43};
44
45static struct amd_uncore * __percpu *amd_uncore_nb;
46static struct amd_uncore * __percpu *amd_uncore_l2;
47
48static struct pmu amd_nb_pmu;
49static struct pmu amd_l2_pmu;
50
51static cpumask_t amd_nb_active_mask;
52static cpumask_t amd_l2_active_mask;
53
54static bool is_nb_event(struct perf_event *event)
55{
56 return event->pmu->type == amd_nb_pmu.type;
57}
58
59static bool is_l2_event(struct perf_event *event)
60{
61 return event->pmu->type == amd_l2_pmu.type;
62}
63
64static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
65{
66 if (is_nb_event(event) && amd_uncore_nb)
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu);
68 else if (is_l2_event(event) && amd_uncore_l2)
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu);
70
71 return NULL;
72}
73
74static void amd_uncore_read(struct perf_event *event)
75{
76 struct hw_perf_event *hwc = &event->hw;
77 u64 prev, new;
78 s64 delta;
79
80 /*
81 * since we do not enable counter overflow interrupts,
82 * we do not have to worry about prev_count changing on us
83 */
84
85 prev = local64_read(&hwc->prev_count);
86 rdpmcl(hwc->event_base_rdpmc, new);
87 local64_set(&hwc->prev_count, new);
88 delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
89 delta >>= COUNTER_SHIFT;
90 local64_add(delta, &event->count);
91}
92
93static void amd_uncore_start(struct perf_event *event, int flags)
94{
95 struct hw_perf_event *hwc = &event->hw;
96
97 if (flags & PERF_EF_RELOAD)
98 wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
99
100 hwc->state = 0;
101 wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
102 perf_event_update_userpage(event);
103}
104
105static void amd_uncore_stop(struct perf_event *event, int flags)
106{
107 struct hw_perf_event *hwc = &event->hw;
108
109 wrmsrl(hwc->config_base, hwc->config);
110 hwc->state |= PERF_HES_STOPPED;
111
112 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
113 amd_uncore_read(event);
114 hwc->state |= PERF_HES_UPTODATE;
115 }
116}
117
118static int amd_uncore_add(struct perf_event *event, int flags)
119{
120 int i;
121 struct amd_uncore *uncore = event_to_amd_uncore(event);
122 struct hw_perf_event *hwc = &event->hw;
123
124 /* are we already assigned? */
125 if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
126 goto out;
127
128 for (i = 0; i < uncore->num_counters; i++) {
129 if (uncore->events[i] == event) {
130 hwc->idx = i;
131 goto out;
132 }
133 }
134
135 /* if not, take the first available counter */
136 hwc->idx = -1;
137 for (i = 0; i < uncore->num_counters; i++) {
138 if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
139 hwc->idx = i;
140 break;
141 }
142 }
143
144out:
145 if (hwc->idx == -1)
146 return -EBUSY;
147
148 hwc->config_base = uncore->msr_base + (2 * hwc->idx);
149 hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
150 hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
151 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
152
153 if (flags & PERF_EF_START)
154 amd_uncore_start(event, PERF_EF_RELOAD);
155
156 return 0;
157}
158
159static void amd_uncore_del(struct perf_event *event, int flags)
160{
161 int i;
162 struct amd_uncore *uncore = event_to_amd_uncore(event);
163 struct hw_perf_event *hwc = &event->hw;
164
165 amd_uncore_stop(event, PERF_EF_UPDATE);
166
167 for (i = 0; i < uncore->num_counters; i++) {
168 if (cmpxchg(&uncore->events[i], event, NULL) == event)
169 break;
170 }
171
172 hwc->idx = -1;
173}
174
175static int amd_uncore_event_init(struct perf_event *event)
176{
177 struct amd_uncore *uncore;
178 struct hw_perf_event *hwc = &event->hw;
179
180 if (event->attr.type != event->pmu->type)
181 return -ENOENT;
182
183 /*
184 * NB and L2 counters (MSRs) are shared across all cores that share the
185 * same NB / L2 cache. Interrupts can be directed to a single target
186 * core, however, event counts generated by processes running on other
187 * cores cannot be masked out. So we do not support sampling and
188 * per-thread events.
189 */
190 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
191 return -EINVAL;
192
193 /* NB and L2 counters do not have usr/os/guest/host bits */
194 if (event->attr.exclude_user || event->attr.exclude_kernel ||
195 event->attr.exclude_host || event->attr.exclude_guest)
196 return -EINVAL;
197
198 /* and we do not enable counter overflow interrupts */
199 hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
200 hwc->idx = -1;
201
202 if (event->cpu < 0)
203 return -EINVAL;
204
205 uncore = event_to_amd_uncore(event);
206 if (!uncore)
207 return -ENODEV;
208
209 /*
210 * since requests can come in on any of the shared cores, we remap
211 * them to a single common cpu.
210 * since requests can come in on any of the shared cores, we remap
212 */
213 event->cpu = uncore->cpu;
214
215 return 0;
216}
217
218static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
219 struct device_attribute *attr,
220 char *buf)
221{
222 int n;
223 cpumask_t *active_mask;
224 struct pmu *pmu = dev_get_drvdata(dev);
225
226 if (pmu->type == amd_nb_pmu.type)
227 active_mask = &amd_nb_active_mask;
228 else if (pmu->type == amd_l2_pmu.type)
229 active_mask = &amd_l2_active_mask;
230 else
231 return 0;
232
233 n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
234 buf[n++] = '\n';
235 buf[n] = '\0';
236 return n;
237}
238static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
239
240static struct attribute *amd_uncore_attrs[] = {
241 &dev_attr_cpumask.attr,
242 NULL,
243};
244
245static struct attribute_group amd_uncore_attr_group = {
246 .attrs = amd_uncore_attrs,
247};
248
249PMU_FORMAT_ATTR(event, "config:0-7,32-35");
250PMU_FORMAT_ATTR(umask, "config:8-15");
251
252static struct attribute *amd_uncore_format_attr[] = {
253 &format_attr_event.attr,
254 &format_attr_umask.attr,
255 NULL,
256};
257
258static struct attribute_group amd_uncore_format_group = {
259 .name = "format",
260 .attrs = amd_uncore_format_attr,
261};
262
263static const struct attribute_group *amd_uncore_attr_groups[] = {
264 &amd_uncore_attr_group,
265 &amd_uncore_format_group,
266 NULL,
267};
268
269static struct pmu amd_nb_pmu = {
270 .attr_groups = amd_uncore_attr_groups,
271 .name = "amd_nb",
272 .event_init = amd_uncore_event_init,
273 .add = amd_uncore_add,
274 .del = amd_uncore_del,
275 .start = amd_uncore_start,
276 .stop = amd_uncore_stop,
277 .read = amd_uncore_read,
278};
279
280static struct pmu amd_l2_pmu = {
281 .attr_groups = amd_uncore_attr_groups,
282 .name = "amd_l2",
283 .event_init = amd_uncore_event_init,
284 .add = amd_uncore_add,
285 .del = amd_uncore_del,
286 .start = amd_uncore_start,
287 .stop = amd_uncore_stop,
288 .read = amd_uncore_read,
289};
290
291static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
292{
293 return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
294 cpu_to_node(cpu));
295}
296
297static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
298{
299 struct amd_uncore *uncore;
300
301 if (amd_uncore_nb) {
302 uncore = amd_uncore_alloc(cpu);
303 uncore->cpu = cpu;
304 uncore->num_counters = NUM_COUNTERS_NB;
305 uncore->rdpmc_base = RDPMC_BASE_NB;
306 uncore->msr_base = MSR_F15H_NB_PERF_CTL;
307 uncore->active_mask = &amd_nb_active_mask;
308 uncore->pmu = &amd_nb_pmu;
309 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
310 }
311
312 if (amd_uncore_l2) {
313 uncore = amd_uncore_alloc(cpu);
314 uncore->cpu = cpu;
315 uncore->num_counters = NUM_COUNTERS_L2;
316 uncore->rdpmc_base = RDPMC_BASE_L2;
317 uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
318 uncore->active_mask = &amd_l2_active_mask;
319 uncore->pmu = &amd_l2_pmu;
320 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
321 }
322}
323
324static struct amd_uncore *
325__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
326 struct amd_uncore * __percpu *uncores)
327{
328 unsigned int cpu;
329 struct amd_uncore *that;
330
331 for_each_online_cpu(cpu) {
332 that = *per_cpu_ptr(uncores, cpu);
333
334 if (!that)
335 continue;
336
337 if (this == that)
338 continue;
339
340 if (this->id == that->id) {
341 that->free_when_cpu_online = this;
342 this = that;
343 break;
344 }
345 }
346
347 this->refcnt++;
348 return this;
349}
350
351static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
352{
353 unsigned int eax, ebx, ecx, edx;
354 struct amd_uncore *uncore;
355
356 if (amd_uncore_nb) {
357 uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
358 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
359 uncore->id = ecx & 0xff;
360
361 uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
362 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
363 }
364
365 if (amd_uncore_l2) {
366 unsigned int apicid = cpu_data(cpu).apicid;
367 unsigned int nshared;
368
369 uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
370 cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
371 nshared = ((eax >> 14) & 0xfff) + 1;
372 uncore->id = apicid - (apicid % nshared);
373
374 uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
375 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
376 }
377}
378
379static void __cpuinit uncore_online(unsigned int cpu,
380 struct amd_uncore * __percpu *uncores)
381{
382 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
383
384 kfree(uncore->free_when_cpu_online);
385 uncore->free_when_cpu_online = NULL;
386
387 if (cpu == uncore->cpu)
388 cpumask_set_cpu(cpu, uncore->active_mask);
389}
390
391static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
392{
393 if (amd_uncore_nb)
394 uncore_online(cpu, amd_uncore_nb);
395
396 if (amd_uncore_l2)
397 uncore_online(cpu, amd_uncore_l2);
398}
399
400static void __cpuinit uncore_down_prepare(unsigned int cpu,
401 struct amd_uncore * __percpu *uncores)
402{
403 unsigned int i;
404 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
405
406 if (this->cpu != cpu)
407 return;
408
409 /* this cpu is going down, migrate to a shared sibling if possible */
410 for_each_online_cpu(i) {
411 struct amd_uncore *that = *per_cpu_ptr(uncores, i);
412
413 if (cpu == i)
414 continue;
415
416 if (this == that) {
417 perf_pmu_migrate_context(this->pmu, cpu, i);
418 cpumask_clear_cpu(cpu, that->active_mask);
419 cpumask_set_cpu(i, that->active_mask);
420 that->cpu = i;
421 break;
422 }
423 }
424}
425
426static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
427{
428 if (amd_uncore_nb)
429 uncore_down_prepare(cpu, amd_uncore_nb);
430
431 if (amd_uncore_l2)
432 uncore_down_prepare(cpu, amd_uncore_l2);
433}
434
435static void __cpuinit uncore_dead(unsigned int cpu,
436 struct amd_uncore * __percpu *uncores)
437{
438 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
439
440 if (cpu == uncore->cpu)
441 cpumask_clear_cpu(cpu, uncore->active_mask);
442
443 if (!--uncore->refcnt)
444 kfree(uncore);
445 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
446}
447
448static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
449{
450 if (amd_uncore_nb)
451 uncore_dead(cpu, amd_uncore_nb);
452
453 if (amd_uncore_l2)
454 uncore_dead(cpu, amd_uncore_l2);
455}
456
457static int __cpuinit
458amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
459 void *hcpu)
460{
461 unsigned int cpu = (long)hcpu;
462
463 switch (action & ~CPU_TASKS_FROZEN) {
464 case CPU_UP_PREPARE:
465 amd_uncore_cpu_up_prepare(cpu);
466 break;
467
468 case CPU_STARTING:
469 amd_uncore_cpu_starting(cpu);
470 break;
471
472 case CPU_ONLINE:
473 amd_uncore_cpu_online(cpu);
474 break;
475
476 case CPU_DOWN_PREPARE:
477 amd_uncore_cpu_down_prepare(cpu);
478 break;
479
480 case CPU_UP_CANCELED:
481 case CPU_DEAD:
482 amd_uncore_cpu_dead(cpu);
483 break;
484
485 default:
486 break;
487 }
488
489 return NOTIFY_OK;
490}
491
492static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
493 .notifier_call = amd_uncore_cpu_notifier,
494 .priority = CPU_PRI_PERF + 1,
495};
496
497static void __init init_cpu_already_online(void *dummy)
498{
499 unsigned int cpu = smp_processor_id();
500
501 amd_uncore_cpu_starting(cpu);
502 amd_uncore_cpu_online(cpu);
503}
504
505static int __init amd_uncore_init(void)
506{
507 unsigned int cpu;
508 int ret = -ENODEV;
509
510 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
511 return -ENODEV;
512
513 if (!cpu_has_topoext)
514 return -ENODEV;
515
516 if (cpu_has_perfctr_nb) {
517 amd_uncore_nb = alloc_percpu(struct amd_uncore *);
518 perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
519
520 printk(KERN_INFO "perf: AMD NB counters detected\n");
521 ret = 0;
522 }
523
524 if (cpu_has_perfctr_l2) {
525 amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
526 perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
527
528 printk(KERN_INFO "perf: AMD L2I counters detected\n");
529 ret = 0;
530 }
531
532 if (ret)
533 return -ENODEV;
534
535 get_online_cpus();
536 /* init cpus already online before registering for hotplug notifier */
537 for_each_online_cpu(cpu) {
538 amd_uncore_cpu_up_prepare(cpu);
539 smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
540 }
541
542 register_cpu_notifier(&amd_uncore_cpu_notifier_block);
543 put_online_cpus();
544
545 return 0;
546}
547device_initcall(amd_uncore_init);
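The read path in this new file relies on the NB/L2I counters being 48 bits wide: amd_uncore_read() shifts both samples left by COUNTER_SHIFT (16) so that counter bit 47 lands in bit 63, letting a signed subtraction and arithmetic right shift absorb a wrap at 2^48. A standalone sketch of that arithmetic, outside the kernel and assuming ordinary 64-bit two's-complement types:

/*
 * Standalone sketch, not kernel code: the wrap-safe delta computation
 * used by amd_uncore_read() for the 48-bit NB/L2I counters.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define COUNTER_SHIFT 16

static int64_t counter_delta(uint64_t prev, uint64_t cur)
{
	/* move bit 47 to bit 63, subtract, then sign-extend back down */
	int64_t delta = (cur << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);

	return delta >> COUNTER_SHIFT;
}

int main(void)
{
	/* counter wrapped: 256 ticks before 2^48, then 256 ticks after */
	uint64_t prev = 0xffffffffff00ULL;
	uint64_t cur  = 0x000000000100ULL;

	printf("delta = %" PRId64 "\n", counter_delta(prev, cur)); /* prints 512 */
	return 0;
}

Once registered, the two PMUs should appear under /sys/bus/event_source/devices/ as amd_nb and amd_l2, each exposing the cpumask attribute and the event/umask format attributes defined above.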
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cc45deb791b0..ffd6050a1de4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -81,6 +81,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
81static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = 81static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
82{ 82{
83 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 83 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
84 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
84 EVENT_EXTRA_END 85 EVENT_EXTRA_END
85}; 86};
86 87
@@ -108,6 +109,8 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
108 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 109 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
109 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 110 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
110 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 111 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
112 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
113 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
111 EVENT_CONSTRAINT_END 114 EVENT_CONSTRAINT_END
112}; 115};
113 116
@@ -136,6 +139,7 @@ static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
136{ 139{
137 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 140 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
138 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), 141 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
142 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
139 EVENT_EXTRA_END 143 EVENT_EXTRA_END
140}; 144};
141 145
@@ -155,6 +159,8 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
155static struct extra_reg intel_snb_extra_regs[] __read_mostly = { 159static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
156 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), 160 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
157 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 161 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
162 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
163 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
158 EVENT_EXTRA_END 164 EVENT_EXTRA_END
159}; 165};
160 166
@@ -164,6 +170,21 @@ static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
164 EVENT_EXTRA_END 170 EVENT_EXTRA_END
165}; 171};
166 172
173EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
174EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
175EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
176
177struct attribute *nhm_events_attrs[] = {
178 EVENT_PTR(mem_ld_nhm),
179 NULL,
180};
181
182struct attribute *snb_events_attrs[] = {
183 EVENT_PTR(mem_ld_snb),
184 EVENT_PTR(mem_st_snb),
185 NULL,
186};
187
167static u64 intel_pmu_event_map(int hw_event) 188static u64 intel_pmu_event_map(int hw_event)
168{ 189{
169 return intel_perfmon_event_map[hw_event]; 190 return intel_perfmon_event_map[hw_event];
@@ -1398,8 +1419,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1398 1419
1399 if (x86_pmu.event_constraints) { 1420 if (x86_pmu.event_constraints) {
1400 for_each_event_constraint(c, x86_pmu.event_constraints) { 1421 for_each_event_constraint(c, x86_pmu.event_constraints) {
1401 if ((event->hw.config & c->cmask) == c->code) 1422 if ((event->hw.config & c->cmask) == c->code) {
1423 /* hw.flags zeroed at initialization */
1424 event->hw.flags |= c->flags;
1402 return c; 1425 return c;
1426 }
1403 } 1427 }
1404 } 1428 }
1405 1429
@@ -1444,6 +1468,7 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1444static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 1468static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1445 struct perf_event *event) 1469 struct perf_event *event)
1446{ 1470{
1471 event->hw.flags = 0;
1447 intel_put_shared_regs_event_constraints(cpuc, event); 1472 intel_put_shared_regs_event_constraints(cpuc, event);
1448} 1473}
1449 1474
@@ -1767,6 +1792,8 @@ static void intel_pmu_flush_branch_stack(void)
1767 1792
1768PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 1793PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1769 1794
1795PMU_FORMAT_ATTR(ldlat, "config1:0-15");
1796
1770static struct attribute *intel_arch3_formats_attr[] = { 1797static struct attribute *intel_arch3_formats_attr[] = {
1771 &format_attr_event.attr, 1798 &format_attr_event.attr,
1772 &format_attr_umask.attr, 1799 &format_attr_umask.attr,
@@ -1777,6 +1804,7 @@ static struct attribute *intel_arch3_formats_attr[] = {
1777 &format_attr_cmask.attr, 1804 &format_attr_cmask.attr,
1778 1805
1779 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ 1806 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
1807 &format_attr_ldlat.attr, /* PEBS load latency */
1780 NULL, 1808 NULL,
1781}; 1809};
1782 1810
@@ -2037,6 +2065,8 @@ __init int intel_pmu_init(void)
2037 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 2065 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2038 x86_pmu.extra_regs = intel_nehalem_extra_regs; 2066 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2039 2067
2068 x86_pmu.cpu_events = nhm_events_attrs;
2069
2040 /* UOPS_ISSUED.STALLED_CYCLES */ 2070 /* UOPS_ISSUED.STALLED_CYCLES */
2041 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2071 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2042 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2072 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2080,6 +2110,8 @@ __init int intel_pmu_init(void)
2080 x86_pmu.extra_regs = intel_westmere_extra_regs; 2110 x86_pmu.extra_regs = intel_westmere_extra_regs;
2081 x86_pmu.er_flags |= ERF_HAS_RSP_1; 2111 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2082 2112
2113 x86_pmu.cpu_events = nhm_events_attrs;
2114
2083 /* UOPS_ISSUED.STALLED_CYCLES */ 2115 /* UOPS_ISSUED.STALLED_CYCLES */
2084 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2116 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2085 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2117 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2111,6 +2143,8 @@ __init int intel_pmu_init(void)
2111 x86_pmu.er_flags |= ERF_HAS_RSP_1; 2143 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2112 x86_pmu.er_flags |= ERF_NO_HT_SHARING; 2144 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2113 2145
2146 x86_pmu.cpu_events = snb_events_attrs;
2147
2114 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 2148 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2115 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2149 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2116 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2150 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2140,6 +2174,8 @@ __init int intel_pmu_init(void)
2140 x86_pmu.er_flags |= ERF_HAS_RSP_1; 2174 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2141 x86_pmu.er_flags |= ERF_NO_HT_SHARING; 2175 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2142 2176
2177 x86_pmu.cpu_events = snb_events_attrs;
2178
2143 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 2179 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2144 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2180 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2145 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2181 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
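The ldlat format attribute added above maps config1 bits 0-15 to the PEBS load-latency threshold, and the SNB mem-loads alias expands to event=0xcd,umask=0x1,ldlat=3. The following is a hedged userspace sketch, not a definitive example, of programming that raw encoding directly through perf_event_open(); it assumes the PERF_SAMPLE_WEIGHT and PERF_SAMPLE_DATA_SRC sample types from the same patch series are present in the headers, and it omits ring-buffer setup, sample parsing, and error handling.

/*
 * Userspace sketch under the assumptions stated above.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_mem_loads(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1cd;		/* event=0xcd, umask=0x1 */
	attr.config1 = 3;		/* ldlat: minimum latency in cycles */
	attr.sample_period = 10007;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
			   PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC;
	attr.precise_ip = 2;		/* request PEBS */

	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}

int main(void)
{
	int fd = open_mem_loads(0);

	if (fd >= 0)
		close(fd);
	return fd < 0;
}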
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 26830f3af0df..60250f687052 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -24,6 +24,130 @@ struct pebs_record_32 {
24 24
25 */ 25 */
26 26
27union intel_x86_pebs_dse {
28 u64 val;
29 struct {
30 unsigned int ld_dse:4;
31 unsigned int ld_stlb_miss:1;
32 unsigned int ld_locked:1;
33 unsigned int ld_reserved:26;
34 };
35 struct {
36 unsigned int st_l1d_hit:1;
37 unsigned int st_reserved1:3;
38 unsigned int st_stlb_miss:1;
39 unsigned int st_locked:1;
40 unsigned int st_reserved2:26;
41 };
42};
43
44
45/*
46 * Map PEBS Load Latency Data Source encodings to generic
47 * memory data source information
48 */
49#define P(a, b) PERF_MEM_S(a, b)
50#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
51#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
52
53static const u64 pebs_data_source[] = {
54 P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
55 OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
56 OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
57 OP_LH | P(LVL, L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
58 OP_LH | P(LVL, L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
59 OP_LH | P(LVL, L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
60 OP_LH | P(LVL, L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
61 OP_LH | P(LVL, L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
62 OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
63 OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
64 OP_LH | P(LVL, LOC_RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
65 OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
66 OP_LH | P(LVL, LOC_RAM) | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
67 OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
68 OP_LH | P(LVL, IO) | P(SNOOP, NONE), /* 0x0e: I/O */
69 OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
70};
71
72static u64 precise_store_data(u64 status)
73{
74 union intel_x86_pebs_dse dse;
75 u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
76
77 dse.val = status;
78
79 /*
80 * bit 4: TLB access
81 * 1 = store missed 2nd level TLB
82 *
83 * so it either hit the walker or the OS
84 * otherwise hit 2nd level TLB
85 */
86 if (dse.st_stlb_miss)
87 val |= P(TLB, MISS);
88 else
89 val |= P(TLB, HIT);
90
91 /*
92 * bit 0: hit L1 data cache
93 * if not set, then all we know is that
94 * it missed L1D
95 */
96 if (dse.st_l1d_hit)
97 val |= P(LVL, HIT);
98 else
99 val |= P(LVL, MISS);
100
101 /*
102 * bit 5: Locked prefix
103 */
104 if (dse.st_locked)
105 val |= P(LOCK, LOCKED);
106
107 return val;
108}
109
110static u64 load_latency_data(u64 status)
111{
112 union intel_x86_pebs_dse dse;
113 u64 val;
114 int model = boot_cpu_data.x86_model;
115 int fam = boot_cpu_data.x86;
116
117 dse.val = status;
118
119 /*
120 * use the mapping table for bit 0-3
121 */
122 val = pebs_data_source[dse.ld_dse];
123
124 /*
125 * Nehalem models do not support TLB, Lock infos
126 */
127 if (fam == 0x6 && (model == 26 || model == 30
128 || model == 31 || model == 46)) {
129 val |= P(TLB, NA) | P(LOCK, NA);
130 return val;
131 }
132 /*
133 * bit 4: TLB access
134 * 0 = did not miss 2nd level TLB
135 * 1 = missed 2nd level TLB
136 */
137 if (dse.ld_stlb_miss)
138 val |= P(TLB, MISS) | P(TLB, L2);
139 else
140 val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
141
142 /*
143 * bit 5: locked prefix
144 */
145 if (dse.ld_locked)
146 val |= P(LOCK, LOCKED);
147
148 return val;
149}
150
27struct pebs_record_core { 151struct pebs_record_core {
28 u64 flags, ip; 152 u64 flags, ip;
29 u64 ax, bx, cx, dx; 153 u64 ax, bx, cx, dx;
@@ -365,7 +489,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
365}; 489};
366 490
367struct event_constraint intel_nehalem_pebs_event_constraints[] = { 491struct event_constraint intel_nehalem_pebs_event_constraints[] = {
368 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 492 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
369 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 493 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
370 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 494 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
371 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ 495 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
@@ -380,7 +504,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
380}; 504};
381 505
382struct event_constraint intel_westmere_pebs_event_constraints[] = { 506struct event_constraint intel_westmere_pebs_event_constraints[] = {
383 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 507 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
384 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 508 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
385 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 509 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
386 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ 510 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
@@ -400,7 +524,8 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
400 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ 524 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
401 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 525 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
402 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 526 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
403 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 527 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
528 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
404 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 529 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
405 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 530 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
406 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 531 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -414,7 +539,8 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
414 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ 539 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
415 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 540 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
416 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 541 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
417 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 542 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
543 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
418 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 544 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
419 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 545 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
420 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 546 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -431,8 +557,10 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
431 557
432 if (x86_pmu.pebs_constraints) { 558 if (x86_pmu.pebs_constraints) {
433 for_each_event_constraint(c, x86_pmu.pebs_constraints) { 559 for_each_event_constraint(c, x86_pmu.pebs_constraints) {
434 if ((event->hw.config & c->cmask) == c->code) 560 if ((event->hw.config & c->cmask) == c->code) {
561 event->hw.flags |= c->flags;
435 return c; 562 return c;
563 }
436 } 564 }
437 } 565 }
438 566
@@ -447,6 +575,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
447 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; 575 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
448 576
449 cpuc->pebs_enabled |= 1ULL << hwc->idx; 577 cpuc->pebs_enabled |= 1ULL << hwc->idx;
578
579 if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
580 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
581 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
582 cpuc->pebs_enabled |= 1ULL << 63;
450} 583}
451 584
452void intel_pmu_pebs_disable(struct perf_event *event) 585void intel_pmu_pebs_disable(struct perf_event *event)
@@ -559,20 +692,51 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
559 struct pt_regs *iregs, void *__pebs) 692 struct pt_regs *iregs, void *__pebs)
560{ 693{
561 /* 694 /*
562 * We cast to pebs_record_core since that is a subset of 695 * We cast to pebs_record_nhm to get the load latency data
563 * both formats and we don't use the other fields in this 696 * if extra_reg MSR_PEBS_LD_LAT_THRESHOLD used
564 * routine.
565 */ 697 */
566 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 698 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
567 struct pebs_record_core *pebs = __pebs; 699 struct pebs_record_nhm *pebs = __pebs;
568 struct perf_sample_data data; 700 struct perf_sample_data data;
569 struct pt_regs regs; 701 struct pt_regs regs;
702 u64 sample_type;
703 int fll, fst;
570 704
571 if (!intel_pmu_save_and_restart(event)) 705 if (!intel_pmu_save_and_restart(event))
572 return; 706 return;
573 707
708 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
709 fst = event->hw.flags & PERF_X86_EVENT_PEBS_ST;
710
574 perf_sample_data_init(&data, 0, event->hw.last_period); 711 perf_sample_data_init(&data, 0, event->hw.last_period);
575 712
713 data.period = event->hw.last_period;
714 sample_type = event->attr.sample_type;
715
716 /*
717 * if PEBS-LL or PreciseStore
718 */
719 if (fll || fst) {
720 if (sample_type & PERF_SAMPLE_ADDR)
721 data.addr = pebs->dla;
722
723 /*
724 * Use latency for weight (only avail with PEBS-LL)
725 */
726 if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
727 data.weight = pebs->lat;
728
729 /*
730 * data.data_src encodes the data source
731 */
732 if (sample_type & PERF_SAMPLE_DATA_SRC) {
733 if (fll)
734 data.data_src.val = load_latency_data(pebs->dse);
735 else
736 data.data_src.val = precise_store_data(pebs->dse);
737 }
738 }
739
576 /* 740 /*
577 * We use the interrupt regs as a base because the PEBS record 741 * We use the interrupt regs as a base because the PEBS record
578 * does not contain a full regs set, specifically it seems to 742 * does not contain a full regs set, specifically it seems to
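load_latency_data() above decodes the PEBS dse word through the intel_x86_pebs_dse bitfields: bits 0-3 index pebs_data_source[], bit 4 flags an STLB miss, and bit 5 a locked access. A standalone sketch of that decoding follows; the string table is an illustrative rendering of pebs_data_source[], not the kernel's PERF_MEM_S() encodings.

/*
 * Standalone sketch, not kernel code: decoding the low PEBS dse bits
 * the way load_latency_data() does.
 */
#include <stdio.h>
#include <stdint.h>

static const char *ld_src[16] = {
	"unknown L3", "L1 hit", "LFB hit", "L2 hit",
	"L3 hit", "L3 hit, snoop miss", "L3 hit, snoop hit", "L3 hit, snoop hitm",
	"remote cache, snoop hit", "remote cache, snoop hitm",
	"local RAM, shared", "remote RAM, shared",
	"local RAM, exclusive", "remote RAM, exclusive",
	"I/O", "uncached",
};

static void decode_load_dse(uint64_t dse)
{
	unsigned int src  = dse & 0xf;		/* bits 0-3: data source */
	unsigned int stlb = (dse >> 4) & 1;	/* bit 4: STLB miss      */
	unsigned int lock = (dse >> 5) & 1;	/* bit 5: locked access  */

	printf("%s, %s STLB, %s\n", ld_src[src],
	       stlb ? "missed" : "hit", lock ? "locked" : "unlocked");
}

int main(void)
{
	decode_load_dse(0x05);	/* L3 hit, snoop miss */
	decode_load_dse(0x31);	/* L1 hit, STLB miss, locked */
	return 0;
}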
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200dbfe7e..d0f9e5aa2151 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -17,6 +17,9 @@ static struct event_constraint constraint_fixed =
17static struct event_constraint constraint_empty = 17static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0); 18 EVENT_CONSTRAINT(0, 0, 0);
19 19
20#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
21 ((1ULL << (n)) - 1)))
22
20DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 23DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); 24DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
22DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 25DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
@@ -31,9 +34,13 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
31DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); 34DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
32DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); 35DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
33DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); 36DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
37DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
34DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); 38DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
39DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
35DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); 40DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
41DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
36DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); 42DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
43DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
37DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); 44DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
38DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); 45DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
39DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); 46DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
@@ -110,6 +117,21 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even
110 reg1->alloc = 0; 117 reg1->alloc = 0;
111} 118}
112 119
120static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
121{
122 struct intel_uncore_extra_reg *er;
123 unsigned long flags;
124 u64 config;
125
126 er = &box->shared_regs[idx];
127
128 raw_spin_lock_irqsave(&er->lock, flags);
129 config = er->config;
130 raw_spin_unlock_irqrestore(&er->lock, flags);
131
132 return config;
133}
134
113/* Sandy Bridge-EP uncore support */ 135/* Sandy Bridge-EP uncore support */
114static struct intel_uncore_type snbep_uncore_cbox; 136static struct intel_uncore_type snbep_uncore_cbox;
115static struct intel_uncore_type snbep_uncore_pcu; 137static struct intel_uncore_type snbep_uncore_pcu;
@@ -205,7 +227,7 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
205 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 227 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
206 228
207 if (reg1->idx != EXTRA_REG_NONE) 229 if (reg1->idx != EXTRA_REG_NONE)
208 wrmsrl(reg1->reg, reg1->config); 230 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
209 231
210 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 232 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
211} 233}
@@ -226,29 +248,6 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
226 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); 248 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
227} 249}
228 250
229static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
230{
231 struct hw_perf_event *hwc = &event->hw;
232 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
233
234 if (box->pmu->type == &snbep_uncore_cbox) {
235 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
236 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
237 reg1->config = event->attr.config1 &
238 SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
239 } else {
240 if (box->pmu->type == &snbep_uncore_pcu) {
241 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
242 reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
243 } else {
244 return 0;
245 }
246 }
247 reg1->idx = 0;
248
249 return 0;
250}
251
252static struct attribute *snbep_uncore_formats_attr[] = { 251static struct attribute *snbep_uncore_formats_attr[] = {
253 &format_attr_event.attr, 252 &format_attr_event.attr,
254 &format_attr_umask.attr, 253 &format_attr_umask.attr,
@@ -345,16 +344,16 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
345 .attrs = snbep_uncore_qpi_formats_attr, 344 .attrs = snbep_uncore_qpi_formats_attr,
346}; 345};
347 346
347#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
348 .init_box = snbep_uncore_msr_init_box, \
349 .disable_box = snbep_uncore_msr_disable_box, \
350 .enable_box = snbep_uncore_msr_enable_box, \
351 .disable_event = snbep_uncore_msr_disable_event, \
352 .enable_event = snbep_uncore_msr_enable_event, \
353 .read_counter = uncore_msr_read_counter
354
348static struct intel_uncore_ops snbep_uncore_msr_ops = { 355static struct intel_uncore_ops snbep_uncore_msr_ops = {
349 .init_box = snbep_uncore_msr_init_box, 356 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
350 .disable_box = snbep_uncore_msr_disable_box,
351 .enable_box = snbep_uncore_msr_enable_box,
352 .disable_event = snbep_uncore_msr_disable_event,
353 .enable_event = snbep_uncore_msr_enable_event,
354 .read_counter = uncore_msr_read_counter,
355 .get_constraint = uncore_get_constraint,
356 .put_constraint = uncore_put_constraint,
357 .hw_config = snbep_uncore_hw_config,
358}; 357};
359 358
360static struct intel_uncore_ops snbep_uncore_pci_ops = { 359static struct intel_uncore_ops snbep_uncore_pci_ops = {
@@ -372,6 +371,7 @@ static struct event_constraint snbep_uncore_cbox_constraints[] = {
372 UNCORE_EVENT_CONSTRAINT(0x04, 0x3), 371 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x05, 0x3), 372 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
374 UNCORE_EVENT_CONSTRAINT(0x07, 0x3), 373 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
374 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
375 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 375 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
376 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 376 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
377 UNCORE_EVENT_CONSTRAINT(0x13, 0x3), 377 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
@@ -421,6 +421,14 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
421 UNCORE_EVENT_CONSTRAINT(0x24, 0x3), 421 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
422 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 422 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
423 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 423 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
429 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
430 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
431 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x30, 0x3), 432 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 433 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 434 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
@@ -428,6 +436,8 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
428 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 436 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
429 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 437 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
430 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 438 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
439 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
440 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
431 EVENT_CONSTRAINT_END 441 EVENT_CONSTRAINT_END
432}; 442};
433 443
@@ -446,6 +456,145 @@ static struct intel_uncore_type snbep_uncore_ubox = {
446 .format_group = &snbep_uncore_ubox_format_group, 456 .format_group = &snbep_uncore_ubox_format_group,
447}; 457};
448 458
459static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
460 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
461 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
462 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
463 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
464 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
465 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
466 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
467 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
468 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
469 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
470 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
471 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
472 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
473 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
474 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
475 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
476 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
477 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
478 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
479 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
480 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
481 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
482 EVENT_EXTRA_END
483};
484
485static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
486{
487 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
488 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
489 int i;
490
491 if (uncore_box_is_fake(box))
492 return;
493
494 for (i = 0; i < 5; i++) {
495 if (reg1->alloc & (0x1 << i))
496 atomic_sub(1 << (i * 6), &er->ref);
497 }
498 reg1->alloc = 0;
499}
500
501static struct event_constraint *
502__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
503 u64 (*cbox_filter_mask)(int fields))
504{
505 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
506 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
507 int i, alloc = 0;
508 unsigned long flags;
509 u64 mask;
510
511 if (reg1->idx == EXTRA_REG_NONE)
512 return NULL;
513
514 raw_spin_lock_irqsave(&er->lock, flags);
515 for (i = 0; i < 5; i++) {
516 if (!(reg1->idx & (0x1 << i)))
517 continue;
518 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
519 continue;
520
521 mask = cbox_filter_mask(0x1 << i);
522 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
523 !((reg1->config ^ er->config) & mask)) {
524 atomic_add(1 << (i * 6), &er->ref);
525 er->config &= ~mask;
526 er->config |= reg1->config & mask;
527 alloc |= (0x1 << i);
528 } else {
529 break;
530 }
531 }
532 raw_spin_unlock_irqrestore(&er->lock, flags);
533 if (i < 5)
534 goto fail;
535
536 if (!uncore_box_is_fake(box))
537 reg1->alloc |= alloc;
538
539 return 0;
540fail:
541 for (; i >= 0; i--) {
542 if (alloc & (0x1 << i))
543 atomic_sub(1 << (i * 6), &er->ref);
544 }
545 return &constraint_empty;
546}
547
548static u64 snbep_cbox_filter_mask(int fields)
549{
550 u64 mask = 0;
551
552 if (fields & 0x1)
553 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
554 if (fields & 0x2)
555 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
556 if (fields & 0x4)
557 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
558 if (fields & 0x8)
559 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
560
561 return mask;
562}
563
564static struct event_constraint *
565snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
566{
567 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
568}
569
570static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
571{
572 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
573 struct extra_reg *er;
574 int idx = 0;
575
576 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
577 if (er->event != (event->hw.config & er->config_mask))
578 continue;
579 idx |= er->idx;
580 }
581
582 if (idx) {
583 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
584 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
585 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
586 reg1->idx = idx;
587 }
588 return 0;
589}
590
591static struct intel_uncore_ops snbep_uncore_cbox_ops = {
592 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
593 .hw_config = snbep_cbox_hw_config,
594 .get_constraint = snbep_cbox_get_constraint,
595 .put_constraint = snbep_cbox_put_constraint,
596};
597
449static struct intel_uncore_type snbep_uncore_cbox = { 598static struct intel_uncore_type snbep_uncore_cbox = {
450 .name = "cbox", 599 .name = "cbox",
451 .num_counters = 4, 600 .num_counters = 4,
@@ -458,10 +607,104 @@ static struct intel_uncore_type snbep_uncore_cbox = {
458 .msr_offset = SNBEP_CBO_MSR_OFFSET, 607 .msr_offset = SNBEP_CBO_MSR_OFFSET,
459 .num_shared_regs = 1, 608 .num_shared_regs = 1,
460 .constraints = snbep_uncore_cbox_constraints, 609 .constraints = snbep_uncore_cbox_constraints,
461 .ops = &snbep_uncore_msr_ops, 610 .ops = &snbep_uncore_cbox_ops,
462 .format_group = &snbep_uncore_cbox_format_group, 611 .format_group = &snbep_uncore_cbox_format_group,
463}; 612};
464 613
614static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
615{
616 struct hw_perf_event *hwc = &event->hw;
617 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
618 u64 config = reg1->config;
619
620 if (new_idx > reg1->idx)
621 config <<= 8 * (new_idx - reg1->idx);
622 else
623 config >>= 8 * (reg1->idx - new_idx);
624
625 if (modify) {
626 hwc->config += new_idx - reg1->idx;
627 reg1->config = config;
628 reg1->idx = new_idx;
629 }
630 return config;
631}
632
633static struct event_constraint *
634snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
635{
636 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
637 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
638 unsigned long flags;
639 int idx = reg1->idx;
640 u64 mask, config1 = reg1->config;
641 bool ok = false;
642
643 if (reg1->idx == EXTRA_REG_NONE ||
644 (!uncore_box_is_fake(box) && reg1->alloc))
645 return NULL;
646again:
647 mask = 0xff << (idx * 8);
648 raw_spin_lock_irqsave(&er->lock, flags);
649 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
650 !((config1 ^ er->config) & mask)) {
651 atomic_add(1 << (idx * 8), &er->ref);
652 er->config &= ~mask;
653 er->config |= config1 & mask;
654 ok = true;
655 }
656 raw_spin_unlock_irqrestore(&er->lock, flags);
657
658 if (!ok) {
659 idx = (idx + 1) % 4;
660 if (idx != reg1->idx) {
661 config1 = snbep_pcu_alter_er(event, idx, false);
662 goto again;
663 }
664 return &constraint_empty;
665 }
666
667 if (!uncore_box_is_fake(box)) {
668 if (idx != reg1->idx)
669 snbep_pcu_alter_er(event, idx, true);
670 reg1->alloc = 1;
671 }
672 return NULL;
673}
674
675static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
676{
677 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
678 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
679
680 if (uncore_box_is_fake(box) || !reg1->alloc)
681 return;
682
683 atomic_sub(1 << (reg1->idx * 8), &er->ref);
684 reg1->alloc = 0;
685}
686
687static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
688{
689 struct hw_perf_event *hwc = &event->hw;
690 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
691 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
692
693 if (ev_sel >= 0xb && ev_sel <= 0xe) {
694 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
695 reg1->idx = ev_sel - 0xb;
696 reg1->config = event->attr.config1 & (0xff << reg1->idx);
697 }
698 return 0;
699}
700
701static struct intel_uncore_ops snbep_uncore_pcu_ops = {
702 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
703 .hw_config = snbep_pcu_hw_config,
704 .get_constraint = snbep_pcu_get_constraint,
705 .put_constraint = snbep_pcu_put_constraint,
706};
707
465static struct intel_uncore_type snbep_uncore_pcu = { 708static struct intel_uncore_type snbep_uncore_pcu = {
466 .name = "pcu", 709 .name = "pcu",
467 .num_counters = 4, 710 .num_counters = 4,
@@ -472,7 +715,7 @@ static struct intel_uncore_type snbep_uncore_pcu = {
472 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 715 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
473 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, 716 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
474 .num_shared_regs = 1, 717 .num_shared_regs = 1,
475 .ops = &snbep_uncore_msr_ops, 718 .ops = &snbep_uncore_pcu_ops,
476 .format_group = &snbep_uncore_pcu_format_group, 719 .format_group = &snbep_uncore_pcu_format_group,
477}; 720};
478 721
@@ -544,55 +787,63 @@ static struct intel_uncore_type snbep_uncore_r3qpi = {
544 SNBEP_UNCORE_PCI_COMMON_INIT(), 787 SNBEP_UNCORE_PCI_COMMON_INIT(),
545}; 788};
546 789
790enum {
791 SNBEP_PCI_UNCORE_HA,
792 SNBEP_PCI_UNCORE_IMC,
793 SNBEP_PCI_UNCORE_QPI,
794 SNBEP_PCI_UNCORE_R2PCIE,
795 SNBEP_PCI_UNCORE_R3QPI,
796};
797
547static struct intel_uncore_type *snbep_pci_uncores[] = { 798static struct intel_uncore_type *snbep_pci_uncores[] = {
548 &snbep_uncore_ha, 799 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
549 &snbep_uncore_imc, 800 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
550 &snbep_uncore_qpi, 801 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
551 &snbep_uncore_r2pcie, 802 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
552 &snbep_uncore_r3qpi, 803 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
553 NULL, 804 NULL,
554}; 805};
555 806
556static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { 807static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
557 { /* Home Agent */ 808 { /* Home Agent */
558 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), 809 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
559 .driver_data = (unsigned long)&snbep_uncore_ha, 810 .driver_data = SNBEP_PCI_UNCORE_HA,
560 }, 811 },
561 { /* MC Channel 0 */ 812 { /* MC Channel 0 */
562 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), 813 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
563 .driver_data = (unsigned long)&snbep_uncore_imc, 814 .driver_data = SNBEP_PCI_UNCORE_IMC,
564 }, 815 },
565 { /* MC Channel 1 */ 816 { /* MC Channel 1 */
566 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), 817 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
567 .driver_data = (unsigned long)&snbep_uncore_imc, 818 .driver_data = SNBEP_PCI_UNCORE_IMC,
568 }, 819 },
569 { /* MC Channel 2 */ 820 { /* MC Channel 2 */
570 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), 821 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
571 .driver_data = (unsigned long)&snbep_uncore_imc, 822 .driver_data = SNBEP_PCI_UNCORE_IMC,
572 }, 823 },
573 { /* MC Channel 3 */ 824 { /* MC Channel 3 */
574 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), 825 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
575 .driver_data = (unsigned long)&snbep_uncore_imc, 826 .driver_data = SNBEP_PCI_UNCORE_IMC,
576 }, 827 },
577 { /* QPI Port 0 */ 828 { /* QPI Port 0 */
578 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), 829 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
579 .driver_data = (unsigned long)&snbep_uncore_qpi, 830 .driver_data = SNBEP_PCI_UNCORE_QPI,
580 }, 831 },
581 { /* QPI Port 1 */ 832 { /* QPI Port 1 */
582 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), 833 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
583 .driver_data = (unsigned long)&snbep_uncore_qpi, 834 .driver_data = SNBEP_PCI_UNCORE_QPI,
584 }, 835 },
585 { /* P2PCIe */ 836 { /* R2PCIe */
586 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), 837 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
587 .driver_data = (unsigned long)&snbep_uncore_r2pcie, 838 .driver_data = SNBEP_PCI_UNCORE_R2PCIE,
588 }, 839 },
589 { /* R3QPI Link 0 */ 840 { /* R3QPI Link 0 */
590 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), 841 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
591 .driver_data = (unsigned long)&snbep_uncore_r3qpi, 842 .driver_data = SNBEP_PCI_UNCORE_R3QPI,
592 }, 843 },
593 { /* R3QPI Link 1 */ 844 { /* R3QPI Link 1 */
594 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), 845 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
595 .driver_data = (unsigned long)&snbep_uncore_r3qpi, 846 .driver_data = SNBEP_PCI_UNCORE_R3QPI,
596 }, 847 },
597 { /* end: all zeroes */ } 848 { /* end: all zeroes */ }
598}; 849};
@@ -605,7 +856,7 @@ static struct pci_driver snbep_uncore_pci_driver = {
605/* 856/*
606 * build pci bus to socket mapping 857 * build pci bus to socket mapping
607 */ 858 */
608static int snbep_pci2phy_map_init(void) 859static int snbep_pci2phy_map_init(int devid)
609{ 860{
610 struct pci_dev *ubox_dev = NULL; 861 struct pci_dev *ubox_dev = NULL;
611 int i, bus, nodeid; 862 int i, bus, nodeid;
@@ -614,9 +865,7 @@ static int snbep_pci2phy_map_init(void)
614 865
615 while (1) { 866 while (1) {
616 /* find the UBOX device */ 867 /* find the UBOX device */
617 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 868 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
618 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
619 ubox_dev);
620 if (!ubox_dev) 869 if (!ubox_dev)
621 break; 870 break;
622 bus = ubox_dev->bus->number; 871 bus = ubox_dev->bus->number;
@@ -639,7 +888,7 @@ static int snbep_pci2phy_map_init(void)
639 break; 888 break;
640 } 889 }
641 } 890 }
642 }; 891 }
643 892
644 if (ubox_dev) 893 if (ubox_dev)
645 pci_dev_put(ubox_dev); 894 pci_dev_put(ubox_dev);
@@ -648,6 +897,440 @@ static int snbep_pci2phy_map_init(void)
648} 897}
649/* end of Sandy Bridge-EP uncore support */ 898/* end of Sandy Bridge-EP uncore support */
650 899
900/* IvyTown uncore support */
901static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
902{
903 unsigned msr = uncore_msr_box_ctl(box);
904 if (msr)
905 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
906}
907
908static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
909{
910 struct pci_dev *pdev = box->pci_dev;
911
912 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
913}
914
915#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
916 .init_box = ivt_uncore_msr_init_box, \
917 .disable_box = snbep_uncore_msr_disable_box, \
918 .enable_box = snbep_uncore_msr_enable_box, \
919 .disable_event = snbep_uncore_msr_disable_event, \
920 .enable_event = snbep_uncore_msr_enable_event, \
921 .read_counter = uncore_msr_read_counter
922
923static struct intel_uncore_ops ivt_uncore_msr_ops = {
924 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
925};
926
927static struct intel_uncore_ops ivt_uncore_pci_ops = {
928 .init_box = ivt_uncore_pci_init_box,
929 .disable_box = snbep_uncore_pci_disable_box,
930 .enable_box = snbep_uncore_pci_enable_box,
931 .disable_event = snbep_uncore_pci_disable_event,
932 .enable_event = snbep_uncore_pci_enable_event,
933 .read_counter = snbep_uncore_pci_read_counter,
934};
935
936#define IVT_UNCORE_PCI_COMMON_INIT() \
937 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
938 .event_ctl = SNBEP_PCI_PMON_CTL0, \
939 .event_mask = IVT_PMON_RAW_EVENT_MASK, \
940 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
941 .ops = &ivt_uncore_pci_ops, \
942 .format_group = &ivt_uncore_format_group
943
944static struct attribute *ivt_uncore_formats_attr[] = {
945 &format_attr_event.attr,
946 &format_attr_umask.attr,
947 &format_attr_edge.attr,
948 &format_attr_inv.attr,
949 &format_attr_thresh8.attr,
950 NULL,
951};
952
953static struct attribute *ivt_uncore_ubox_formats_attr[] = {
954 &format_attr_event.attr,
955 &format_attr_umask.attr,
956 &format_attr_edge.attr,
957 &format_attr_inv.attr,
958 &format_attr_thresh5.attr,
959 NULL,
960};
961
962static struct attribute *ivt_uncore_cbox_formats_attr[] = {
963 &format_attr_event.attr,
964 &format_attr_umask.attr,
965 &format_attr_edge.attr,
966 &format_attr_tid_en.attr,
967 &format_attr_thresh8.attr,
968 &format_attr_filter_tid.attr,
969 &format_attr_filter_link.attr,
970 &format_attr_filter_state2.attr,
971 &format_attr_filter_nid2.attr,
972 &format_attr_filter_opc2.attr,
973 NULL,
974};
975
976static struct attribute *ivt_uncore_pcu_formats_attr[] = {
977 &format_attr_event_ext.attr,
978 &format_attr_occ_sel.attr,
979 &format_attr_edge.attr,
980 &format_attr_thresh5.attr,
981 &format_attr_occ_invert.attr,
982 &format_attr_occ_edge.attr,
983 &format_attr_filter_band0.attr,
984 &format_attr_filter_band1.attr,
985 &format_attr_filter_band2.attr,
986 &format_attr_filter_band3.attr,
987 NULL,
988};
989
990static struct attribute *ivt_uncore_qpi_formats_attr[] = {
991 &format_attr_event_ext.attr,
992 &format_attr_umask.attr,
993 &format_attr_edge.attr,
994 &format_attr_thresh8.attr,
995 NULL,
996};
997
998static struct attribute_group ivt_uncore_format_group = {
999 .name = "format",
1000 .attrs = ivt_uncore_formats_attr,
1001};
1002
1003static struct attribute_group ivt_uncore_ubox_format_group = {
1004 .name = "format",
1005 .attrs = ivt_uncore_ubox_formats_attr,
1006};
1007
1008static struct attribute_group ivt_uncore_cbox_format_group = {
1009 .name = "format",
1010 .attrs = ivt_uncore_cbox_formats_attr,
1011};
1012
1013static struct attribute_group ivt_uncore_pcu_format_group = {
1014 .name = "format",
1015 .attrs = ivt_uncore_pcu_formats_attr,
1016};
1017
1018static struct attribute_group ivt_uncore_qpi_format_group = {
1019 .name = "format",
1020 .attrs = ivt_uncore_qpi_formats_attr,
1021};
1022
1023static struct intel_uncore_type ivt_uncore_ubox = {
1024 .name = "ubox",
1025 .num_counters = 2,
1026 .num_boxes = 1,
1027 .perf_ctr_bits = 44,
1028 .fixed_ctr_bits = 48,
1029 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1030 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1031 .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1032 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1033 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1034 .ops = &ivt_uncore_msr_ops,
1035 .format_group = &ivt_uncore_ubox_format_group,
1036};
1037
1038static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1039 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1040 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1041 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1042 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1043 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1044 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1045 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1046 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1047 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1048 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1049 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1050 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1051 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1052 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1053 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1054 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1055 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1056 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1057 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1058 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1059 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1060 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1061 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1062 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1063 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1064 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1065 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1066 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1067 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1068 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1069 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1070 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1071 EVENT_EXTRA_END
1072};
1073
1074static u64 ivt_cbox_filter_mask(int fields)
1075{
1076 u64 mask = 0;
1077
1078 if (fields & 0x1)
1079 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1080 if (fields & 0x2)
1081 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1082 if (fields & 0x4)
1083 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1084 if (fields & 0x8)
1085 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1086 if (fields & 0x10)
1087 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1088
1089 return mask;
1090}
1091
1092static struct event_constraint *
1093ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1094{
1095 return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1096}
1097
1098static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1099{
1100 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1101 struct extra_reg *er;
1102 int idx = 0;
1103
1104 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1105 if (er->event != (event->hw.config & er->config_mask))
1106 continue;
1107 idx |= er->idx;
1108 }
1109
1110 if (idx) {
1111 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1112 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1113 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1114 reg1->idx = idx;
1115 }
1116 return 0;
1117}
1118
1119static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1120{
1121 struct hw_perf_event *hwc = &event->hw;
1122 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1123
1124 if (reg1->idx != EXTRA_REG_NONE) {
1125 u64 filter = uncore_shared_reg_config(box, 0);
1126 wrmsrl(reg1->reg, filter & 0xffffffff);
1127 wrmsrl(reg1->reg + 6, filter >> 32);
1128 }
1129
1130 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1131}
1132
1133static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1134 .init_box = ivt_uncore_msr_init_box,
1135 .disable_box = snbep_uncore_msr_disable_box,
1136 .enable_box = snbep_uncore_msr_enable_box,
1137 .disable_event = snbep_uncore_msr_disable_event,
1138 .enable_event = ivt_cbox_enable_event,
1139 .read_counter = uncore_msr_read_counter,
1140 .hw_config = ivt_cbox_hw_config,
1141 .get_constraint = ivt_cbox_get_constraint,
1142 .put_constraint = snbep_cbox_put_constraint,
1143};
1144
1145static struct intel_uncore_type ivt_uncore_cbox = {
1146 .name = "cbox",
1147 .num_counters = 4,
1148 .num_boxes = 15,
1149 .perf_ctr_bits = 44,
1150 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1151 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1152 .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1153 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1154 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1155 .num_shared_regs = 1,
1156 .constraints = snbep_uncore_cbox_constraints,
1157 .ops = &ivt_uncore_cbox_ops,
1158 .format_group = &ivt_uncore_cbox_format_group,
1159};
1160
1161static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1162 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1163 .hw_config = snbep_pcu_hw_config,
1164 .get_constraint = snbep_pcu_get_constraint,
1165 .put_constraint = snbep_pcu_put_constraint,
1166};
1167
1168static struct intel_uncore_type ivt_uncore_pcu = {
1169 .name = "pcu",
1170 .num_counters = 4,
1171 .num_boxes = 1,
1172 .perf_ctr_bits = 48,
1173 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1174 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1175 .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1176 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1177 .num_shared_regs = 1,
1178 .ops = &ivt_uncore_pcu_ops,
1179 .format_group = &ivt_uncore_pcu_format_group,
1180};
1181
1182static struct intel_uncore_type *ivt_msr_uncores[] = {
1183 &ivt_uncore_ubox,
1184 &ivt_uncore_cbox,
1185 &ivt_uncore_pcu,
1186 NULL,
1187};
1188
1189static struct intel_uncore_type ivt_uncore_ha = {
1190 .name = "ha",
1191 .num_counters = 4,
1192 .num_boxes = 2,
1193 .perf_ctr_bits = 48,
1194 IVT_UNCORE_PCI_COMMON_INIT(),
1195};
1196
1197static struct intel_uncore_type ivt_uncore_imc = {
1198 .name = "imc",
1199 .num_counters = 4,
1200 .num_boxes = 8,
1201 .perf_ctr_bits = 48,
1202 .fixed_ctr_bits = 48,
1203 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1204 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1205 IVT_UNCORE_PCI_COMMON_INIT(),
1206};
1207
1208static struct intel_uncore_type ivt_uncore_qpi = {
1209 .name = "qpi",
1210 .num_counters = 4,
1211 .num_boxes = 3,
1212 .perf_ctr_bits = 48,
1213 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1214 .event_ctl = SNBEP_PCI_PMON_CTL0,
1215 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1216 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1217 .ops = &ivt_uncore_pci_ops,
1218 .format_group = &ivt_uncore_qpi_format_group,
1219};
1220
1221static struct intel_uncore_type ivt_uncore_r2pcie = {
1222 .name = "r2pcie",
1223 .num_counters = 4,
1224 .num_boxes = 1,
1225 .perf_ctr_bits = 44,
1226 .constraints = snbep_uncore_r2pcie_constraints,
1227 IVT_UNCORE_PCI_COMMON_INIT(),
1228};
1229
1230static struct intel_uncore_type ivt_uncore_r3qpi = {
1231 .name = "r3qpi",
1232 .num_counters = 3,
1233 .num_boxes = 2,
1234 .perf_ctr_bits = 44,
1235 .constraints = snbep_uncore_r3qpi_constraints,
1236 IVT_UNCORE_PCI_COMMON_INIT(),
1237};
1238
1239enum {
1240 IVT_PCI_UNCORE_HA,
1241 IVT_PCI_UNCORE_IMC,
1242 IVT_PCI_UNCORE_QPI,
1243 IVT_PCI_UNCORE_R2PCIE,
1244 IVT_PCI_UNCORE_R3QPI,
1245};
1246
1247static struct intel_uncore_type *ivt_pci_uncores[] = {
1248 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1249 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1250 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1251 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1252 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
1253 NULL,
1254};
1255
1256static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1257 { /* Home Agent 0 */
1258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1259 .driver_data = IVT_PCI_UNCORE_HA,
1260 },
1261 { /* Home Agent 1 */
1262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1263 .driver_data = IVT_PCI_UNCORE_HA,
1264 },
1265 { /* MC0 Channel 0 */
1266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1267 .driver_data = IVT_PCI_UNCORE_IMC,
1268 },
1269 { /* MC0 Channel 1 */
1270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1271 .driver_data = IVT_PCI_UNCORE_IMC,
1272 },
1273 { /* MC0 Channel 3 */
1274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1275 .driver_data = IVT_PCI_UNCORE_IMC,
1276 },
1277 { /* MC0 Channel 4 */
1278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1279 .driver_data = IVT_PCI_UNCORE_IMC,
1280 },
1281 { /* MC1 Channel 0 */
1282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1283 .driver_data = IVT_PCI_UNCORE_IMC,
1284 },
1285 { /* MC1 Channel 1 */
1286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1287 .driver_data = IVT_PCI_UNCORE_IMC,
1288 },
1289 { /* MC1 Channel 3 */
1290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1291 .driver_data = IVT_PCI_UNCORE_IMC,
1292 },
1293 { /* MC1 Channel 4 */
1294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1295 .driver_data = IVT_PCI_UNCORE_IMC,
1296 },
1297 { /* QPI0 Port 0 */
1298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1299 .driver_data = IVT_PCI_UNCORE_QPI,
1300 },
1301 { /* QPI0 Port 1 */
1302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1303 .driver_data = IVT_PCI_UNCORE_QPI,
1304 },
1305 { /* QPI1 Port 2 */
1306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1307 .driver_data = IVT_PCI_UNCORE_QPI,
1308 },
1309 { /* R2PCIe */
1310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1311 .driver_data = IVT_PCI_UNCORE_R2PCIE,
1312 },
1313 { /* R3QPI0 Link 0 */
1314 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1315 .driver_data = IVT_PCI_UNCORE_R3QPI,
1316 },
1317 { /* R3QPI0 Link 1 */
1318 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1319 .driver_data = IVT_PCI_UNCORE_R3QPI,
1320 },
1321 { /* R3QPI1 Link 2 */
1322 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1323 .driver_data = IVT_PCI_UNCORE_R3QPI,
1324 },
1325 { /* end: all zeroes */ }
1326};
1327
1328static struct pci_driver ivt_uncore_pci_driver = {
1329 .name = "ivt_uncore",
1330 .id_table = ivt_uncore_pci_ids,
1331};
1332/* end of IvyTown uncore support */
1333
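Note: the Cbox format attributes registered above expose the event selector bits in config and the new filter fields in config1. A minimal user-space sketch of programming one such event through perf_event_open(2); the event/umask and filter values here are hypothetical, the real bit positions come from the sysfs format files:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/*
 * Hypothetical sketch, not part of the patch. Real encodings come from
 * /sys/bus/event_source/devices/uncore_cbox_*/format/ and .../type.
 */
static int open_cbox_event(int pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;		/* value of the cbox PMU's "type" file */
	attr.config = 0x0334;		/* event/umask bits, per format/event,umask */
	attr.config1 = 1ULL << 52;	/* filter bits, per format/filter_* */

	/* uncore events are system-wide: pid = -1, a CPU on the target socket */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}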
651/* Sandy Bridge uncore support */ 1334/* Sandy Bridge uncore support */
652static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) 1335static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
653{ 1336{
@@ -808,9 +1491,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
808/* end of Nehalem uncore support */ 1491/* end of Nehalem uncore support */
809 1492
810/* Nehalem-EX uncore support */ 1493/* Nehalem-EX uncore support */
811#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
812 ((1ULL << (n)) - 1)))
813
814DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); 1494DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
815DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); 1495DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
816DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); 1496DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
@@ -1161,7 +1841,7 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1161}; 1841};
1162 1842
1163/* Nehalem-EX or Westmere-EX ? */ 1843/* Nehalem-EX or Westmere-EX ? */
1164bool uncore_nhmex; 1844static bool uncore_nhmex;
1165 1845
1166static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) 1846static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1167{ 1847{
@@ -1239,7 +1919,7 @@ static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1239 atomic_sub(1 << (idx * 8), &er->ref); 1919 atomic_sub(1 << (idx * 8), &er->ref);
1240} 1920}
1241 1921
1242u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) 1922static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1243{ 1923{
1244 struct hw_perf_event *hwc = &event->hw; 1924 struct hw_perf_event *hwc = &event->hw;
1245 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1925 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1554,7 +2234,7 @@ static struct intel_uncore_type nhmex_uncore_mbox = {
1554 .format_group = &nhmex_uncore_mbox_format_group, 2234 .format_group = &nhmex_uncore_mbox_format_group,
1555}; 2235};
1556 2236
1557void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) 2237static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1558{ 2238{
1559 struct hw_perf_event *hwc = &event->hw; 2239 struct hw_perf_event *hwc = &event->hw;
1560 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 2240 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1724,21 +2404,6 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
1724 return 0; 2404 return 0;
1725} 2405}
1726 2406
1727static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1728{
1729 struct intel_uncore_extra_reg *er;
1730 unsigned long flags;
1731 u64 config;
1732
1733 er = &box->shared_regs[idx];
1734
1735 raw_spin_lock_irqsave(&er->lock, flags);
1736 config = er->config;
1737 raw_spin_unlock_irqrestore(&er->lock, flags);
1738
1739 return config;
1740}
1741
1742static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) 2407static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1743{ 2408{
1744 struct hw_perf_event *hwc = &event->hw; 2409 struct hw_perf_event *hwc = &event->hw;
@@ -1759,7 +2424,7 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
1759 case 2: 2424 case 2:
1760 case 3: 2425 case 3:
1761 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), 2426 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
1762 nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5)); 2427 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
1763 break; 2428 break;
1764 case 4: 2429 case 4:
1765 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), 2430 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
@@ -2285,7 +2950,7 @@ out:
2285 return ret; 2950 return ret;
2286} 2951}
2287 2952
2288int uncore_pmu_event_init(struct perf_event *event) 2953static int uncore_pmu_event_init(struct perf_event *event)
2289{ 2954{
2290 struct intel_uncore_pmu *pmu; 2955 struct intel_uncore_pmu *pmu;
2291 struct intel_uncore_box *box; 2956 struct intel_uncore_box *box;
@@ -2438,7 +3103,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
2438 3103
2439 type->unconstrainted = (struct event_constraint) 3104 type->unconstrainted = (struct event_constraint)
2440 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, 3105 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2441 0, type->num_counters, 0); 3106 0, type->num_counters, 0, 0);
2442 3107
2443 for (i = 0; i < type->num_boxes; i++) { 3108 for (i = 0; i < type->num_boxes; i++) {
2444 pmus[i].func_id = -1; 3109 pmus[i].func_id = -1;
@@ -2556,6 +3221,8 @@ static void uncore_pci_remove(struct pci_dev *pdev)
2556 if (WARN_ON_ONCE(phys_id != box->phys_id)) 3221 if (WARN_ON_ONCE(phys_id != box->phys_id))
2557 return; 3222 return;
2558 3223
3224 pci_set_drvdata(pdev, NULL);
3225
2559 raw_spin_lock(&uncore_box_lock); 3226 raw_spin_lock(&uncore_box_lock);
2560 list_del(&box->list); 3227 list_del(&box->list);
2561 raw_spin_unlock(&uncore_box_lock); 3228 raw_spin_unlock(&uncore_box_lock);
@@ -2574,11 +3241,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
2574static int uncore_pci_probe(struct pci_dev *pdev, 3241static int uncore_pci_probe(struct pci_dev *pdev,
2575 const struct pci_device_id *id) 3242 const struct pci_device_id *id)
2576{ 3243{
2577 struct intel_uncore_type *type; 3244 return uncore_pci_add(pci_uncores[id->driver_data], pdev);
2578
2579 type = (struct intel_uncore_type *)id->driver_data;
2580
2581 return uncore_pci_add(type, pdev);
2582} 3245}
2583 3246
2584static int __init uncore_pci_init(void) 3247static int __init uncore_pci_init(void)
@@ -2587,12 +3250,19 @@ static int __init uncore_pci_init(void)
2587 3250
2588 switch (boot_cpu_data.x86_model) { 3251 switch (boot_cpu_data.x86_model) {
2589 case 45: /* Sandy Bridge-EP */ 3252 case 45: /* Sandy Bridge-EP */
2590 ret = snbep_pci2phy_map_init(); 3253 ret = snbep_pci2phy_map_init(0x3ce0);
2591 if (ret) 3254 if (ret)
2592 return ret; 3255 return ret;
2593 pci_uncores = snbep_pci_uncores; 3256 pci_uncores = snbep_pci_uncores;
2594 uncore_pci_driver = &snbep_uncore_pci_driver; 3257 uncore_pci_driver = &snbep_uncore_pci_driver;
2595 break; 3258 break;
3259 case 62: /* IvyTown */
3260 ret = snbep_pci2phy_map_init(0x0e1e);
3261 if (ret)
3262 return ret;
3263 pci_uncores = ivt_pci_uncores;
3264 uncore_pci_driver = &ivt_uncore_pci_driver;
3265 break;
2596 default: 3266 default:
2597 return 0; 3267 return 0;
2598 } 3268 }
@@ -2622,6 +3292,21 @@ static void __init uncore_pci_exit(void)
2622 } 3292 }
2623} 3293}
2624 3294
3295/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3296static LIST_HEAD(boxes_to_free);
3297
3298static void __cpuinit uncore_kfree_boxes(void)
3299{
3300 struct intel_uncore_box *box;
3301
3302 while (!list_empty(&boxes_to_free)) {
3303 box = list_entry(boxes_to_free.next,
3304 struct intel_uncore_box, list);
3305 list_del(&box->list);
3306 kfree(box);
3307 }
3308}
3309
2625static void __cpuinit uncore_cpu_dying(int cpu) 3310static void __cpuinit uncore_cpu_dying(int cpu)
2626{ 3311{
2627 struct intel_uncore_type *type; 3312 struct intel_uncore_type *type;
@@ -2636,7 +3321,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
2636 box = *per_cpu_ptr(pmu->box, cpu); 3321 box = *per_cpu_ptr(pmu->box, cpu);
2637 *per_cpu_ptr(pmu->box, cpu) = NULL; 3322 *per_cpu_ptr(pmu->box, cpu) = NULL;
2638 if (box && atomic_dec_and_test(&box->refcnt)) 3323 if (box && atomic_dec_and_test(&box->refcnt))
2639 kfree(box); 3324 list_add(&box->list, &boxes_to_free);
2640 } 3325 }
2641 } 3326 }
2642} 3327}
@@ -2666,8 +3351,11 @@ static int __cpuinit uncore_cpu_starting(int cpu)
2666 if (exist && exist->phys_id == phys_id) { 3351 if (exist && exist->phys_id == phys_id) {
2667 atomic_inc(&exist->refcnt); 3352 atomic_inc(&exist->refcnt);
2668 *per_cpu_ptr(pmu->box, cpu) = exist; 3353 *per_cpu_ptr(pmu->box, cpu) = exist;
2669 kfree(box); 3354 if (box) {
2670 box = NULL; 3355 list_add(&box->list,
3356 &boxes_to_free);
3357 box = NULL;
3358 }
2671 break; 3359 break;
2672 } 3360 }
2673 } 3361 }
@@ -2806,6 +3494,10 @@ static int
2806 case CPU_DYING: 3494 case CPU_DYING:
2807 uncore_cpu_dying(cpu); 3495 uncore_cpu_dying(cpu);
2808 break; 3496 break;
3497 case CPU_ONLINE:
3498 case CPU_DEAD:
3499 uncore_kfree_boxes();
3500 break;
2809 default: 3501 default:
2810 break; 3502 break;
2811 } 3503 }
@@ -2871,6 +3563,12 @@ static int __init uncore_cpu_init(void)
2871 nhmex_uncore_cbox.num_boxes = max_cores; 3563 nhmex_uncore_cbox.num_boxes = max_cores;
2872 msr_uncores = nhmex_msr_uncores; 3564 msr_uncores = nhmex_msr_uncores;
2873 break; 3565 break;
3566 case 62: /* IvyTown */
3567 if (ivt_uncore_cbox.num_boxes > max_cores)
3568 ivt_uncore_cbox.num_boxes = max_cores;
3569 msr_uncores = ivt_msr_uncores;
3570 break;
3571
2874 default: 3572 default:
2875 return 0; 3573 return 0;
2876 } 3574 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index e68a4550e952..f9528917f6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -76,7 +76,7 @@
76#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 76#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
77#define SNBEP_PMON_CTL_RST (1 << 17) 77#define SNBEP_PMON_CTL_RST (1 << 17)
78#define SNBEP_PMON_CTL_EDGE_DET (1 << 18) 78#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
79#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */ 79#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
80#define SNBEP_PMON_CTL_EN (1 << 22) 80#define SNBEP_PMON_CTL_EN (1 << 22)
81#define SNBEP_PMON_CTL_INVERT (1 << 23) 81#define SNBEP_PMON_CTL_INVERT (1 << 23)
82#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 82#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
@@ -148,9 +148,20 @@
148#define SNBEP_C0_MSR_PMON_CTL0 0xd10 148#define SNBEP_C0_MSR_PMON_CTL0 0xd10
149#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 149#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
150#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 150#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
151#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
152#define SNBEP_CBO_MSR_OFFSET 0x20 151#define SNBEP_CBO_MSR_OFFSET 0x20
153 152
153#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
154#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
155#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
156#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
157
158#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
159 .event = (e), \
160 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
161 .config_mask = (m), \
162 .idx = (i) \
163}
164
154/* SNB-EP PCU register */ 165/* SNB-EP PCU register */
155#define SNBEP_PCU_MSR_PMON_CTR0 0xc36 166#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
156#define SNBEP_PCU_MSR_PMON_CTL0 0xc30 167#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
@@ -160,6 +171,55 @@
160#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc 171#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
161#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd 172#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
162 173
174/* IVT event control */
175#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
176 SNBEP_PMON_BOX_CTL_RST_CTRS)
177#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
178 SNBEP_PMON_CTL_UMASK_MASK | \
179 SNBEP_PMON_CTL_EDGE_DET | \
180 SNBEP_PMON_CTL_TRESH_MASK)
181/* IVT Ubox */
182#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00
183#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
184#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
185
186#define IVT_U_MSR_PMON_RAW_EVENT_MASK \
187 (SNBEP_PMON_CTL_EV_SEL_MASK | \
188 SNBEP_PMON_CTL_UMASK_MASK | \
189 SNBEP_PMON_CTL_EDGE_DET | \
190 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
191/* IVT Cbo */
192#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \
193 SNBEP_CBO_PMON_CTL_TID_EN)
194
195#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
196#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
197#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
198#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
199#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
200#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
201#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
202#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63)
203
204/* IVT home agent */
205#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
206#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \
207 (IVT_PMON_RAW_EVENT_MASK | \
208 IVT_HA_PCI_PMON_CTL_Q_OCC_RST)
209/* IVT PCU */
210#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \
211 (SNBEP_PMON_CTL_EV_SEL_MASK | \
212 SNBEP_PMON_CTL_EV_SEL_EXT | \
213 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
214 SNBEP_PMON_CTL_EDGE_DET | \
215 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
216 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
217 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
218/* IVT QPI */
219#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \
220 (IVT_PMON_RAW_EVENT_MASK | \
221 SNBEP_PMON_CTL_EV_SEL_EXT)
222
163/* NHM-EX event control */ 223/* NHM-EX event control */
164#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff 224#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
165#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 225#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
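Note: unlike SNB-EP, where the Cbox filter fields all sit in the low 32 bits, the IVT filter word spans 64 bits (NID at bit 32, OPC at bit 52), which is why ivt_cbox_enable_event() writes it across two MSRs. A small hypothetical helper, not part of the patch, that packs a filter value from the masks defined above:

#include <linux/types.h>

/*
 * Shift counts mirror the mask definitions above:
 * TID at bit 0, NID at bit 32, OPC at bit 52.
 */
static inline u64 ivt_make_cbox_filter(u8 tid, u16 nid, u16 opc)
{
	u64 filter = 0;

	filter |= ((u64)tid << 0)  & IVT_CB0_MSR_PMON_BOX_FILTER_TID;
	filter |= ((u64)nid << 32) & IVT_CB0_MSR_PMON_BOX_FILTER_NID;
	filter |= ((u64)opc << 52) & IVT_CB0_MSR_PMON_BOX_FILTER_OPC;

	return filter;
}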
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 4b7731bf23a8..838fa8772c62 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -17,7 +17,7 @@ static const u64 knc_perfmon_event_map[] =
17 [PERF_COUNT_HW_BRANCH_MISSES] = 0x002b, 17 [PERF_COUNT_HW_BRANCH_MISSES] = 0x002b,
18}; 18};
19 19
20static __initconst u64 knc_hw_cache_event_ids 20static const u64 __initconst knc_hw_cache_event_ids
21 [PERF_COUNT_HW_CACHE_MAX] 21 [PERF_COUNT_HW_CACHE_MAX]
22 [PERF_COUNT_HW_CACHE_OP_MAX] 22 [PERF_COUNT_HW_CACHE_OP_MAX]
23 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 23 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -284,7 +284,7 @@ static struct attribute *intel_knc_formats_attr[] = {
284 NULL, 284 NULL,
285}; 285};
286 286
287static __initconst struct x86_pmu knc_pmu = { 287static const struct x86_pmu knc_pmu __initconst = {
288 .name = "knc", 288 .name = "knc",
289 .handle_irq = knc_pmu_handle_irq, 289 .handle_irq = knc_pmu_handle_irq,
290 .disable_all = knc_pmu_disable_all, 290 .disable_all = knc_pmu_disable_all,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 92c7e39a079f..3486e6660357 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
895 * So at moment let leave metrics turned on forever -- it's 895 * So at moment let leave metrics turned on forever -- it's
896 * ok for now but need to be revisited! 896 * ok for now but need to be revisited!
897 * 897 *
898 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0); 898 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
899 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0); 899 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
900 */ 900 */
901} 901}
902 902
@@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
910 * asserted again and again 910 * asserted again and again
911 */ 911 */
912 (void)wrmsrl_safe(hwc->config_base, 912 (void)wrmsrl_safe(hwc->config_base,
913 (u64)(p4_config_unpack_cccr(hwc->config)) & 913 p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
914 ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
915} 914}
916 915
917static void p4_pmu_disable_all(void) 916static void p4_pmu_disable_all(void)
@@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
957 u64 escr_addr, cccr; 956 u64 escr_addr, cccr;
958 957
959 bind = &p4_event_bind_map[idx]; 958 bind = &p4_event_bind_map[idx];
960 escr_addr = (u64)bind->escr_msr[thread]; 959 escr_addr = bind->escr_msr[thread];
961 960
962 /* 961 /*
963 * - we dont support cascaded counters yet 962 * - we dont support cascaded counters yet
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 4820c232a0b9..b1e2fe115323 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
19 19
20}; 20};
21 21
22static u64 p6_hw_cache_event_ids 22static const u64 __initconst p6_hw_cache_event_ids
23 [PERF_COUNT_HW_CACHE_MAX] 23 [PERF_COUNT_HW_CACHE_MAX]
24 [PERF_COUNT_HW_CACHE_OP_MAX] 24 [PERF_COUNT_HW_CACHE_OP_MAX]
25 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e280253f6f94..37a198bd48c8 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -34,9 +34,9 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
34 "fpu_exception\t: %s\n" 34 "fpu_exception\t: %s\n"
35 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
36 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
37 c->fdiv_bug ? "yes" : "no", 37 static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
38 c->f00f_bug ? "yes" : "no", 38 static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
39 c->coma_bug ? "yes" : "no", 39 static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
40 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
41 c->hard_math ? "yes" : "no", 41 c->hard_math ? "yes" : "no",
42 c->cpuid_level, 42 c->cpuid_level,
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index ee8e9abc859f..d92b5dad15dd 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -39,8 +39,9 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
39 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, 39 { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
40 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, 40 { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
41 { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, 41 { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 },
42 { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
43 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, 42 { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
43 { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
44 { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
44 { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 }, 45 { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
45 { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, 46 { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
46 { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, 47 { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index 37250fe490b1..155a13f33ed8 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -20,7 +20,7 @@ static void doublefault_fn(void)
20 struct desc_ptr gdt_desc = {0, 0}; 20 struct desc_ptr gdt_desc = {0, 0};
21 unsigned long gdt, tss; 21 unsigned long gdt, tss;
22 22
23 store_gdt(&gdt_desc); 23 native_store_gdt(&gdt_desc);
24 gdt = gdt_desc.address; 24 gdt = gdt_desc.address;
25 25
26 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); 26 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c8797d55b245..deb6421c9e69 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -176,26 +176,20 @@ void show_trace(struct task_struct *task, struct pt_regs *regs,
176 176
177void show_stack(struct task_struct *task, unsigned long *sp) 177void show_stack(struct task_struct *task, unsigned long *sp)
178{ 178{
179 show_stack_log_lvl(task, NULL, sp, 0, ""); 179 unsigned long bp = 0;
180}
181
182/*
183 * The architecture-independent dump_stack generator
184 */
185void dump_stack(void)
186{
187 unsigned long bp;
188 unsigned long stack; 180 unsigned long stack;
189 181
190 bp = stack_frame(current, NULL); 182 /*
191 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 183 * Stack frames below this one aren't interesting. Don't show them
192 current->pid, current->comm, print_tainted(), 184 * if we're printing for %current.
193 init_utsname()->release, 185 */
194 (int)strcspn(init_utsname()->version, " "), 186 if (!sp && (!task || task == current)) {
195 init_utsname()->version); 187 sp = &stack;
196 show_trace(NULL, NULL, &stack, bp); 188 bp = stack_frame(current, NULL);
189 }
190
191 show_stack_log_lvl(task, NULL, sp, bp, "");
197} 192}
198EXPORT_SYMBOL(dump_stack);
199 193
200static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 194static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
201static int die_owner = -1; 195static int die_owner = -1;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 1038a417ea53..f2a1770ca176 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -86,11 +86,9 @@ void show_regs(struct pt_regs *regs)
86{ 86{
87 int i; 87 int i;
88 88
89 show_regs_print_info(KERN_EMERG);
89 __show_regs(regs, !user_mode_vm(regs)); 90 __show_regs(regs, !user_mode_vm(regs));
90 91
91 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
92 TASK_COMM_LEN, current->comm, task_pid_nr(current),
93 current_thread_info(), current, task_thread_info(current));
94 /* 92 /*
95 * When in-kernel, we also print out the stack and code at the 93 * When in-kernel, we also print out the stack and code at the
96 * time of the fault.. 94 * time of the fault..
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index b653675d5288..addb207dab92 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -249,14 +249,10 @@ void show_regs(struct pt_regs *regs)
249{ 249{
250 int i; 250 int i;
251 unsigned long sp; 251 unsigned long sp;
252 const int cpu = smp_processor_id();
253 struct task_struct *cur = current;
254 252
255 sp = regs->sp; 253 sp = regs->sp;
256 printk("CPU %d ", cpu); 254 show_regs_print_info(KERN_DEFAULT);
257 __show_regs(regs, 1); 255 __show_regs(regs, 1);
258 printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n",
259 cur->comm, cur->pid, task_thread_info(cur), cur);
260 256
261 /* 257 /*
262 * When in-kernel, we also print out the stack and code at the 258 * When in-kernel, we also print out the stack and code at the
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 9b9f18b49918..d15f575a861b 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -169,25 +169,9 @@ static struct console early_serial_console = {
169 .index = -1, 169 .index = -1,
170}; 170};
171 171
172/* Direct interface for emergencies */
173static struct console *early_console = &early_vga_console;
174static int __initdata early_console_initialized;
175
176asmlinkage void early_printk(const char *fmt, ...)
177{
178 char buf[512];
179 int n;
180 va_list ap;
181
182 va_start(ap, fmt);
183 n = vscnprintf(buf, sizeof(buf), fmt, ap);
184 early_console->write(early_console, buf, n);
185 va_end(ap);
186}
187
188static inline void early_console_register(struct console *con, int keep_early) 172static inline void early_console_register(struct console *con, int keep_early)
189{ 173{
190 if (early_console->index != -1) { 174 if (con->index != -1) {
191 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", 175 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
192 con->name); 176 con->name);
193 return; 177 return;
@@ -207,9 +191,8 @@ static int __init setup_early_printk(char *buf)
207 if (!buf) 191 if (!buf)
208 return 0; 192 return 0;
209 193
210 if (early_console_initialized) 194 if (early_console)
211 return 0; 195 return 0;
212 early_console_initialized = 1;
213 196
214 keep = (strstr(buf, "keep") != NULL); 197 keep = (strstr(buf, "keep") != NULL);
215 198
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c5e403f6d869..101ac1a9263e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
144 * Build-time sanity checks on the kernel image and module 144 * Build-time sanity checks on the kernel image and module
145 * area mappings. (these are purely build-time and produce no code) 145 * area mappings. (these are purely build-time and produce no code)
146 */ 146 */
147 BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START); 147 BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
148 BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE); 148 BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
149 BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE); 149 BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
150 BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0); 150 BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
151 BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0); 151 BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
152 BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); 152 BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
153 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 153 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7bfe318d3d8a..9895a9a41380 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -353,7 +353,11 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
353 * have given. 353 * have given.
354 */ 354 */
355 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; 355 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
356 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ 356 if ((s64) (s32) newdisp != newdisp) {
357 pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
358 pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
359 return 0;
360 }
357 disp = (u8 *) dest + insn_offset_displacement(&insn); 361 disp = (u8 *) dest + insn_offset_displacement(&insn);
358 *(s32 *) disp = (s32) newdisp; 362 *(s32 *) disp = (s32) newdisp;
359 } 363 }
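Note: the new check above replaces a BUG_ON() with a graceful failure when a RIP-relative displacement cannot be re-encoded at the copied location; the test itself is the usual sign-extension round trip, shown standalone below:

#include <linux/types.h>

/* A displacement is only safe to rewrite if it survives a trip through s32. */
static bool disp_fits_s32(s64 newdisp)
{
	return (s64)(s32)newdisp == newdisp;
}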
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b686a904d7c3..cd6d9a5a42f6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -20,6 +20,7 @@
20 * Authors: Anthony Liguori <aliguori@us.ibm.com> 20 * Authors: Anthony Liguori <aliguori@us.ibm.com>
21 */ 21 */
22 22
23#include <linux/context_tracking.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/kvm_para.h> 26#include <linux/kvm_para.h>
@@ -43,7 +44,6 @@
43#include <asm/apicdef.h> 44#include <asm/apicdef.h>
44#include <asm/hypervisor.h> 45#include <asm/hypervisor.h>
45#include <asm/kvm_guest.h> 46#include <asm/kvm_guest.h>
46#include <asm/context_tracking.h>
47 47
48static int kvmapf = 1; 48static int kvmapf = 1;
49 49
@@ -254,16 +254,18 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
254dotraplinkage void __kprobes 254dotraplinkage void __kprobes
255do_async_page_fault(struct pt_regs *regs, unsigned long error_code) 255do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
256{ 256{
257 enum ctx_state prev_state;
258
257 switch (kvm_read_and_reset_pf_reason()) { 259 switch (kvm_read_and_reset_pf_reason()) {
258 default: 260 default:
259 do_page_fault(regs, error_code); 261 do_page_fault(regs, error_code);
260 break; 262 break;
261 case KVM_PV_REASON_PAGE_NOT_PRESENT: 263 case KVM_PV_REASON_PAGE_NOT_PRESENT:
262 /* page is swapped out by the host. */ 264 /* page is swapped out by the host. */
263 exception_enter(regs); 265 prev_state = exception_enter();
264 exit_idle(); 266 exit_idle();
265 kvm_async_pf_task_wait((u32)read_cr2()); 267 kvm_async_pf_task_wait((u32)read_cr2());
266 exception_exit(regs); 268 exception_exit(prev_state);
267 break; 269 break;
268 case KVM_PV_REASON_PAGE_READY: 270 case KVM_PV_REASON_PAGE_READY:
269 rcu_irq_enter(); 271 rcu_irq_enter();
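Note: the context-tracking calls above follow the new save/restore convention: exception_enter() returns the previous context state, which must be handed back to exception_exit(). A hypothetical skeleton of that pattern:

#include <linux/context_tracking.h>

/* Illustrative only; mirrors the convention used in the hunk above. */
static void example_exception_path(void)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();	/* returns the state to restore */
	/* ... exception handling ... */
	exception_exit(prev_state);
}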
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 8bfb335f74bb..cd6de64cc480 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -360,7 +360,6 @@ struct pv_cpu_ops pv_cpu_ops = {
360 .set_ldt = native_set_ldt, 360 .set_ldt = native_set_ldt,
361 .load_gdt = native_load_gdt, 361 .load_gdt = native_load_gdt,
362 .load_idt = native_load_idt, 362 .load_idt = native_load_idt,
363 .store_gdt = native_store_gdt,
364 .store_idt = native_store_idt, 363 .store_idt = native_store_idt,
365 .store_tr = native_store_tr, 364 .store_tr = native_store_tr,
366 .load_tls = native_load_tls, 365 .load_tls = native_load_tls,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14ae10031ff0..607af0d4d5ef 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -121,30 +121,6 @@ void exit_thread(void)
121 drop_fpu(me); 121 drop_fpu(me);
122} 122}
123 123
124void show_regs_common(void)
125{
126 const char *vendor, *product, *board;
127
128 vendor = dmi_get_system_info(DMI_SYS_VENDOR);
129 if (!vendor)
130 vendor = "";
131 product = dmi_get_system_info(DMI_PRODUCT_NAME);
132 if (!product)
133 product = "";
134
135 /* Board Name is optional */
136 board = dmi_get_system_info(DMI_BOARD_NAME);
137
138 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
139 current->pid, current->comm, print_tainted(),
140 init_utsname()->release,
141 (int)strcspn(init_utsname()->version, " "),
142 init_utsname()->version,
143 vendor, product,
144 board ? "/" : "",
145 board ? board : "");
146}
147
148void flush_thread(void) 124void flush_thread(void)
149{ 125{
150 struct task_struct *tsk = current; 126 struct task_struct *tsk = current;
@@ -301,13 +277,7 @@ void exit_idle(void)
301} 277}
302#endif 278#endif
303 279
304/* 280void arch_cpu_idle_prepare(void)
305 * The idle thread. There's no useful work to be
306 * done, so just try to conserve power and have a
307 * low exit latency (ie sit in a loop waiting for
308 * somebody to say that they'd like to reschedule)
309 */
310void cpu_idle(void)
311{ 281{
312 /* 282 /*
313 * If we're the non-boot CPU, nothing set the stack canary up 283 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +287,40 @@ void cpu_idle(void)
317 * canaries already on the stack wont ever trigger). 287 * canaries already on the stack wont ever trigger).
318 */ 288 */
319 boot_init_stack_canary(); 289 boot_init_stack_canary();
320 current_thread_info()->status |= TS_POLLING; 290}
321
322 while (1) {
323 tick_nohz_idle_enter();
324
325 while (!need_resched()) {
326 rmb();
327
328 if (cpu_is_offline(smp_processor_id()))
329 play_dead();
330
331 /*
332 * Idle routines should keep interrupts disabled
333 * from here on, until they go to idle.
334 * Otherwise, idle callbacks can misfire.
335 */
336 local_touch_nmi();
337 local_irq_disable();
338
339 enter_idle();
340
341 /* Don't trace irqs off for idle */
342 stop_critical_timings();
343
344 /* enter_idle() needs rcu for notifiers */
345 rcu_idle_enter();
346 291
347 if (cpuidle_idle_call()) 292void arch_cpu_idle_enter(void)
348 x86_idle(); 293{
294 local_touch_nmi();
295 enter_idle();
296}
349 297
350 rcu_idle_exit(); 298void arch_cpu_idle_exit(void)
351 start_critical_timings(); 299{
300 __exit_idle();
301}
352 302
353 /* In many cases the interrupt that ended idle 303void arch_cpu_idle_dead(void)
354 has already called exit_idle. But some idle 304{
355 loops can be woken up without interrupt. */ 305 play_dead();
356 __exit_idle(); 306}
357 }
358 307
359 tick_nohz_idle_exit(); 308/*
360 preempt_enable_no_resched(); 309 * Called from the generic idle code.
361 schedule(); 310 */
362 preempt_disable(); 311void arch_cpu_idle(void)
363 } 312{
313 if (cpuidle_idle_call())
314 x86_idle();
364} 315}
365 316
366/* 317/*
367 * We use this if we don't have any better 318 * We use this if we don't have any better idle routine..
368 * idle routine..
369 */ 319 */
370void default_idle(void) 320void default_idle(void)
371{ 321{
372 trace_cpu_idle_rcuidle(1, smp_processor_id()); 322 trace_cpu_idle_rcuidle(1, smp_processor_id());
373 current_thread_info()->status &= ~TS_POLLING; 323 safe_halt();
374 /*
375 * TS_POLLING-cleared state must be visible before we
376 * test NEED_RESCHED:
377 */
378 smp_mb();
379
380 if (!need_resched())
381 safe_halt(); /* enables interrupts racelessly */
382 else
383 local_irq_enable();
384 current_thread_info()->status |= TS_POLLING;
385 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 324 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
386} 325}
387#ifdef CONFIG_APM_MODULE 326#ifdef CONFIG_APM_MODULE
@@ -411,20 +350,6 @@ void stop_this_cpu(void *dummy)
411 halt(); 350 halt();
412} 351}
413 352
414/*
415 * On SMP it's slightly faster (but much more power-consuming!)
416 * to poll the ->work.need_resched flag instead of waiting for the
417 * cross-CPU IPI to arrive. Use this option with caution.
418 */
419static void poll_idle(void)
420{
421 trace_cpu_idle_rcuidle(0, smp_processor_id());
422 local_irq_enable();
423 while (!need_resched())
424 cpu_relax();
425 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
426}
427
428bool amd_e400_c1e_detected; 353bool amd_e400_c1e_detected;
429EXPORT_SYMBOL(amd_e400_c1e_detected); 354EXPORT_SYMBOL(amd_e400_c1e_detected);
430 355
@@ -489,13 +414,13 @@ static void amd_e400_idle(void)
489void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 414void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
490{ 415{
491#ifdef CONFIG_SMP 416#ifdef CONFIG_SMP
492 if (x86_idle == poll_idle && smp_num_siblings > 1) 417 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
493 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); 418 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
494#endif 419#endif
495 if (x86_idle) 420 if (x86_idle || boot_option_idle_override == IDLE_POLL)
496 return; 421 return;
497 422
498 if (cpu_has_amd_erratum(amd_erratum_400)) { 423 if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
499 /* E400: APIC timer interrupt does not wake up CPU from C1e */ 424 /* E400: APIC timer interrupt does not wake up CPU from C1e */
500 pr_info("using AMD E400 aware idle routine\n"); 425 pr_info("using AMD E400 aware idle routine\n");
501 x86_idle = amd_e400_idle; 426 x86_idle = amd_e400_idle;
@@ -517,8 +442,8 @@ static int __init idle_setup(char *str)
517 442
518 if (!strcmp(str, "poll")) { 443 if (!strcmp(str, "poll")) {
519 pr_info("using polling idle threads\n"); 444 pr_info("using polling idle threads\n");
520 x86_idle = poll_idle;
521 boot_option_idle_override = IDLE_POLL; 445 boot_option_idle_override = IDLE_POLL;
446 cpu_idle_poll_ctrl(true);
522 } else if (!strcmp(str, "halt")) { 447 } else if (!strcmp(str, "halt")) {
523 /* 448 /*
524 * When the boot option of idle=halt is added, halt is 449 * When the boot option of idle=halt is added, halt is
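Note: the hunk above converts x86 from its private cpu_idle() loop to the generic idle loop, which calls back into a small set of arch hooks; roughly the shape below, with hook bodies that are illustrative rather than the actual x86 ones:

#include <linux/cpu.h>
#include <linux/irqflags.h>

/* Sketch of the hooks an architecture supplies to the generic idle loop. */
void arch_cpu_idle_prepare(void)	{ /* one-time per-CPU setup */ }
void arch_cpu_idle_enter(void)		{ /* before each idle period, IRQs off */ }
void arch_cpu_idle(void)		{ local_irq_enable(); /* idle with IRQs re-enabled on return */ }
void arch_cpu_idle_exit(void)		{ /* after wakeup */ }
void arch_cpu_idle_dead(void)		{ /* park an offlined CPU */ }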
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index b5a8905785e6..7305f7dfc7ab 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -84,8 +84,6 @@ void __show_regs(struct pt_regs *regs, int all)
84 savesegment(gs, gs); 84 savesegment(gs, gs);
85 } 85 }
86 86
87 show_regs_common();
88
89 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", 87 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
90 (u16)regs->cs, regs->ip, regs->flags, 88 (u16)regs->cs, regs->ip, regs->flags,
91 smp_processor_id()); 89 smp_processor_id());
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0f49677da51e..355ae06dbf94 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,7 +62,6 @@ void __show_regs(struct pt_regs *regs, int all)
62 unsigned int fsindex, gsindex; 62 unsigned int fsindex, gsindex;
63 unsigned int ds, cs, es; 63 unsigned int ds, cs, es;
64 64
65 show_regs_common();
66 printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 65 printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
67 printk_address(regs->ip, 1); 66 printk_address(regs->ip, 1);
68 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, 67 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 26ee48a33dc4..04ee1e2e4c02 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void)
354 354
355static u32 ati_ixp4x0_rev(struct pci_dev *dev) 355static u32 ati_ixp4x0_rev(struct pci_dev *dev)
356{ 356{
357 u32 d; 357 int err = 0;
358 u8 b; 358 u32 d = 0;
359 u8 b = 0;
359 360
360 pci_read_config_byte(dev, 0xac, &b); 361 err = pci_read_config_byte(dev, 0xac, &b);
361 b &= ~(1<<5); 362 b &= ~(1<<5);
362 pci_write_config_byte(dev, 0xac, b); 363 err |= pci_write_config_byte(dev, 0xac, b);
363 pci_read_config_dword(dev, 0x70, &d); 364 err |= pci_read_config_dword(dev, 0x70, &d);
364 d |= 1<<8; 365 d |= 1<<8;
365 pci_write_config_dword(dev, 0x70, d); 366 err |= pci_write_config_dword(dev, 0x70, d);
366 pci_read_config_dword(dev, 0x8, &d); 367 err |= pci_read_config_dword(dev, 0x8, &d);
367 d &= 0xff; 368 d &= 0xff;
368 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d); 369 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
370
371 WARN_ON_ONCE(err);
372
369 return d; 373 return d;
370} 374}
371 375
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 2e8f3d3b5641..198eb201ed3b 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -13,6 +13,7 @@
13#include <asm/x86_init.h> 13#include <asm/x86_init.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/mrst.h> 15#include <asm/mrst.h>
16#include <asm/rtc.h>
16 17
17#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
18/* 19/*
@@ -36,70 +37,24 @@ EXPORT_SYMBOL(rtc_lock);
36 * nowtime is written into the registers of the CMOS clock, it will 37 * nowtime is written into the registers of the CMOS clock, it will
37 * jump to the next second precisely 500 ms later. Check the Motorola 38 * jump to the next second precisely 500 ms later. Check the Motorola
38 * MC146818A or Dallas DS12887 data sheet for details. 39 * MC146818A or Dallas DS12887 data sheet for details.
39 *
40 * BUG: This routine does not handle hour overflow properly; it just
41 * sets the minutes. Usually you'll only notice that after reboot!
42 */ 40 */
43int mach_set_rtc_mmss(unsigned long nowtime) 41int mach_set_rtc_mmss(unsigned long nowtime)
44{ 42{
45 int real_seconds, real_minutes, cmos_minutes; 43 struct rtc_time tm;
46 unsigned char save_control, save_freq_select;
47 unsigned long flags;
48 int retval = 0; 44 int retval = 0;
49 45
50 spin_lock_irqsave(&rtc_lock, flags); 46 rtc_time_to_tm(nowtime, &tm);
51 47 if (!rtc_valid_tm(&tm)) {
52 /* tell the clock it's being set */ 48 retval = set_rtc_time(&tm);
53 save_control = CMOS_READ(RTC_CONTROL); 49 if (retval)
54 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 50 printk(KERN_ERR "%s: RTC write failed with error %d\n",
55 51 __FUNCTION__, retval);
56 /* stop and reset prescaler */
57 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
58 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
59
60 cmos_minutes = CMOS_READ(RTC_MINUTES);
61 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
62 cmos_minutes = bcd2bin(cmos_minutes);
63
64 /*
65 * since we're only adjusting minutes and seconds,
66 * don't interfere with hour overflow. This avoids
67 * messing with unknown time zones but requires your
68 * RTC not to be off by more than 15 minutes
69 */
70 real_seconds = nowtime % 60;
71 real_minutes = nowtime / 60;
72 /* correct for half hour time zone */
73 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
74 real_minutes += 30;
75 real_minutes %= 60;
76
77 if (abs(real_minutes - cmos_minutes) < 30) {
78 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
79 real_seconds = bin2bcd(real_seconds);
80 real_minutes = bin2bcd(real_minutes);
81 }
82 CMOS_WRITE(real_seconds, RTC_SECONDS);
83 CMOS_WRITE(real_minutes, RTC_MINUTES);
84 } else { 52 } else {
85 printk_once(KERN_NOTICE 53 printk(KERN_ERR
86 "set_rtc_mmss: can't update from %d to %d\n", 54 "%s: Invalid RTC value: write of %lx to RTC failed\n",
87 cmos_minutes, real_minutes); 55 __FUNCTION__, nowtime);
88 retval = -1; 56 retval = -EINVAL;
89 } 57 }
90
91 /* The following flags have to be released exactly in this order,
92 * otherwise the DS12887 (popular MC146818A clone with integrated
93 * battery and quartz) will not reset the oscillator and will not
94 * update precisely 500 ms later. You won't find this mentioned in
95 * the Dallas Semiconductor data sheets, but who believes data
96 * sheets anyway ... -- Markus Kuhn
97 */
98 CMOS_WRITE(save_control, RTC_CONTROL);
99 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
100
101 spin_unlock_irqrestore(&rtc_lock, flags);
102
103 return retval; 58 return retval;
104} 59}
105 60
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index fae9134a2de9..56f7fcfe7fa2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -82,7 +82,6 @@
82#include <asm/timer.h> 82#include <asm/timer.h>
83#include <asm/i8259.h> 83#include <asm/i8259.h>
84#include <asm/sections.h> 84#include <asm/sections.h>
85#include <asm/dmi.h>
86#include <asm/io_apic.h> 85#include <asm/io_apic.h>
87#include <asm/ist.h> 86#include <asm/ist.h>
88#include <asm/setup_arch.h> 87#include <asm/setup_arch.h>
@@ -173,12 +172,10 @@ static struct resource bss_resource = {
173/* cpu data as detected by the assembly code in head.S */ 172/* cpu data as detected by the assembly code in head.S */
174struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 173struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
175 .wp_works_ok = -1, 174 .wp_works_ok = -1,
176 .fdiv_bug = -1,
177}; 175};
178/* common cpu data for all cpus */ 176/* common cpu data for all cpus */
179struct cpuinfo_x86 boot_cpu_data __read_mostly = { 177struct cpuinfo_x86 boot_cpu_data __read_mostly = {
180 .wp_works_ok = -1, 178 .wp_works_ok = -1,
181 .fdiv_bug = -1,
182}; 179};
183EXPORT_SYMBOL(boot_cpu_data); 180EXPORT_SYMBOL(boot_cpu_data);
184 181
@@ -999,6 +996,7 @@ void __init setup_arch(char **cmdline_p)
999 efi_init(); 996 efi_init();
1000 997
1001 dmi_scan_machine(); 998 dmi_scan_machine();
999 dmi_set_dump_stack_arch_desc();
1002 1000
1003 /* 1001 /*
1004 * VMware detection requires dmi to be available, so this 1002 * VMware detection requires dmi to be available, so this
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9f190a2a00e9..9c73b51817e4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
284 x86_cpuinit.setup_percpu_clockev(); 284 x86_cpuinit.setup_percpu_clockev();
285 285
286 wmb(); 286 wmb();
287 cpu_idle(); 287 cpu_startup_entry(CPUHP_ONLINE);
288} 288}
289 289
290void __init smp_store_boot_cpu_info(void) 290void __init smp_store_boot_cpu_info(void)
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 9d9d2f9e77a5..f7fec09e3e3a 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -3,13 +3,13 @@
3#include <linux/sched.h> 3#include <linux/sched.h>
4#include <linux/user.h> 4#include <linux/user.h>
5#include <linux/regset.h> 5#include <linux/regset.h>
6#include <linux/syscalls.h>
6 7
7#include <asm/uaccess.h> 8#include <asm/uaccess.h>
8#include <asm/desc.h> 9#include <asm/desc.h>
9#include <asm/ldt.h> 10#include <asm/ldt.h>
10#include <asm/processor.h> 11#include <asm/processor.h>
11#include <asm/proto.h> 12#include <asm/proto.h>
12#include <asm/syscalls.h>
13 13
14#include "tls.h" 14#include "tls.h"
15 15
@@ -89,11 +89,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
89 return 0; 89 return 0;
90} 90}
91 91
92asmlinkage int sys_set_thread_area(struct user_desc __user *u_info) 92SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
93{ 93{
94 int ret = do_set_thread_area(current, -1, u_info, 1); 94 return do_set_thread_area(current, -1, u_info, 1);
95 asmlinkage_protect(1, ret, u_info);
96 return ret;
97} 95}
98 96
99 97
@@ -139,11 +137,9 @@ int do_get_thread_area(struct task_struct *p, int idx,
139 return 0; 137 return 0;
140} 138}
141 139
142asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) 140SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
143{ 141{
144 int ret = do_get_thread_area(current, -1, u_info); 142 return do_get_thread_area(current, -1, u_info);
145 asmlinkage_protect(1, ret, u_info);
146 return ret;
147} 143}
148 144
149int regset_tls_active(struct task_struct *target, 145int regset_tls_active(struct task_struct *target,
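Note: the conversion above replaces open-coded asmlinkage entry points with the SYSCALL_DEFINEn() macros; a minimal hypothetical example of the shape:

#include <linux/errno.h>
#include <linux/syscalls.h>

/* Expands to the asmlinkage sys_example() entry point plus its plumbing. */
SYSCALL_DEFINE1(example, unsigned int, flags)
{
	return flags ? -EINVAL : 0;
}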
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 68bda7a84159..772e2a846dec 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -12,6 +12,7 @@
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 14
15#include <linux/context_tracking.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
16#include <linux/kallsyms.h> 17#include <linux/kallsyms.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
@@ -55,8 +56,7 @@
55#include <asm/i387.h> 56#include <asm/i387.h>
56#include <asm/fpu-internal.h> 57#include <asm/fpu-internal.h>
57#include <asm/mce.h> 58#include <asm/mce.h>
58#include <asm/context_tracking.h> 59#include <asm/fixmap.h>
59
60#include <asm/mach_traps.h> 60#include <asm/mach_traps.h>
61 61
62#ifdef CONFIG_X86_64 62#ifdef CONFIG_X86_64
@@ -176,34 +176,38 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
176#define DO_ERROR(trapnr, signr, str, name) \ 176#define DO_ERROR(trapnr, signr, str, name) \
177dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 177dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
178{ \ 178{ \
179 exception_enter(regs); \ 179 enum ctx_state prev_state; \
180 \
181 prev_state = exception_enter(); \
180 if (notify_die(DIE_TRAP, str, regs, error_code, \ 182 if (notify_die(DIE_TRAP, str, regs, error_code, \
181 trapnr, signr) == NOTIFY_STOP) { \ 183 trapnr, signr) == NOTIFY_STOP) { \
182 exception_exit(regs); \ 184 exception_exit(prev_state); \
183 return; \ 185 return; \
184 } \ 186 } \
185 conditional_sti(regs); \ 187 conditional_sti(regs); \
186 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 188 do_trap(trapnr, signr, str, regs, error_code, NULL); \
187 exception_exit(regs); \ 189 exception_exit(prev_state); \
188} 190}
189 191
190#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 192#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
191dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 193dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
192{ \ 194{ \
193 siginfo_t info; \ 195 siginfo_t info; \
196 enum ctx_state prev_state; \
197 \
194 info.si_signo = signr; \ 198 info.si_signo = signr; \
195 info.si_errno = 0; \ 199 info.si_errno = 0; \
196 info.si_code = sicode; \ 200 info.si_code = sicode; \
197 info.si_addr = (void __user *)siaddr; \ 201 info.si_addr = (void __user *)siaddr; \
198 exception_enter(regs); \ 202 prev_state = exception_enter(); \
199 if (notify_die(DIE_TRAP, str, regs, error_code, \ 203 if (notify_die(DIE_TRAP, str, regs, error_code, \
200 trapnr, signr) == NOTIFY_STOP) { \ 204 trapnr, signr) == NOTIFY_STOP) { \
201 exception_exit(regs); \ 205 exception_exit(prev_state); \
202 return; \ 206 return; \
203 } \ 207 } \
204 conditional_sti(regs); \ 208 conditional_sti(regs); \
205 do_trap(trapnr, signr, str, regs, error_code, &info); \ 209 do_trap(trapnr, signr, str, regs, error_code, &info); \
206 exception_exit(regs); \ 210 exception_exit(prev_state); \
207} 211}
208 212
209DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, 213DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
@@ -226,14 +230,16 @@ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
226/* Runs on IST stack */ 230/* Runs on IST stack */
227dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 231dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
228{ 232{
229 exception_enter(regs); 233 enum ctx_state prev_state;
234
235 prev_state = exception_enter();
230 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 236 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
231 X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { 237 X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
232 preempt_conditional_sti(regs); 238 preempt_conditional_sti(regs);
233 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); 239 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
234 preempt_conditional_cli(regs); 240 preempt_conditional_cli(regs);
235 } 241 }
236 exception_exit(regs); 242 exception_exit(prev_state);
237} 243}
238 244
239dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 245dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
@@ -241,7 +247,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
241 static const char str[] = "double fault"; 247 static const char str[] = "double fault";
242 struct task_struct *tsk = current; 248 struct task_struct *tsk = current;
243 249
244 exception_enter(regs); 250 exception_enter();
245 /* Return not checked because double check cannot be ignored */ 251 /* Return not checked because double check cannot be ignored */
246 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 252 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
247 253
@@ -261,8 +267,9 @@ dotraplinkage void __kprobes
261do_general_protection(struct pt_regs *regs, long error_code) 267do_general_protection(struct pt_regs *regs, long error_code)
262{ 268{
263 struct task_struct *tsk; 269 struct task_struct *tsk;
270 enum ctx_state prev_state;
264 271
265 exception_enter(regs); 272 prev_state = exception_enter();
266 conditional_sti(regs); 273 conditional_sti(regs);
267 274
268#ifdef CONFIG_X86_32 275#ifdef CONFIG_X86_32
@@ -300,12 +307,14 @@ do_general_protection(struct pt_regs *regs, long error_code)
300 307
301 force_sig(SIGSEGV, tsk); 308 force_sig(SIGSEGV, tsk);
302exit: 309exit:
303 exception_exit(regs); 310 exception_exit(prev_state);
304} 311}
305 312
306/* May run on IST stack. */ 313/* May run on IST stack. */
307dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 314dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
308{ 315{
316 enum ctx_state prev_state;
317
309#ifdef CONFIG_DYNAMIC_FTRACE 318#ifdef CONFIG_DYNAMIC_FTRACE
310 /* 319 /*
311 * ftrace must be first, everything else may cause a recursive crash. 320 * ftrace must be first, everything else may cause a recursive crash.
@@ -315,7 +324,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
315 ftrace_int3_handler(regs)) 324 ftrace_int3_handler(regs))
316 return; 325 return;
317#endif 326#endif
318 exception_enter(regs); 327 prev_state = exception_enter();
319#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 328#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
320 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 329 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
321 SIGTRAP) == NOTIFY_STOP) 330 SIGTRAP) == NOTIFY_STOP)
@@ -336,7 +345,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
336 preempt_conditional_cli(regs); 345 preempt_conditional_cli(regs);
337 debug_stack_usage_dec(); 346 debug_stack_usage_dec();
338exit: 347exit:
339 exception_exit(regs); 348 exception_exit(prev_state);
340} 349}
341 350
342#ifdef CONFIG_X86_64 351#ifdef CONFIG_X86_64
@@ -393,11 +402,12 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
393dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 402dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
394{ 403{
395 struct task_struct *tsk = current; 404 struct task_struct *tsk = current;
405 enum ctx_state prev_state;
396 int user_icebp = 0; 406 int user_icebp = 0;
397 unsigned long dr6; 407 unsigned long dr6;
398 int si_code; 408 int si_code;
399 409
400 exception_enter(regs); 410 prev_state = exception_enter();
401 411
402 get_debugreg(dr6, 6); 412 get_debugreg(dr6, 6);
403 413
@@ -467,7 +477,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
467 debug_stack_usage_dec(); 477 debug_stack_usage_dec();
468 478
469exit: 479exit:
470 exception_exit(regs); 480 exception_exit(prev_state);
471} 481}
472 482
473/* 483/*
@@ -561,17 +571,21 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
561 571
562dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 572dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
563{ 573{
564 exception_enter(regs); 574 enum ctx_state prev_state;
575
576 prev_state = exception_enter();
565 math_error(regs, error_code, X86_TRAP_MF); 577 math_error(regs, error_code, X86_TRAP_MF);
566 exception_exit(regs); 578 exception_exit(prev_state);
567} 579}
568 580
569dotraplinkage void 581dotraplinkage void
570do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 582do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
571{ 583{
572 exception_enter(regs); 584 enum ctx_state prev_state;
585
586 prev_state = exception_enter();
573 math_error(regs, error_code, X86_TRAP_XF); 587 math_error(regs, error_code, X86_TRAP_XF);
574 exception_exit(regs); 588 exception_exit(prev_state);
575} 589}
576 590
577dotraplinkage void 591dotraplinkage void
@@ -639,7 +653,9 @@ EXPORT_SYMBOL_GPL(math_state_restore);
639dotraplinkage void __kprobes 653dotraplinkage void __kprobes
640do_device_not_available(struct pt_regs *regs, long error_code) 654do_device_not_available(struct pt_regs *regs, long error_code)
641{ 655{
642 exception_enter(regs); 656 enum ctx_state prev_state;
657
658 prev_state = exception_enter();
643 BUG_ON(use_eager_fpu()); 659 BUG_ON(use_eager_fpu());
644 660
645#ifdef CONFIG_MATH_EMULATION 661#ifdef CONFIG_MATH_EMULATION
@@ -650,7 +666,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
650 666
651 info.regs = regs; 667 info.regs = regs;
652 math_emulate(&info); 668 math_emulate(&info);
653 exception_exit(regs); 669 exception_exit(prev_state);
654 return; 670 return;
655 } 671 }
656#endif 672#endif
@@ -658,15 +674,16 @@ do_device_not_available(struct pt_regs *regs, long error_code)
658#ifdef CONFIG_X86_32 674#ifdef CONFIG_X86_32
659 conditional_sti(regs); 675 conditional_sti(regs);
660#endif 676#endif
661 exception_exit(regs); 677 exception_exit(prev_state);
662} 678}
663 679
664#ifdef CONFIG_X86_32 680#ifdef CONFIG_X86_32
665dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 681dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
666{ 682{
667 siginfo_t info; 683 siginfo_t info;
684 enum ctx_state prev_state;
668 685
669 exception_enter(regs); 686 prev_state = exception_enter();
670 local_irq_enable(); 687 local_irq_enable();
671 688
672 info.si_signo = SIGILL; 689 info.si_signo = SIGILL;
@@ -678,7 +695,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
678 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, 695 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
679 &info); 696 &info);
680 } 697 }
681 exception_exit(regs); 698 exception_exit(prev_state);
682} 699}
683#endif 700#endif
684 701
@@ -753,6 +770,14 @@ void __init trap_init(void)
753#endif 770#endif
754 771
755 /* 772 /*
773 * Set the IDT descriptor to a fixed read-only location, so that the
774 * "sidt" instruction will not leak the location of the kernel, and
775 * to defend the IDT against arbitrary memory write vulnerabilities.
776 * It will be reloaded in cpu_init() */
777 __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
778 idt_descr.address = fix_to_virt(FIX_RO_IDT);
779
780 /*
756 * Should be a barrier for any external CPU state: 781 * Should be a barrier for any external CPU state:
757 */ 782 */
758 cpu_init(); 783 cpu_init();
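The traps.c hunks above all follow the same context-tracking conversion: exception_enter() now returns the previous state (user or kernel) instead of taking regs, and exception_exit() restores that saved state. A minimal sketch of the resulting pattern, with a hypothetical handler name:

#include <linux/context_tracking.h>
#include <asm/traps.h>

/* Hypothetical trap handler showing the save/restore pattern used above. */
dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();		/* leave RCU "user" mode if needed */
	/* ... inspect regs / error_code and handle the fault ... */
	exception_exit(prev_state);		/* return to the saved state */
}

The trap_init() hunk is unrelated to context tracking: it moves the IDT to a fixed read-only mapping so that sidt no longer discloses the kernel's placement and stray writes cannot corrupt the table.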
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4b9ea101fe3b..098b3cfda72e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -768,7 +768,8 @@ static cycle_t read_tsc(struct clocksource *cs)
768 768
769static void resume_tsc(struct clocksource *cs) 769static void resume_tsc(struct clocksource *cs)
770{ 770{
771 clocksource_tsc.cycle_last = 0; 771 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
772 clocksource_tsc.cycle_last = 0;
772} 773}
773 774
774static struct clocksource clocksource_tsc = { 775static struct clocksource clocksource_tsc = {
@@ -939,6 +940,9 @@ static int __init init_tsc_clocksource(void)
939 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; 940 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
940 } 941 }
941 942
943 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
944 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
945
942 /* 946 /*
943 * Trust the results of the earlier calibration on systems 947 * Trust the results of the earlier calibration on systems
944 * exporting a reliable TSC. 948 * exporting a reliable TSC.
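With X86_FEATURE_NONSTOP_TSC_S3 the TSC keeps counting across S3 suspend, so the clocksource no longer needs its cycle_last reset on resume and can advertise CLOCK_SOURCE_SUSPEND_NONSTOP to the timekeeping core. A sketch of how a counter with that property is typically declared (names and rating are hypothetical):

#include <linux/clocksource.h>

static cycle_t read_example_counter(struct clocksource *cs)
{
	return 0;	/* a real driver would read its free-running counter here */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 300,
	.read	= read_example_counter,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};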
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 0ba4cfb4f412..2ed845928b5f 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -697,3 +697,32 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
697 send_sig(SIGTRAP, current, 0); 697 send_sig(SIGTRAP, current, 0);
698 return ret; 698 return ret;
699} 699}
700
701unsigned long
702arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
703{
704 int rasize, ncopied;
705 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
706
707 rasize = is_ia32_task() ? 4 : 8;
708 ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize);
709 if (unlikely(ncopied))
710 return -1;
711
712 /* check whether address has been already hijacked */
713 if (orig_ret_vaddr == trampoline_vaddr)
714 return orig_ret_vaddr;
715
716 ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
717 if (likely(!ncopied))
718 return orig_ret_vaddr;
719
720 if (ncopied != rasize) {
721 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
722 "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
723
724 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
725 }
726
727 return -1;
728}
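The new arch_uretprobe_hijack_return_addr() relies on the usual uaccess convention: copy_from_user() and copy_to_user() return 0 on success and the number of bytes left uncopied on failure, which is why any non-zero ncopied is treated as an error and ncopied != rasize flags a partial overwrite. A minimal sketch of that convention (helper name is hypothetical):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: read one word from the user stack pointer. */
static int read_user_word(unsigned long __user *usp, unsigned long *val)
{
	if (copy_from_user(val, usp, sizeof(*val)))
		return -EFAULT;		/* some or all bytes were not copied */
	return 0;
}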
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 22a1530146a8..10c4f3006afd 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -94,10 +94,6 @@ SECTIONS
94 _text = .; 94 _text = .;
95 /* bootstrapping code */ 95 /* bootstrapping code */
96 HEAD_TEXT 96 HEAD_TEXT
97#ifdef CONFIG_X86_32
98 . = ALIGN(PAGE_SIZE);
99 *(.text..page_aligned)
100#endif
101 . = ALIGN(8); 97 . = ALIGN(8);
102 _stext = .; 98 _stext = .;
103 TEXT_TEXT 99 TEXT_TEXT
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e1b1ce21bc00..7d39d70647e3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -555,7 +555,7 @@ static void svm_init_erratum_383(void)
555 int err; 555 int err;
556 u64 val; 556 u64 val;
557 557
558 if (!cpu_has_amd_erratum(amd_erratum_383)) 558 if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
559 return; 559 return;
560 560
561 /* Use _safe variants to not break nested virtualization */ 561 /* Use _safe variants to not break nested virtualization */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6667042714cc..867b81037f96 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2459,7 +2459,7 @@ static int hardware_enable(void *garbage)
2459 ept_sync_global(); 2459 ept_sync_global();
2460 } 2460 }
2461 2461
2462 store_gdt(&__get_cpu_var(host_gdt)); 2462 native_store_gdt(&__get_cpu_var(host_gdt));
2463 2463
2464 return 0; 2464 return 0;
2465} 2465}
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 29043d2048a0..4a0890f815c4 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -1,7 +1,6 @@
1config LGUEST_GUEST 1config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 select PARAVIRT 3 depends on X86_32 && PARAVIRT
4 depends on X86_32
5 select TTY 4 select TTY
6 select VIRTUALIZATION 5 select VIRTUALIZATION
7 select VIRTIO 6 select VIRTIO
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c..e78b8eee6615 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
61 testl $3, %esi # Check alignment. 61 testl $3, %esi # Check alignment.
62 jz 2f # Jump if alignment is ok. 62 jz 2f # Jump if alignment is ok.
63 testl $1, %esi # Check alignment. 63 testl $1, %esi # Check alignment.
64 jz 10f # Jump if alignment is boundary of 2bytes. 64 jz 10f # Jump if alignment is boundary of 2 bytes.
65 65
66 # buf is odd 66 # buf is odd
67 dec %ecx 67 dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index b908a59eccf5..e78761d6b7f8 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
26 char *ret = dest; 26 char *ret = dest;
27 27
28 __asm__ __volatile__( 28 __asm__ __volatile__(
29 /* Handle more 16bytes in loop */ 29 /* Handle more 16 bytes in loop */
30 "cmp $0x10, %0\n\t" 30 "cmp $0x10, %0\n\t"
31 "jb 1f\n\t" 31 "jb 1f\n\t"
32 32
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
51 "sub $0x10, %0\n\t" 51 "sub $0x10, %0\n\t"
52 52
53 /* 53 /*
54 * We gobble 16byts forward in each loop. 54 * We gobble 16 bytes forward in each loop.
55 */ 55 */
56 "3:\n\t" 56 "3:\n\t"
57 "sub $0x10, %0\n\t" 57 "sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
117 "sub $0x10, %0\n\t" 117 "sub $0x10, %0\n\t"
118 118
119 /* 119 /*
120 * We gobble 16byts backward in each loop. 120 * We gobble 16 bytes backward in each loop.
121 */ 121 */
122 "7:\n\t" 122 "7:\n\t"
123 "sub $0x10, %0\n\t" 123 "sub $0x10, %0\n\t"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97e..56313a326188 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
98 subq $0x20, %rdx 98 subq $0x20, %rdx
99 /* 99 /*
100 * At most 3 ALU operations in one cycle, 100 * At most 3 ALU operations in one cycle,
101 * so append NOPS in the same 16bytes trunk. 101 * so append NOPS in the same 16 bytes trunk.
102 */ 102 */
103 .p2align 4 103 .p2align 4
104.Lcopy_backward_loop: 104.Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ee164610ec46..65268a6104f4 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@
27ENTRY(memmove) 27ENTRY(memmove)
28 CFI_STARTPROC 28 CFI_STARTPROC
29 29
30 /* Handle more 32bytes in loop */ 30 /* Handle more 32 bytes in loop */
31 mov %rdi, %rax 31 mov %rdi, %rax
32 cmp $0x20, %rdx 32 cmp $0x20, %rdx
33 jb 1f 33 jb 1f
@@ -56,7 +56,7 @@ ENTRY(memmove)
563: 563:
57 sub $0x20, %rdx 57 sub $0x20, %rdx
58 /* 58 /*
59 * We gobble 32byts forward in each loop. 59 * We gobble 32 bytes forward in each loop.
60 */ 60 */
615: 615:
62 sub $0x20, %rdx 62 sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
122 addq %rdx, %rdi 122 addq %rdx, %rdi
123 subq $0x20, %rdx 123 subq $0x20, %rdx
124 /* 124 /*
125 * We gobble 32byts backward in each loop. 125 * We gobble 32 bytes backward in each loop.
126 */ 126 */
1278: 1278:
128 subq $0x20, %rdx 128 subq $0x20, %rdx
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index f0312d746402..3eb18acd0e40 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -689,9 +689,3 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
689 return n; 689 return n;
690} 690}
691EXPORT_SYMBOL(_copy_from_user); 691EXPORT_SYMBOL(_copy_from_user);
692
693void copy_from_user_overflow(void)
694{
695 WARN(1, "Buffer overflow detected!\n");
696}
697EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 5247d01329ca..2ca15b59fb3f 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -130,9 +130,8 @@ int __init amd_numa_init(void)
130 } 130 }
131 131
132 limit >>= 16; 132 limit >>= 16;
133 limit <<= 24;
134 limit |= (1<<24)-1;
135 limit++; 133 limit++;
134 limit <<= 24;
136 135
137 if (limit > end) 136 if (limit > end)
138 limit = end; 137 limit = end;
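Both orderings of the limit arithmetic above produce the same exclusive end address, barring overflow: with the shifted limit field equal to 3, for instance, the old sequence computes (3 << 24) | 0xffffff and then adds one, giving 0x04000000, while the new sequence computes (3 + 1) << 24, which is the same value; in general ((x << 24) | ((1 << 24) - 1)) + 1 equals (x + 1) << 24.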
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 0e883364abb5..654be4ae3047 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,12 +13,12 @@
13#include <linux/perf_event.h> /* perf_sw_event */ 13#include <linux/perf_event.h> /* perf_sw_event */
14#include <linux/hugetlb.h> /* hstate_index_to_shift */ 14#include <linux/hugetlb.h> /* hstate_index_to_shift */
15#include <linux/prefetch.h> /* prefetchw */ 15#include <linux/prefetch.h> /* prefetchw */
16#include <linux/context_tracking.h> /* exception_enter(), ... */
16 17
17#include <asm/traps.h> /* dotraplinkage, ... */ 18#include <asm/traps.h> /* dotraplinkage, ... */
18#include <asm/pgalloc.h> /* pgd_*(), ... */ 19#include <asm/pgalloc.h> /* pgd_*(), ... */
19#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ 20#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20#include <asm/fixmap.h> /* VSYSCALL_START */ 21#include <asm/fixmap.h> /* VSYSCALL_START */
21#include <asm/context_tracking.h> /* exception_enter(), ... */
22 22
23/* 23/*
24 * Page fault error code bits: 24 * Page fault error code bits:
@@ -557,7 +557,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
557 /* 557 /*
558 * Pentium F0 0F C7 C8 bug workaround: 558 * Pentium F0 0F C7 C8 bug workaround:
559 */ 559 */
560 if (boot_cpu_data.f00f_bug) { 560 if (boot_cpu_has_bug(X86_BUG_F00F)) {
561 nr = (address - idt_descr.address) >> 3; 561 nr = (address - idt_descr.address) >> 3;
562 562
563 if (nr == 6) { 563 if (nr == 6) {
@@ -1224,7 +1224,9 @@ good_area:
1224dotraplinkage void __kprobes 1224dotraplinkage void __kprobes
1225do_page_fault(struct pt_regs *regs, unsigned long error_code) 1225do_page_fault(struct pt_regs *regs, unsigned long error_code)
1226{ 1226{
1227 exception_enter(regs); 1227 enum ctx_state prev_state;
1228
1229 prev_state = exception_enter();
1228 __do_page_fault(regs, error_code); 1230 __do_page_fault(regs, error_code);
1229 exception_exit(regs); 1231 exception_exit(prev_state);
1230} 1232}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6f31ee56c008..252b8f5489ba 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void)
137 add_highpages_with_active_regions(nid, zone_start_pfn, 137 add_highpages_with_active_regions(nid, zone_start_pfn,
138 zone_end_pfn); 138 zone_end_pfn);
139 } 139 }
140 totalram_pages += totalhigh_pages;
141} 140}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 59b7fc453277..fdc5dca14fb3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -515,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
515 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 515 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
516 516
517 for (; addr < end; addr += PAGE_SIZE) { 517 for (; addr < end; addr += PAGE_SIZE) {
518 ClearPageReserved(virt_to_page(addr));
519 init_page_count(virt_to_page(addr));
520 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 518 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
521 free_page(addr); 519 free_reserved_page(virt_to_page(addr));
522 totalram_pages++;
523 } 520 }
524#endif 521#endif
525} 522}
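The mm/init.c hunk (and the free_highmem_page() conversion in init_32.c just below) replaces the open-coded page-release sequence with the generic helpers, which also maintain totalram_pages/totalhigh_pages themselves. A rough sketch of the before/after, with a hypothetical wrapper name:

#include <linux/mm.h>

/* Hypothetical wrapper: hand a reserved boot-time page back to the allocator. */
static void give_back_page(unsigned long addr)
{
	free_reserved_page(virt_to_page(addr));	/* new style: one call, accounting included */
	/*
	 * Old style, removed by this patch:
	 *   ClearPageReserved(virt_to_page(addr));
	 *   init_page_count(virt_to_page(addr));
	 *   free_page(addr);
	 *   totalram_pages++;
	 */
}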
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2d19001151d5..3ac7e319918d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
427 pkmap_page_table = pte; 427 pkmap_page_table = pte;
428} 428}
429 429
430static void __init add_one_highpage_init(struct page *page)
431{
432 ClearPageReserved(page);
433 init_page_count(page);
434 __free_page(page);
435 totalhigh_pages++;
436}
437
438void __init add_highpages_with_active_regions(int nid, 430void __init add_highpages_with_active_regions(int nid,
439 unsigned long start_pfn, unsigned long end_pfn) 431 unsigned long start_pfn, unsigned long end_pfn)
440{ 432{
@@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid,
448 start_pfn, end_pfn); 440 start_pfn, end_pfn);
449 for ( ; pfn < e_pfn; pfn++) 441 for ( ; pfn < e_pfn; pfn++)
450 if (pfn_valid(pfn)) 442 if (pfn_valid(pfn))
451 add_one_highpage_init(pfn_to_page(pfn)); 443 free_highmem_page(pfn_to_page(pfn));
452 } 444 }
453} 445}
454#else 446#else
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 474e28f10815..caad9a0ee19f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1011,14 +1011,12 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
1011 flush_tlb_all(); 1011 flush_tlb_all();
1012} 1012}
1013 1013
1014void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages) 1014void __ref vmemmap_free(unsigned long start, unsigned long end)
1015{ 1015{
1016 unsigned long start = (unsigned long)memmap;
1017 unsigned long end = (unsigned long)(memmap + nr_pages);
1018
1019 remove_pagetable(start, end, false); 1016 remove_pagetable(start, end, false);
1020} 1017}
1021 1018
1019#ifdef CONFIG_MEMORY_HOTREMOVE
1022static void __meminit 1020static void __meminit
1023kernel_physical_mapping_remove(unsigned long start, unsigned long end) 1021kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1024{ 1022{
@@ -1028,7 +1026,6 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1028 remove_pagetable(start, end, true); 1026 remove_pagetable(start, end, true);
1029} 1027}
1030 1028
1031#ifdef CONFIG_MEMORY_HOTREMOVE
1032int __ref arch_remove_memory(u64 start, u64 size) 1029int __ref arch_remove_memory(u64 start, u64 size)
1033{ 1030{
1034 unsigned long start_pfn = start >> PAGE_SHIFT; 1031 unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -1067,10 +1064,9 @@ void __init mem_init(void)
1067 1064
1068 /* clear_bss() already clear the empty_zero_page */ 1065 /* clear_bss() already clear the empty_zero_page */
1069 1066
1070 reservedpages = 0;
1071
1072 /* this will put all low memory onto the freelists */
1073 register_page_bootmem_info(); 1067 register_page_bootmem_info();
1068
1069 /* this will put all memory onto the freelists */
1074 totalram_pages = free_all_bootmem(); 1070 totalram_pages = free_all_bootmem();
1075 1071
1076 absent_pages = absent_pages_in_range(0, max_pfn); 1072 absent_pages = absent_pages_in_range(0, max_pfn);
@@ -1285,18 +1281,17 @@ static long __meminitdata addr_start, addr_end;
1285static void __meminitdata *p_start, *p_end; 1281static void __meminitdata *p_start, *p_end;
1286static int __meminitdata node_start; 1282static int __meminitdata node_start;
1287 1283
1288int __meminit 1284static int __meminit vmemmap_populate_hugepages(unsigned long start,
1289vmemmap_populate(struct page *start_page, unsigned long size, int node) 1285 unsigned long end, int node)
1290{ 1286{
1291 unsigned long addr = (unsigned long)start_page; 1287 unsigned long addr;
1292 unsigned long end = (unsigned long)(start_page + size);
1293 unsigned long next; 1288 unsigned long next;
1294 pgd_t *pgd; 1289 pgd_t *pgd;
1295 pud_t *pud; 1290 pud_t *pud;
1296 pmd_t *pmd; 1291 pmd_t *pmd;
1297 1292
1298 for (; addr < end; addr = next) { 1293 for (addr = start; addr < end; addr = next) {
1299 void *p = NULL; 1294 next = pmd_addr_end(addr, end);
1300 1295
1301 pgd = vmemmap_pgd_populate(addr, node); 1296 pgd = vmemmap_pgd_populate(addr, node);
1302 if (!pgd) 1297 if (!pgd)
@@ -1306,31 +1301,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
1306 if (!pud) 1301 if (!pud)
1307 return -ENOMEM; 1302 return -ENOMEM;
1308 1303
1309 if (!cpu_has_pse) { 1304 pmd = pmd_offset(pud, addr);
1310 next = (addr + PAGE_SIZE) & PAGE_MASK; 1305 if (pmd_none(*pmd)) {
1311 pmd = vmemmap_pmd_populate(pud, addr, node); 1306 void *p;
1312
1313 if (!pmd)
1314 return -ENOMEM;
1315
1316 p = vmemmap_pte_populate(pmd, addr, node);
1317 1307
1318 if (!p) 1308 p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1319 return -ENOMEM; 1309 if (p) {
1320
1321 addr_end = addr + PAGE_SIZE;
1322 p_end = p + PAGE_SIZE;
1323 } else {
1324 next = pmd_addr_end(addr, end);
1325
1326 pmd = pmd_offset(pud, addr);
1327 if (pmd_none(*pmd)) {
1328 pte_t entry; 1310 pte_t entry;
1329 1311
1330 p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1331 if (!p)
1332 return -ENOMEM;
1333
1334 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, 1312 entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1335 PAGE_KERNEL_LARGE); 1313 PAGE_KERNEL_LARGE);
1336 set_pmd(pmd, __pmd(pte_val(entry))); 1314 set_pmd(pmd, __pmd(pte_val(entry)));
@@ -1347,15 +1325,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
1347 1325
1348 addr_end = addr + PMD_SIZE; 1326 addr_end = addr + PMD_SIZE;
1349 p_end = p + PMD_SIZE; 1327 p_end = p + PMD_SIZE;
1350 } else 1328 continue;
1351 vmemmap_verify((pte_t *)pmd, node, addr, next); 1329 }
1330 } else if (pmd_large(*pmd)) {
1331 vmemmap_verify((pte_t *)pmd, node, addr, next);
1332 continue;
1352 } 1333 }
1353 1334 pr_warn_once("vmemmap: falling back to regular page backing\n");
1335 if (vmemmap_populate_basepages(addr, next, node))
1336 return -ENOMEM;
1354 } 1337 }
1355 sync_global_pgds((unsigned long)start_page, end - 1);
1356 return 0; 1338 return 0;
1357} 1339}
1358 1340
1341int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1342{
1343 int err;
1344
1345 if (cpu_has_pse)
1346 err = vmemmap_populate_hugepages(start, end, node);
1347 else
1348 err = vmemmap_populate_basepages(start, end, node);
1349 if (!err)
1350 sync_global_pgds(start, end - 1);
1351 return err;
1352}
1353
1359#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) 1354#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
1360void register_page_bootmem_memmap(unsigned long section_nr, 1355void register_page_bootmem_memmap(unsigned long section_nr,
1361 struct page *start_page, unsigned long size) 1356 struct page *start_page, unsigned long size)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f..9a1e6583910c 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
282 in parallel. Reuse of the virtual address is prevented by 282 in parallel. Reuse of the virtual address is prevented by
283 leaving it in the global lists until we're done with it. 283 leaving it in the global lists until we're done with it.
284 cpa takes care of the direct mappings. */ 284 cpa takes care of the direct mappings. */
285 read_lock(&vmlist_lock); 285 p = find_vm_area((void __force *)addr);
286 for (p = vmlist; p; p = p->next) {
287 if (p->addr == (void __force *)addr)
288 break;
289 }
290 read_unlock(&vmlist_lock);
291 286
292 if (!p) { 287 if (!p) {
293 printk(KERN_ERR "iounmap: bad address %p\n", addr); 288 printk(KERN_ERR "iounmap: bad address %p\n", addr);
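The iounmap() hunk drops the hand-rolled walk of vmlist under vmlist_lock in favour of find_vm_area(), which resolves the vmalloc-space descriptor for an address directly. A minimal sketch (wrapper name is hypothetical):

#include <linux/vmalloc.h>
#include <linux/io.h>

/* Hypothetical lookup: map an ioremap cookie back to its vm_struct. */
static struct vm_struct *lookup_mapping(volatile void __iomem *addr)
{
	return find_vm_area((void __force *)addr);	/* NULL if addr is not in vmalloc space */
}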
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 72fe01e9e414..a71c4e207679 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -114,14 +114,11 @@ void numa_clear_node(int cpu)
114 */ 114 */
115void __init setup_node_to_cpumask_map(void) 115void __init setup_node_to_cpumask_map(void)
116{ 116{
117 unsigned int node, num = 0; 117 unsigned int node;
118 118
119 /* setup nr_node_ids if not done yet */ 119 /* setup nr_node_ids if not done yet */
120 if (nr_node_ids == MAX_NUMNODES) { 120 if (nr_node_ids == MAX_NUMNODES)
121 for_each_node_mask(node, node_possible_map) 121 setup_nr_node_ids();
122 num = node;
123 nr_node_ids = num + 1;
124 }
125 122
126 /* allocate the map */ 123 /* allocate the map */
127 for (node = 0; node < nr_node_ids; node++) 124 for (node = 0; node < nr_node_ids; node++)
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 0e38951e65eb..d0b1773d9d2e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -130,13 +130,12 @@ static int pageattr_test(void)
130 } 130 }
131 131
132 failed += print_split(&sa); 132 failed += print_split(&sa);
133 srandom32(100);
134 133
135 for (i = 0; i < NTEST; i++) { 134 for (i = 0; i < NTEST; i++) {
136 unsigned long pfn = random32() % max_pfn_mapped; 135 unsigned long pfn = prandom_u32() % max_pfn_mapped;
137 136
138 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); 137 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
139 len[i] = random32() % 100; 138 len[i] = prandom_u32() % 100;
140 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); 139 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
141 140
142 if (len[i] == 0) 141 if (len[i] == 0)
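The test above moves from the old random32()/srandom32() interface to prandom_u32(), which needs no explicit seeding from callers. A trivial sketch of the replacement call (helper name is hypothetical):

#include <linux/random.h>

/* Hypothetical helper: pick a page frame number below max_pfn_mapped. */
static unsigned long pick_test_pfn(unsigned long max_pfn_mapped)
{
	return prandom_u32() % max_pfn_mapped;
}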
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index fb4e73ec24d8..bb32480c2d71 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -542,13 +542,14 @@ out_unlock:
542 return do_split; 542 return do_split;
543} 543}
544 544
545int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase) 545static int
546__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
546{ 547{
548 pte_t *pbase = (pte_t *)page_address(base);
547 unsigned long pfn, pfninc = 1; 549 unsigned long pfn, pfninc = 1;
548 unsigned int i, level; 550 unsigned int i, level;
549 pte_t *tmp; 551 pte_t *tmp;
550 pgprot_t ref_prot; 552 pgprot_t ref_prot;
551 struct page *base = virt_to_page(pbase);
552 553
553 spin_lock(&pgd_lock); 554 spin_lock(&pgd_lock);
554 /* 555 /*
@@ -633,7 +634,6 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
633 634
634static int split_large_page(pte_t *kpte, unsigned long address) 635static int split_large_page(pte_t *kpte, unsigned long address)
635{ 636{
636 pte_t *pbase;
637 struct page *base; 637 struct page *base;
638 638
639 if (!debug_pagealloc) 639 if (!debug_pagealloc)
@@ -644,8 +644,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
644 if (!base) 644 if (!base)
645 return -ENOMEM; 645 return -ENOMEM;
646 646
647 pbase = (pte_t *)page_address(base); 647 if (__split_large_page(kpte, address, base))
648 if (__split_large_page(kpte, address, pbase))
649 __free_page(base); 648 __free_page(base);
650 649
651 return 0; 650 return 0;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 901177d75ff5..305c68b8d538 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/pci-acpi.h>
9#include <linux/ioport.h> 10#include <linux/ioport.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/dmi.h> 12#include <linux/dmi.h>
@@ -170,6 +171,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
170 pcibios_fixup_device_resources(dev); 171 pcibios_fixup_device_resources(dev);
171} 172}
172 173
174void pcibios_add_bus(struct pci_bus *bus)
175{
176 acpi_pci_add_bus(bus);
177}
178
179void pcibios_remove_bus(struct pci_bus *bus)
180{
181 acpi_pci_remove_bus(bus);
182}
183
173/* 184/*
174 * Only use DMI information to set this if nothing was passed 185 * Only use DMI information to set this if nothing was passed
175 * on the kernel command line (which was parsed earlier). 186 * on the kernel command line (which was parsed earlier).
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 94e76620460f..4a9be6ddf054 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
177 goto error; 177 goto error;
178 i = 0; 178 i = 0;
179 list_for_each_entry(msidesc, &dev->msi_list, list) { 179 list_for_each_entry(msidesc, &dev->msi_list, list) {
180 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, 180 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
181 (type == PCI_CAP_ID_MSIX) ? 181 (type == PCI_CAP_ID_MSIX) ?
182 "pcifront-msi-x" : 182 "pcifront-msi-x" :
183 "pcifront-msi", 183 "pcifront-msi",
@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
244 dev_dbg(&dev->dev, 244 dev_dbg(&dev->dev,
245 "xen: msi already bound to pirq=%d\n", pirq); 245 "xen: msi already bound to pirq=%d\n", pirq);
246 } 246 }
247 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, 247 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
248 (type == PCI_CAP_ID_MSIX) ? 248 (type == PCI_CAP_ID_MSIX) ?
249 "msi-x" : "msi", 249 "msi-x" : "msi",
250 DOMID_SELF); 250 DOMID_SELF);
@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
326 } 326 }
327 327
328 ret = xen_bind_pirq_msi_to_irq(dev, msidesc, 328 ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
329 map_irq.pirq, map_irq.index, 329 map_irq.pirq,
330 (type == PCI_CAP_ID_MSIX) ? 330 (type == PCI_CAP_ID_MSIX) ?
331 "msi-x" : "msi", 331 "msi-x" : "msi",
332 domid); 332 domid);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index e4a86a677ce1..b55d174e5034 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -49,6 +49,7 @@
49#include <asm/cacheflush.h> 49#include <asm/cacheflush.h>
50#include <asm/tlbflush.h> 50#include <asm/tlbflush.h>
51#include <asm/x86_init.h> 51#include <asm/x86_init.h>
52#include <asm/rtc.h>
52 53
53#define EFI_DEBUG 1 54#define EFI_DEBUG 1
54 55
@@ -352,10 +353,10 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
352 353
353int efi_set_rtc_mmss(unsigned long nowtime) 354int efi_set_rtc_mmss(unsigned long nowtime)
354{ 355{
355 int real_seconds, real_minutes;
356 efi_status_t status; 356 efi_status_t status;
357 efi_time_t eft; 357 efi_time_t eft;
358 efi_time_cap_t cap; 358 efi_time_cap_t cap;
359 struct rtc_time tm;
359 360
360 status = efi.get_time(&eft, &cap); 361 status = efi.get_time(&eft, &cap);
361 if (status != EFI_SUCCESS) { 362 if (status != EFI_SUCCESS) {
@@ -363,13 +364,20 @@ int efi_set_rtc_mmss(unsigned long nowtime)
363 return -1; 364 return -1;
364 } 365 }
365 366
366 real_seconds = nowtime % 60; 367 rtc_time_to_tm(nowtime, &tm);
367 real_minutes = nowtime / 60; 368 if (!rtc_valid_tm(&tm)) {
368 if (((abs(real_minutes - eft.minute) + 15)/30) & 1) 369 eft.year = tm.tm_year + 1900;
369 real_minutes += 30; 370 eft.month = tm.tm_mon + 1;
370 real_minutes %= 60; 371 eft.day = tm.tm_mday;
371 eft.minute = real_minutes; 372 eft.minute = tm.tm_min;
372 eft.second = real_seconds; 373 eft.second = tm.tm_sec;
374 eft.nanosecond = 0;
375 } else {
376 printk(KERN_ERR
377 "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
378 __FUNCTION__, nowtime);
379 return -1;
380 }
373 381
374 status = efi.set_time(&eft); 382 status = efi.set_time(&eft);
375 if (status != EFI_SUCCESS) { 383 if (status != EFI_SUCCESS) {
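Rather than rounding minutes and seconds by hand, the EFI code (and, further down, the vRTC code) now decomposes the wall-clock value with rtc_time_to_tm() and rejects it via rtc_valid_tm() before programming the hardware. A sketch of that conversion step (helper name is hypothetical):

#include <linux/rtc.h>
#include <linux/errno.h>

/* Hypothetical helper: split a seconds count into calendar fields and validate it. */
static int decompose_time(unsigned long nowtime, struct rtc_time *tm)
{
	rtc_time_to_tm(nowtime, tm);	/* fills tm_year, tm_mon, tm_mday, tm_hour, ... */
	if (rtc_valid_tm(tm))		/* non-zero return means the fields are bogus */
		return -EINVAL;
	return 0;
}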
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e31bcd8f2eee..a0a0a4389bbd 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -356,8 +356,7 @@ static int __init sfi_parse_gpio(struct sfi_table_header *table)
356 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); 356 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
357 pentry = (struct sfi_gpio_table_entry *)sb->pentry; 357 pentry = (struct sfi_gpio_table_entry *)sb->pentry;
358 358
359 gpio_table = (struct sfi_gpio_table_entry *) 359 gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
360 kmalloc(num * sizeof(*pentry), GFP_KERNEL);
361 if (!gpio_table) 360 if (!gpio_table)
362 return -1; 361 return -1;
363 memcpy(gpio_table, pentry, num * sizeof(*pentry)); 362 memcpy(gpio_table, pentry, num * sizeof(*pentry));
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 225bd0f0f675..d62b0a3b5c14 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -85,27 +85,35 @@ unsigned long vrtc_get_time(void)
85 return mktime(year, mon, mday, hour, min, sec); 85 return mktime(year, mon, mday, hour, min, sec);
86} 86}
87 87
88/* Only care about the minutes and seconds */
89int vrtc_set_mmss(unsigned long nowtime) 88int vrtc_set_mmss(unsigned long nowtime)
90{ 89{
91 int real_sec, real_min;
92 unsigned long flags; 90 unsigned long flags;
93 int vrtc_min; 91 struct rtc_time tm;
94 92 int year;
95 spin_lock_irqsave(&rtc_lock, flags); 93 int retval = 0;
96 vrtc_min = vrtc_cmos_read(RTC_MINUTES); 94
97 95 rtc_time_to_tm(nowtime, &tm);
98 real_sec = nowtime % 60; 96 if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
99 real_min = nowtime / 60; 97 /*
100 if (((abs(real_min - vrtc_min) + 15)/30) & 1) 98 * tm.year is the number of years since 1900, and the
101 real_min += 30; 99 * vrtc need the years since 1972.
102 real_min %= 60; 100 */
103 101 year = tm.tm_year - 72;
104 vrtc_cmos_write(real_sec, RTC_SECONDS); 102 spin_lock_irqsave(&rtc_lock, flags);
105 vrtc_cmos_write(real_min, RTC_MINUTES); 103 vrtc_cmos_write(year, RTC_YEAR);
106 spin_unlock_irqrestore(&rtc_lock, flags); 104 vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
107 105 vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
108 return 0; 106 vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
107 vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
108 vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
109 spin_unlock_irqrestore(&rtc_lock, flags);
110 } else {
111 printk(KERN_ERR
112 "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
113 __FUNCTION__, nowtime);
114 retval = -EINVAL;
115 }
116 return retval;
109} 117}
110 118
111void __init mrst_rtc_init(void) 119void __init mrst_rtc_init(void)
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 74704be7b1fe..9a2e590dd202 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -460,7 +460,6 @@ static int setup_power_button(struct platform_device *pdev)
460static void free_power_button(void) 460static void free_power_button(void)
461{ 461{
462 input_unregister_device(power_button_idev); 462 input_unregister_device(power_button_idev);
463 input_free_device(power_button_idev);
464} 463}
465 464
466static int setup_ebook_switch(struct platform_device *pdev) 465static int setup_ebook_switch(struct platform_device *pdev)
@@ -491,7 +490,6 @@ static int setup_ebook_switch(struct platform_device *pdev)
491static void free_ebook_switch(void) 490static void free_ebook_switch(void)
492{ 491{
493 input_unregister_device(ebook_switch_idev); 492 input_unregister_device(ebook_switch_idev);
494 input_free_device(ebook_switch_idev);
495} 493}
496 494
497static int setup_lid_switch(struct platform_device *pdev) 495static int setup_lid_switch(struct platform_device *pdev)
@@ -526,6 +524,7 @@ static int setup_lid_switch(struct platform_device *pdev)
526 524
527err_create_attr: 525err_create_attr:
528 input_unregister_device(lid_switch_idev); 526 input_unregister_device(lid_switch_idev);
527 lid_switch_idev = NULL;
529err_register: 528err_register:
530 input_free_device(lid_switch_idev); 529 input_free_device(lid_switch_idev);
531 return r; 530 return r;
@@ -535,7 +534,6 @@ static void free_lid_switch(void)
535{ 534{
536 device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode); 535 device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode);
537 input_unregister_device(lid_switch_idev); 536 input_unregister_device(lid_switch_idev);
538 input_free_device(lid_switch_idev);
539} 537}
540 538
541static int xo1_sci_probe(struct platform_device *pdev) 539static int xo1_sci_probe(struct platform_device *pdev)
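The olpc-xo1-sci.c hunks enforce the input-device lifetime rule: once input_register_device() has succeeded, input_unregister_device() alone releases the device, so the extra input_free_device() calls removed here would have freed it a second time; input_free_device() belongs only on the pre-registration error path. A sketch of the correct pairing (device and function names are hypothetical):

#include <linux/input.h>
#include <linux/errno.h>

static struct input_dev *example_idev;		/* hypothetical device */

static int example_setup(void)
{
	int err;

	example_idev = input_allocate_device();
	if (!example_idev)
		return -ENOMEM;

	err = input_register_device(example_idev);
	if (err) {
		input_free_device(example_idev);	/* not yet registered: free explicitly */
		return err;
	}
	return 0;
}

static void example_teardown(void)
{
	input_unregister_device(example_idev);		/* also drops the final reference */
}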
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 98718f604eb6..5c86786bbfd2 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -159,10 +159,9 @@ static __init int uv_rtc_allocate_timers(void)
159{ 159{
160 int cpu; 160 int cpu;
161 161
162 blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); 162 blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
163 if (!blade_info) 163 if (!blade_info)
164 return -ENOMEM; 164 return -ENOMEM;
165 memset(blade_info, 0, uv_possible_blades * sizeof(void *));
166 165
167 for_each_present_cpu(cpu) { 166 for_each_present_cpu(cpu) {
168 int nid = cpu_to_node(cpu); 167 int nid = cpu_to_node(cpu);
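The uv_time.c hunk folds kmalloc() plus memset() into kzalloc(), which allocates and zeroes in one step. A one-line sketch (helper name is hypothetical):

#include <linux/slab.h>

/* Hypothetical helper: allocate a zeroed table of per-blade pointers. */
static void **alloc_blade_table(int nblades)
{
	return kzalloc(nblades * sizeof(void *), GFP_KERNEL);
}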
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 3c68768d7a75..6d6e907cee46 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -62,11 +62,9 @@ static void __save_processor_state(struct saved_context *ctxt)
62 * descriptor tables 62 * descriptor tables
63 */ 63 */
64#ifdef CONFIG_X86_32 64#ifdef CONFIG_X86_32
65 store_gdt(&ctxt->gdt);
66 store_idt(&ctxt->idt); 65 store_idt(&ctxt->idt);
67#else 66#else
68/* CONFIG_X86_64 */ 67/* CONFIG_X86_64 */
69 store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
70 store_idt((struct desc_ptr *)&ctxt->idt_limit); 68 store_idt((struct desc_ptr *)&ctxt->idt_limit);
71#endif 69#endif
72 store_tr(ctxt->tr); 70 store_tr(ctxt->tr);
@@ -135,7 +133,10 @@ static void fix_processor_context(void)
135{ 133{
136 int cpu = smp_processor_id(); 134 int cpu = smp_processor_id();
137 struct tss_struct *t = &per_cpu(init_tss, cpu); 135 struct tss_struct *t = &per_cpu(init_tss, cpu);
138 136#ifdef CONFIG_X86_64
137 struct desc_struct *desc = get_cpu_gdt_table(cpu);
138 tss_desc tss;
139#endif
139 set_tss_desc(cpu, t); /* 140 set_tss_desc(cpu, t); /*
140 * This just modifies memory; should not be 141 * This just modifies memory; should not be
141 * necessary. But... This is necessary, because 142 * necessary. But... This is necessary, because
@@ -144,7 +145,9 @@ static void fix_processor_context(void)
144 */ 145 */
145 146
146#ifdef CONFIG_X86_64 147#ifdef CONFIG_X86_64
147 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; 148 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
149 tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
150 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
148 151
149 syscall_init(); /* This sets MSR_*STAR and related */ 152 syscall_init(); /* This sets MSR_*STAR and related */
150#endif 153#endif
@@ -183,11 +186,9 @@ static void __restore_processor_state(struct saved_context *ctxt)
183 * ltr is done i fix_processor_context(). 186 * ltr is done i fix_processor_context().
184 */ 187 */
185#ifdef CONFIG_X86_32 188#ifdef CONFIG_X86_32
186 load_gdt(&ctxt->gdt);
187 load_idt(&ctxt->idt); 189 load_idt(&ctxt->idt);
188#else 190#else
189/* CONFIG_X86_64 */ 191/* CONFIG_X86_64 */
190 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
191 load_idt((const struct desc_ptr *)&ctxt->idt_limit); 192 load_idt((const struct desc_ptr *)&ctxt->idt_limit);
192#endif 193#endif
193 194
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index e6d55f0064df..d0d59bfbccce 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -43,7 +43,7 @@
4334 i386 nice sys_nice 4334 i386 nice sys_nice
4435 i386 ftime 4435 i386 ftime
4536 i386 sync sys_sync 4536 i386 sync sys_sync
4637 i386 kill sys_kill sys32_kill 4637 i386 kill sys_kill
4738 i386 rename sys_rename 4738 i386 rename sys_rename
4839 i386 mkdir sys_mkdir 4839 i386 mkdir sys_mkdir
4940 i386 rmdir sys_rmdir 4940 i386 rmdir sys_rmdir
@@ -123,7 +123,7 @@
123114 i386 wait4 sys_wait4 compat_sys_wait4 123114 i386 wait4 sys_wait4 compat_sys_wait4
124115 i386 swapoff sys_swapoff 124115 i386 swapoff sys_swapoff
125116 i386 sysinfo sys_sysinfo compat_sys_sysinfo 125116 i386 sysinfo sys_sysinfo compat_sys_sysinfo
126117 i386 ipc sys_ipc sys32_ipc 126117 i386 ipc sys_ipc compat_sys_ipc
127118 i386 fsync sys_fsync 127118 i386 fsync sys_fsync
128119 i386 sigreturn sys_sigreturn stub32_sigreturn 128119 i386 sigreturn sys_sigreturn stub32_sigreturn
129120 i386 clone sys_clone stub32_clone 129120 i386 clone sys_clone stub32_clone
@@ -131,7 +131,7 @@
131122 i386 uname sys_newuname 131122 i386 uname sys_newuname
132123 i386 modify_ldt sys_modify_ldt 132123 i386 modify_ldt sys_modify_ldt
133124 i386 adjtimex sys_adjtimex compat_sys_adjtimex 133124 i386 adjtimex sys_adjtimex compat_sys_adjtimex
134125 i386 mprotect sys_mprotect sys32_mprotect 134125 i386 mprotect sys_mprotect
135126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask 135126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask
136127 i386 create_module 136127 i386 create_module
137128 i386 init_module sys_init_module 137128 i386 init_module sys_init_module
@@ -193,7 +193,7 @@
193184 i386 capget sys_capget 193184 i386 capget sys_capget
194185 i386 capset sys_capset 194185 i386 capset sys_capset
195186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack 195186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack
196187 i386 sendfile sys_sendfile sys32_sendfile 196187 i386 sendfile sys_sendfile compat_sys_sendfile
197188 i386 getpmsg 197188 i386 getpmsg
198189 i386 putpmsg 198189 i386 putpmsg
199190 i386 vfork sys_vfork stub32_vfork 199190 i386 vfork sys_vfork stub32_vfork
@@ -259,7 +259,7 @@
259250 i386 fadvise64 sys_fadvise64 sys32_fadvise64 259250 i386 fadvise64 sys_fadvise64 sys32_fadvise64
260# 251 is available for reuse (was briefly sys_set_zone_reclaim) 260# 251 is available for reuse (was briefly sys_set_zone_reclaim)
261252 i386 exit_group sys_exit_group 261252 i386 exit_group sys_exit_group
262253 i386 lookup_dcookie sys_lookup_dcookie sys32_lookup_dcookie 262253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
263254 i386 epoll_create sys_epoll_create 263254 i386 epoll_create sys_epoll_create
264255 i386 epoll_ctl sys_epoll_ctl 264255 i386 epoll_ctl sys_epoll_ctl
265256 i386 epoll_wait sys_epoll_wait 265256 i386 epoll_wait sys_epoll_wait
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index bae601f900ef..e8120346903b 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -39,4 +39,5 @@ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/ina
39 39
40HOST_EXTRACFLAGS += -I$(srctree)/tools/include 40HOST_EXTRACFLAGS += -I$(srctree)/tools/include
41hostprogs-y += relocs 41hostprogs-y += relocs
42relocs-objs := relocs_32.o relocs_64.o relocs_common.o
42relocs: $(obj)/relocs 43relocs: $(obj)/relocs
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 79d67bd507fa..590be1090892 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -1,43 +1,36 @@
1#include <stdio.h> 1/* This is included from relocs_32/64.c */
2#include <stdarg.h> 2
3#include <stdlib.h> 3#define ElfW(type) _ElfW(ELF_BITS, type)
4#include <stdint.h> 4#define _ElfW(bits, type) __ElfW(bits, type)
5#include <string.h> 5#define __ElfW(bits, type) Elf##bits##_##type
6#include <errno.h> 6
7#include <unistd.h> 7#define Elf_Addr ElfW(Addr)
8#include <elf.h> 8#define Elf_Ehdr ElfW(Ehdr)
9#include <byteswap.h> 9#define Elf_Phdr ElfW(Phdr)
10#define USE_BSD 10#define Elf_Shdr ElfW(Shdr)
11#include <endian.h> 11#define Elf_Sym ElfW(Sym)
12#include <regex.h> 12
13#include <tools/le_byteshift.h> 13static Elf_Ehdr ehdr;
14 14
15static void die(char *fmt, ...); 15struct relocs {
16 16 uint32_t *offset;
17#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 17 unsigned long count;
18static Elf32_Ehdr ehdr; 18 unsigned long size;
19static unsigned long reloc_count, reloc_idx; 19};
20static unsigned long *relocs; 20
21static unsigned long reloc16_count, reloc16_idx; 21static struct relocs relocs16;
22static unsigned long *relocs16; 22static struct relocs relocs32;
23static struct relocs relocs64;
23 24
24struct section { 25struct section {
25 Elf32_Shdr shdr; 26 Elf_Shdr shdr;
26 struct section *link; 27 struct section *link;
27 Elf32_Sym *symtab; 28 Elf_Sym *symtab;
28 Elf32_Rel *reltab; 29 Elf_Rel *reltab;
29 char *strtab; 30 char *strtab;
30}; 31};
31static struct section *secs; 32static struct section *secs;
32 33
33enum symtype {
34 S_ABS,
35 S_REL,
36 S_SEG,
37 S_LIN,
38 S_NSYMTYPES
39};
40
41static const char * const sym_regex_kernel[S_NSYMTYPES] = { 34static const char * const sym_regex_kernel[S_NSYMTYPES] = {
42/* 35/*
43 * Following symbols have been audited. There values are constant and do 36 * Following symbols have been audited. There values are constant and do
@@ -49,6 +42,9 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
49 "^(xen_irq_disable_direct_reloc$|" 42 "^(xen_irq_disable_direct_reloc$|"
50 "xen_save_fl_direct_reloc$|" 43 "xen_save_fl_direct_reloc$|"
51 "VDSO|" 44 "VDSO|"
45#if ELF_BITS == 64
46 "__vvar_page|"
47#endif
52 "__crc_)", 48 "__crc_)",
53 49
54/* 50/*
@@ -72,6 +68,11 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
72 "__end_rodata|" 68 "__end_rodata|"
73 "__initramfs_start|" 69 "__initramfs_start|"
74 "(jiffies|jiffies_64)|" 70 "(jiffies|jiffies_64)|"
71#if ELF_BITS == 64
72 "__per_cpu_load|"
73 "init_per_cpu__.*|"
74 "__end_rodata_hpage_align|"
75#endif
75 "_end)$" 76 "_end)$"
76}; 77};
77 78
@@ -132,15 +133,6 @@ static void regex_init(int use_real_mode)
132 } 133 }
133} 134}
134 135
135static void die(char *fmt, ...)
136{
137 va_list ap;
138 va_start(ap, fmt);
139 vfprintf(stderr, fmt, ap);
140 va_end(ap);
141 exit(1);
142}
143
144static const char *sym_type(unsigned type) 136static const char *sym_type(unsigned type)
145{ 137{
146 static const char *type_name[] = { 138 static const char *type_name[] = {
@@ -198,6 +190,24 @@ static const char *rel_type(unsigned type)
198{ 190{
199 static const char *type_name[] = { 191 static const char *type_name[] = {
200#define REL_TYPE(X) [X] = #X 192#define REL_TYPE(X) [X] = #X
193#if ELF_BITS == 64
194 REL_TYPE(R_X86_64_NONE),
195 REL_TYPE(R_X86_64_64),
196 REL_TYPE(R_X86_64_PC32),
197 REL_TYPE(R_X86_64_GOT32),
198 REL_TYPE(R_X86_64_PLT32),
199 REL_TYPE(R_X86_64_COPY),
200 REL_TYPE(R_X86_64_GLOB_DAT),
201 REL_TYPE(R_X86_64_JUMP_SLOT),
202 REL_TYPE(R_X86_64_RELATIVE),
203 REL_TYPE(R_X86_64_GOTPCREL),
204 REL_TYPE(R_X86_64_32),
205 REL_TYPE(R_X86_64_32S),
206 REL_TYPE(R_X86_64_16),
207 REL_TYPE(R_X86_64_PC16),
208 REL_TYPE(R_X86_64_8),
209 REL_TYPE(R_X86_64_PC8),
210#else
201 REL_TYPE(R_386_NONE), 211 REL_TYPE(R_386_NONE),
202 REL_TYPE(R_386_32), 212 REL_TYPE(R_386_32),
203 REL_TYPE(R_386_PC32), 213 REL_TYPE(R_386_PC32),
@@ -213,6 +223,7 @@ static const char *rel_type(unsigned type)
213 REL_TYPE(R_386_PC8), 223 REL_TYPE(R_386_PC8),
214 REL_TYPE(R_386_16), 224 REL_TYPE(R_386_16),
215 REL_TYPE(R_386_PC16), 225 REL_TYPE(R_386_PC16),
226#endif
216#undef REL_TYPE 227#undef REL_TYPE
217 }; 228 };
218 const char *name = "unknown type rel type name"; 229 const char *name = "unknown type rel type name";
@@ -240,7 +251,7 @@ static const char *sec_name(unsigned shndx)
240 return name; 251 return name;
241} 252}
242 253
243static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym) 254static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
244{ 255{
245 const char *name; 256 const char *name;
246 name = "<noname>"; 257 name = "<noname>";
@@ -253,15 +264,42 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
253 return name; 264 return name;
254} 265}
255 266
267static Elf_Sym *sym_lookup(const char *symname)
268{
269 int i;
270 for (i = 0; i < ehdr.e_shnum; i++) {
271 struct section *sec = &secs[i];
272 long nsyms;
273 char *strtab;
274 Elf_Sym *symtab;
275 Elf_Sym *sym;
276
277 if (sec->shdr.sh_type != SHT_SYMTAB)
278 continue;
256 279
280 nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
281 symtab = sec->symtab;
282 strtab = sec->link->strtab;
283
284 for (sym = symtab; --nsyms >= 0; sym++) {
285 if (!sym->st_name)
286 continue;
287 if (strcmp(symname, strtab + sym->st_name) == 0)
288 return sym;
289 }
290 }
291 return 0;
292}
257 293
258#if BYTE_ORDER == LITTLE_ENDIAN 294#if BYTE_ORDER == LITTLE_ENDIAN
259#define le16_to_cpu(val) (val) 295#define le16_to_cpu(val) (val)
260#define le32_to_cpu(val) (val) 296#define le32_to_cpu(val) (val)
297#define le64_to_cpu(val) (val)
261#endif 298#endif
262#if BYTE_ORDER == BIG_ENDIAN 299#if BYTE_ORDER == BIG_ENDIAN
263#define le16_to_cpu(val) bswap_16(val) 300#define le16_to_cpu(val) bswap_16(val)
264#define le32_to_cpu(val) bswap_32(val) 301#define le32_to_cpu(val) bswap_32(val)
302#define le64_to_cpu(val) bswap_64(val)
265#endif 303#endif
266 304
267static uint16_t elf16_to_cpu(uint16_t val) 305static uint16_t elf16_to_cpu(uint16_t val)
@@ -274,6 +312,23 @@ static uint32_t elf32_to_cpu(uint32_t val)
274 return le32_to_cpu(val); 312 return le32_to_cpu(val);
275} 313}
276 314
315#define elf_half_to_cpu(x) elf16_to_cpu(x)
316#define elf_word_to_cpu(x) elf32_to_cpu(x)
317
318#if ELF_BITS == 64
319static uint64_t elf64_to_cpu(uint64_t val)
320{
321 return le64_to_cpu(val);
322}
323#define elf_addr_to_cpu(x) elf64_to_cpu(x)
324#define elf_off_to_cpu(x) elf64_to_cpu(x)
325#define elf_xword_to_cpu(x) elf64_to_cpu(x)
326#else
327#define elf_addr_to_cpu(x) elf32_to_cpu(x)
328#define elf_off_to_cpu(x) elf32_to_cpu(x)
329#define elf_xword_to_cpu(x) elf32_to_cpu(x)
330#endif
331
277static void read_ehdr(FILE *fp) 332static void read_ehdr(FILE *fp)
278{ 333{
279 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) { 334 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
@@ -283,8 +338,8 @@ static void read_ehdr(FILE *fp)
283 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) { 338 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
284 die("No ELF magic\n"); 339 die("No ELF magic\n");
285 } 340 }
286 if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) { 341 if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) {
287 die("Not a 32 bit executable\n"); 342 die("Not a %d bit executable\n", ELF_BITS);
288 } 343 }
289 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) { 344 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
290 die("Not a LSB ELF executable\n"); 345 die("Not a LSB ELF executable\n");
@@ -293,36 +348,36 @@ static void read_ehdr(FILE *fp)
293 die("Unknown ELF version\n"); 348 die("Unknown ELF version\n");
294 } 349 }
295 /* Convert the fields to native endian */ 350 /* Convert the fields to native endian */
296 ehdr.e_type = elf16_to_cpu(ehdr.e_type); 351 ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
297 ehdr.e_machine = elf16_to_cpu(ehdr.e_machine); 352 ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
298 ehdr.e_version = elf32_to_cpu(ehdr.e_version); 353 ehdr.e_version = elf_word_to_cpu(ehdr.e_version);
299 ehdr.e_entry = elf32_to_cpu(ehdr.e_entry); 354 ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry);
300 ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff); 355 ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff);
301 ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff); 356 ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff);
302 ehdr.e_flags = elf32_to_cpu(ehdr.e_flags); 357 ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags);
303 ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize); 358 ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize);
304 ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize); 359 ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
305 ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum); 360 ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum);
306 ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize); 361 ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
307 ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum); 362 ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
308 ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx); 363 ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
309 364
310 if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) { 365 if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
311 die("Unsupported ELF header type\n"); 366 die("Unsupported ELF header type\n");
312 } 367 }
313 if (ehdr.e_machine != EM_386) { 368 if (ehdr.e_machine != ELF_MACHINE) {
314 die("Not for x86\n"); 369 die("Not for %s\n", ELF_MACHINE_NAME);
315 } 370 }
316 if (ehdr.e_version != EV_CURRENT) { 371 if (ehdr.e_version != EV_CURRENT) {
317 die("Unknown ELF version\n"); 372 die("Unknown ELF version\n");
318 } 373 }
319 if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) { 374 if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) {
320 die("Bad Elf header size\n"); 375 die("Bad Elf header size\n");
321 } 376 }
322 if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) { 377 if (ehdr.e_phentsize != sizeof(Elf_Phdr)) {
323 die("Bad program header entry\n"); 378 die("Bad program header entry\n");
324 } 379 }
325 if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) { 380 if (ehdr.e_shentsize != sizeof(Elf_Shdr)) {
326 die("Bad section header entry\n"); 381 die("Bad section header entry\n");
327 } 382 }
328 if (ehdr.e_shstrndx >= ehdr.e_shnum) { 383 if (ehdr.e_shstrndx >= ehdr.e_shnum) {
@@ -333,7 +388,7 @@ static void read_ehdr(FILE *fp)
333static void read_shdrs(FILE *fp) 388static void read_shdrs(FILE *fp)
334{ 389{
335 int i; 390 int i;
336 Elf32_Shdr shdr; 391 Elf_Shdr shdr;
337 392
338 secs = calloc(ehdr.e_shnum, sizeof(struct section)); 393 secs = calloc(ehdr.e_shnum, sizeof(struct section));
339 if (!secs) { 394 if (!secs) {
@@ -349,16 +404,16 @@ static void read_shdrs(FILE *fp)
349 if (fread(&shdr, sizeof shdr, 1, fp) != 1) 404 if (fread(&shdr, sizeof shdr, 1, fp) != 1)
350 die("Cannot read ELF section headers %d/%d: %s\n", 405 die("Cannot read ELF section headers %d/%d: %s\n",
351 i, ehdr.e_shnum, strerror(errno)); 406 i, ehdr.e_shnum, strerror(errno));
352 sec->shdr.sh_name = elf32_to_cpu(shdr.sh_name); 407 sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
353 sec->shdr.sh_type = elf32_to_cpu(shdr.sh_type); 408 sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
354 sec->shdr.sh_flags = elf32_to_cpu(shdr.sh_flags); 409 sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
355 sec->shdr.sh_addr = elf32_to_cpu(shdr.sh_addr); 410 sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr);
356 sec->shdr.sh_offset = elf32_to_cpu(shdr.sh_offset); 411 sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset);
357 sec->shdr.sh_size = elf32_to_cpu(shdr.sh_size); 412 sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size);
358 sec->shdr.sh_link = elf32_to_cpu(shdr.sh_link); 413 sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link);
359 sec->shdr.sh_info = elf32_to_cpu(shdr.sh_info); 414 sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
360 sec->shdr.sh_addralign = elf32_to_cpu(shdr.sh_addralign); 415 sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
361 sec->shdr.sh_entsize = elf32_to_cpu(shdr.sh_entsize); 416 sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
362 if (sec->shdr.sh_link < ehdr.e_shnum) 417 if (sec->shdr.sh_link < ehdr.e_shnum)
363 sec->link = &secs[sec->shdr.sh_link]; 418 sec->link = &secs[sec->shdr.sh_link];
364 } 419 }
@@ -412,12 +467,12 @@ static void read_symtabs(FILE *fp)
412 die("Cannot read symbol table: %s\n", 467 die("Cannot read symbol table: %s\n",
413 strerror(errno)); 468 strerror(errno));
414 } 469 }
415 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { 470 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
416 Elf32_Sym *sym = &sec->symtab[j]; 471 Elf_Sym *sym = &sec->symtab[j];
417 sym->st_name = elf32_to_cpu(sym->st_name); 472 sym->st_name = elf_word_to_cpu(sym->st_name);
418 sym->st_value = elf32_to_cpu(sym->st_value); 473 sym->st_value = elf_addr_to_cpu(sym->st_value);
419 sym->st_size = elf32_to_cpu(sym->st_size); 474 sym->st_size = elf_xword_to_cpu(sym->st_size);
420 sym->st_shndx = elf16_to_cpu(sym->st_shndx); 475 sym->st_shndx = elf_half_to_cpu(sym->st_shndx);
421 } 476 }
422 } 477 }
423} 478}
@@ -428,7 +483,7 @@ static void read_relocs(FILE *fp)
428 int i,j; 483 int i,j;
429 for (i = 0; i < ehdr.e_shnum; i++) { 484 for (i = 0; i < ehdr.e_shnum; i++) {
430 struct section *sec = &secs[i]; 485 struct section *sec = &secs[i];
431 if (sec->shdr.sh_type != SHT_REL) { 486 if (sec->shdr.sh_type != SHT_REL_TYPE) {
432 continue; 487 continue;
433 } 488 }
434 sec->reltab = malloc(sec->shdr.sh_size); 489 sec->reltab = malloc(sec->shdr.sh_size);
@@ -445,10 +500,13 @@ static void read_relocs(FILE *fp)
445 die("Cannot read symbol table: %s\n", 500 die("Cannot read symbol table: %s\n",
446 strerror(errno)); 501 strerror(errno));
447 } 502 }
448 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 503 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
449 Elf32_Rel *rel = &sec->reltab[j]; 504 Elf_Rel *rel = &sec->reltab[j];
450 rel->r_offset = elf32_to_cpu(rel->r_offset); 505 rel->r_offset = elf_addr_to_cpu(rel->r_offset);
451 rel->r_info = elf32_to_cpu(rel->r_info); 506 rel->r_info = elf_xword_to_cpu(rel->r_info);
507#if (SHT_REL_TYPE == SHT_RELA)
508 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
509#endif
452 } 510 }
453 } 511 }
454} 512}
@@ -457,6 +515,13 @@ static void read_relocs(FILE *fp)
457static void print_absolute_symbols(void) 515static void print_absolute_symbols(void)
458{ 516{
459 int i; 517 int i;
518 const char *format;
519
520 if (ELF_BITS == 64)
521 format = "%5d %016"PRIx64" %5"PRId64" %10s %10s %12s %s\n";
522 else
523 format = "%5d %08"PRIx32" %5"PRId32" %10s %10s %12s %s\n";
524
460 printf("Absolute symbols\n"); 525 printf("Absolute symbols\n");
461 printf(" Num: Value Size Type Bind Visibility Name\n"); 526 printf(" Num: Value Size Type Bind Visibility Name\n");
462 for (i = 0; i < ehdr.e_shnum; i++) { 527 for (i = 0; i < ehdr.e_shnum; i++) {
@@ -468,19 +533,19 @@ static void print_absolute_symbols(void)
468 continue; 533 continue;
469 } 534 }
470 sym_strtab = sec->link->strtab; 535 sym_strtab = sec->link->strtab;
471 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { 536 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
472 Elf32_Sym *sym; 537 Elf_Sym *sym;
473 const char *name; 538 const char *name;
474 sym = &sec->symtab[j]; 539 sym = &sec->symtab[j];
475 name = sym_name(sym_strtab, sym); 540 name = sym_name(sym_strtab, sym);
476 if (sym->st_shndx != SHN_ABS) { 541 if (sym->st_shndx != SHN_ABS) {
477 continue; 542 continue;
478 } 543 }
479 printf("%5d %08x %5d %10s %10s %12s %s\n", 544 printf(format,
480 j, sym->st_value, sym->st_size, 545 j, sym->st_value, sym->st_size,
481 sym_type(ELF32_ST_TYPE(sym->st_info)), 546 sym_type(ELF_ST_TYPE(sym->st_info)),
482 sym_bind(ELF32_ST_BIND(sym->st_info)), 547 sym_bind(ELF_ST_BIND(sym->st_info)),
483 sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)), 548 sym_visibility(ELF_ST_VISIBILITY(sym->st_other)),
484 name); 549 name);
485 } 550 }
486 } 551 }
@@ -490,14 +555,20 @@ static void print_absolute_symbols(void)
490static void print_absolute_relocs(void) 555static void print_absolute_relocs(void)
491{ 556{
492 int i, printed = 0; 557 int i, printed = 0;
558 const char *format;
559
560 if (ELF_BITS == 64)
561 format = "%016"PRIx64" %016"PRIx64" %10s %016"PRIx64" %s\n";
562 else
563 format = "%08"PRIx32" %08"PRIx32" %10s %08"PRIx32" %s\n";
493 564
494 for (i = 0; i < ehdr.e_shnum; i++) { 565 for (i = 0; i < ehdr.e_shnum; i++) {
495 struct section *sec = &secs[i]; 566 struct section *sec = &secs[i];
496 struct section *sec_applies, *sec_symtab; 567 struct section *sec_applies, *sec_symtab;
497 char *sym_strtab; 568 char *sym_strtab;
498 Elf32_Sym *sh_symtab; 569 Elf_Sym *sh_symtab;
499 int j; 570 int j;
500 if (sec->shdr.sh_type != SHT_REL) { 571 if (sec->shdr.sh_type != SHT_REL_TYPE) {
501 continue; 572 continue;
502 } 573 }
503 sec_symtab = sec->link; 574 sec_symtab = sec->link;
@@ -507,12 +578,12 @@ static void print_absolute_relocs(void)
507 } 578 }
508 sh_symtab = sec_symtab->symtab; 579 sh_symtab = sec_symtab->symtab;
509 sym_strtab = sec_symtab->link->strtab; 580 sym_strtab = sec_symtab->link->strtab;
510 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 581 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
511 Elf32_Rel *rel; 582 Elf_Rel *rel;
512 Elf32_Sym *sym; 583 Elf_Sym *sym;
513 const char *name; 584 const char *name;
514 rel = &sec->reltab[j]; 585 rel = &sec->reltab[j];
515 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; 586 sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
516 name = sym_name(sym_strtab, sym); 587 name = sym_name(sym_strtab, sym);
517 if (sym->st_shndx != SHN_ABS) { 588 if (sym->st_shndx != SHN_ABS) {
518 continue; 589 continue;
@@ -542,10 +613,10 @@ static void print_absolute_relocs(void)
542 printed = 1; 613 printed = 1;
543 } 614 }
544 615
545 printf("%08x %08x %10s %08x %s\n", 616 printf(format,
546 rel->r_offset, 617 rel->r_offset,
547 rel->r_info, 618 rel->r_info,
548 rel_type(ELF32_R_TYPE(rel->r_info)), 619 rel_type(ELF_R_TYPE(rel->r_info)),
549 sym->st_value, 620 sym->st_value,
550 name); 621 name);
551 } 622 }
@@ -555,19 +626,34 @@ static void print_absolute_relocs(void)
555 printf("\n"); 626 printf("\n");
556} 627}
557 628
558static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym), 629static void add_reloc(struct relocs *r, uint32_t offset)
559 int use_real_mode) 630{
631 if (r->count == r->size) {
632 unsigned long newsize = r->size + 50000;
633 void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
634
635 if (!mem)
636 die("realloc of %ld entries for relocs failed\n",
637 newsize);
638 r->offset = mem;
639 r->size = newsize;
640 }
641 r->offset[r->count++] = offset;
642}
643
644static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
645 Elf_Sym *sym, const char *symname))
560{ 646{
561 int i; 647 int i;
562 /* Walk through the relocations */ 648 /* Walk through the relocations */
563 for (i = 0; i < ehdr.e_shnum; i++) { 649 for (i = 0; i < ehdr.e_shnum; i++) {
564 char *sym_strtab; 650 char *sym_strtab;
565 Elf32_Sym *sh_symtab; 651 Elf_Sym *sh_symtab;
566 struct section *sec_applies, *sec_symtab; 652 struct section *sec_applies, *sec_symtab;
567 int j; 653 int j;
568 struct section *sec = &secs[i]; 654 struct section *sec = &secs[i];
569 655
570 if (sec->shdr.sh_type != SHT_REL) { 656 if (sec->shdr.sh_type != SHT_REL_TYPE) {
571 continue; 657 continue;
572 } 658 }
573 sec_symtab = sec->link; 659 sec_symtab = sec->link;
@@ -577,101 +663,281 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
577 } 663 }
578 sh_symtab = sec_symtab->symtab; 664 sh_symtab = sec_symtab->symtab;
579 sym_strtab = sec_symtab->link->strtab; 665 sym_strtab = sec_symtab->link->strtab;
580 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 666 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
581 Elf32_Rel *rel; 667 Elf_Rel *rel = &sec->reltab[j];
582 Elf32_Sym *sym; 668 Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
583 unsigned r_type; 669 const char *symname = sym_name(sym_strtab, sym);
584 const char *symname;
585 int shn_abs;
586 670
587 rel = &sec->reltab[j]; 671 process(sec, rel, sym, symname);
588 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; 672 }
589 r_type = ELF32_R_TYPE(rel->r_info); 673 }
590 674}
591 shn_abs = sym->st_shndx == SHN_ABS;
592
593 switch (r_type) {
594 case R_386_NONE:
595 case R_386_PC32:
596 case R_386_PC16:
597 case R_386_PC8:
598 /*
599 * NONE can be ignored and and PC relative
600 * relocations don't need to be adjusted.
601 */
602 break;
603 675
604 case R_386_16: 676/*
605 symname = sym_name(sym_strtab, sym); 677 * The .data..percpu section is a special case for x86_64 SMP kernels.
606 if (!use_real_mode) 678 * It is used to initialize the actual per_cpu areas and to provide
607 goto bad; 679 * definitions for the per_cpu variables that correspond to their offsets
608 if (shn_abs) { 680 * within the percpu area. Since the values of all of the symbols need
609 if (is_reloc(S_ABS, symname)) 681 * to be offsets from the start of the per_cpu area the virtual address
610 break; 682 * (sh_addr) of .data..percpu is 0 in SMP kernels.
611 else if (!is_reloc(S_SEG, symname)) 683 *
612 goto bad; 684 * This means that:
613 } else { 685 *
614 if (is_reloc(S_LIN, symname)) 686 * Relocations that reference symbols in the per_cpu area do not
615 goto bad; 687 * need further relocation (since the value is an offset relative
616 else 688 * to the start of the per_cpu area that does not change).
617 break; 689 *
618 } 690 * Relocations that apply to the per_cpu area need to have their
619 visit(rel, sym); 691 * offset adjusted by the value of __per_cpu_load to make them
620 break; 692 * point to the correct place in the loaded image (because the
693 * virtual address of .data..percpu is 0).
694 *
695 * For non SMP kernels .data..percpu is linked as part of the normal
696 * kernel data and does not require special treatment.
697 *
698 */
699static int per_cpu_shndx = -1;
700Elf_Addr per_cpu_load_addr;
621 701
622 case R_386_32: 702static void percpu_init(void)
623 symname = sym_name(sym_strtab, sym); 703{
624 if (shn_abs) { 704 int i;
625 if (is_reloc(S_ABS, symname)) 705 for (i = 0; i < ehdr.e_shnum; i++) {
626 break; 706 ElfW(Sym) *sym;
627 else if (!is_reloc(S_REL, symname)) 707 if (strcmp(sec_name(i), ".data..percpu"))
628 goto bad; 708 continue;
629 } else { 709
630 if (use_real_mode && 710 if (secs[i].shdr.sh_addr != 0) /* non SMP kernel */
631 !is_reloc(S_LIN, symname)) 711 return;
632 break; 712
633 } 713 sym = sym_lookup("__per_cpu_load");
634 visit(rel, sym); 714 if (!sym)
635 break; 715 die("can't find __per_cpu_load\n");
636 default: 716
637 die("Unsupported relocation type: %s (%d)\n", 717 per_cpu_shndx = i;
638 rel_type(r_type), r_type); 718 per_cpu_load_addr = sym->st_value;
719 return;
720 }
721}
722
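The comment above boils down to a rebase: a relocation whose target lies in the zero-based .data..percpu section must have __per_cpu_load added to its offset before it is recorded, while everything else passes through unchanged. A minimal stand-alone sketch of that rule (hypothetical helper name, not taken from the patch):

#include <stdint.h>

/* Hypothetical illustration: rebase an offset that applies to the
 * zero-based .data..percpu section onto __per_cpu_load so that it
 * points into the loaded image; other offsets are returned untouched. */
static uint64_t rebase_percpu_offset(uint64_t r_offset, int in_percpu,
				     uint64_t per_cpu_load)
{
	return in_percpu ? r_offset + per_cpu_load : r_offset;
}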
723#if ELF_BITS == 64
724
725/*
726 * Check to see if a symbol lies in the .data..percpu section.
727 * For some as yet not understood reason the "__init_begin"
728 * symbol which immediately preceeds the .data..percpu section
729 * also shows up as it it were part of it so we do an explict
730 * check for that symbol name and ignore it.
731 */
732static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
733{
734 return (sym->st_shndx == per_cpu_shndx) &&
735 strcmp(symname, "__init_begin");
736}
737
738
739static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
740 const char *symname)
741{
742 unsigned r_type = ELF64_R_TYPE(rel->r_info);
743 ElfW(Addr) offset = rel->r_offset;
744 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
745
746 if (sym->st_shndx == SHN_UNDEF)
747 return 0;
748
749 /*
750 * Adjust the offset if this reloc applies to the percpu section.
751 */
752 if (sec->shdr.sh_info == per_cpu_shndx)
753 offset += per_cpu_load_addr;
754
755 switch (r_type) {
756 case R_X86_64_NONE:
757 case R_X86_64_PC32:
758 /*
759 * NONE can be ignored and PC relative relocations don't
760 * need to be adjusted.
761 */
762 break;
763
764 case R_X86_64_32:
765 case R_X86_64_32S:
766 case R_X86_64_64:
767 /*
768 * References to the percpu area don't need to be adjusted.
769 */
770 if (is_percpu_sym(sym, symname))
771 break;
772
773 if (shn_abs) {
774 /*
775 * Whitelisted absolute symbols do not require
776 * relocation.
777 */
778 if (is_reloc(S_ABS, symname))
639 break; 779 break;
640 bad: 780
641 symname = sym_name(sym_strtab, sym); 781 die("Invalid absolute %s relocation: %s\n",
642 die("Invalid %s %s relocation: %s\n", 782 rel_type(r_type), symname);
643 shn_abs ? "absolute" : "relative", 783 break;
644 rel_type(r_type), symname);
645 }
646 } 784 }
785
786 /*
787 * Relocation offsets for 64 bit kernels are output
788 * as 32 bits and sign extended back to 64 bits when
789 * the relocations are processed.
790 * Make sure that the offset will fit.
791 */
792 if ((int32_t)offset != (int64_t)offset)
793 die("Relocation offset doesn't fit in 32 bits\n");
794
795 if (r_type == R_X86_64_64)
796 add_reloc(&relocs64, offset);
797 else
798 add_reloc(&relocs32, offset);
799 break;
800
801 default:
802 die("Unsupported relocation type: %s (%d)\n",
803 rel_type(r_type), r_type);
804 break;
647 } 805 }
806
807 return 0;
648} 808}
649 809
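The fit check in do_reloc64 relies on a truncate-and-sign-extend round trip: an offset can be stored as a signed 32-bit value exactly when casting it to int32_t and back reproduces the original 64-bit value. The same test, restated as a self-contained helper (names are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Fails unless sign-extending the low 32 bits of offset reproduces the
 * full 64-bit value, i.e. the offset survives being emitted as an s32. */
static void check_offset_fits_s32(uint64_t offset)
{
	if ((int32_t)offset != (int64_t)offset) {
		fprintf(stderr, "offset 0x%llx does not fit in 32 bits\n",
			(unsigned long long)offset);
		exit(1);
	}
}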
650static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym) 810#else
811
812static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
813 const char *symname)
651{ 814{
652 if (ELF32_R_TYPE(rel->r_info) == R_386_16) 815 unsigned r_type = ELF32_R_TYPE(rel->r_info);
653 reloc16_count++; 816 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
654 else 817
655 reloc_count++; 818 switch (r_type) {
819 case R_386_NONE:
820 case R_386_PC32:
821 case R_386_PC16:
822 case R_386_PC8:
823 /*
824 * NONE can be ignored and PC relative relocations don't
825 * need to be adjusted.
826 */
827 break;
828
829 case R_386_32:
830 if (shn_abs) {
831 /*
832 * Whitelisted absolute symbols do not require
833 * relocation.
834 */
835 if (is_reloc(S_ABS, symname))
836 break;
837
838 die("Invalid absolute %s relocation: %s\n",
839 rel_type(r_type), symname);
840 break;
841 }
842
843 add_reloc(&relocs32, rel->r_offset);
844 break;
845
846 default:
847 die("Unsupported relocation type: %s (%d)\n",
848 rel_type(r_type), r_type);
849 break;
850 }
851
852 return 0;
656} 853}
657 854
658static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym) 855static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
856 const char *symname)
659{ 857{
660 /* Remember the address that needs to be adjusted. */ 858 unsigned r_type = ELF32_R_TYPE(rel->r_info);
661 if (ELF32_R_TYPE(rel->r_info) == R_386_16) 859 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
662 relocs16[reloc16_idx++] = rel->r_offset; 860
663 else 861 switch (r_type) {
664 relocs[reloc_idx++] = rel->r_offset; 862 case R_386_NONE:
863 case R_386_PC32:
864 case R_386_PC16:
865 case R_386_PC8:
866 /*
867 * NONE can be ignored and PC relative relocations don't
868 * need to be adjusted.
869 */
870 break;
871
872 case R_386_16:
873 if (shn_abs) {
874 /*
875 * Whitelisted absolute symbols do not require
876 * relocation.
877 */
878 if (is_reloc(S_ABS, symname))
879 break;
880
881 if (is_reloc(S_SEG, symname)) {
882 add_reloc(&relocs16, rel->r_offset);
883 break;
884 }
885 } else {
886 if (!is_reloc(S_LIN, symname))
887 break;
888 }
889 die("Invalid %s %s relocation: %s\n",
890 shn_abs ? "absolute" : "relative",
891 rel_type(r_type), symname);
892 break;
893
894 case R_386_32:
895 if (shn_abs) {
896 /*
897 * Whitelisted absolute symbols do not require
898 * relocation.
899 */
900 if (is_reloc(S_ABS, symname))
901 break;
902
903 if (is_reloc(S_REL, symname)) {
904 add_reloc(&relocs32, rel->r_offset);
905 break;
906 }
907 } else {
908 if (is_reloc(S_LIN, symname))
909 add_reloc(&relocs32, rel->r_offset);
910 break;
911 }
912 die("Invalid %s %s relocation: %s\n",
913 shn_abs ? "absolute" : "relative",
914 rel_type(r_type), symname);
915 break;
916
917 default:
918 die("Unsupported relocation type: %s (%d)\n",
919 rel_type(r_type), r_type);
920 break;
921 }
922
923 return 0;
665} 924}
666 925
926#endif
927
667static int cmp_relocs(const void *va, const void *vb) 928static int cmp_relocs(const void *va, const void *vb)
668{ 929{
669 const unsigned long *a, *b; 930 const uint32_t *a, *b;
670 a = va; b = vb; 931 a = va; b = vb;
671 return (*a == *b)? 0 : (*a > *b)? 1 : -1; 932 return (*a == *b)? 0 : (*a > *b)? 1 : -1;
672} 933}
673 934
674static int write32(unsigned int v, FILE *f) 935static void sort_relocs(struct relocs *r)
936{
937 qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
938}
939
940static int write32(uint32_t v, FILE *f)
675{ 941{
676 unsigned char buf[4]; 942 unsigned char buf[4];
677 943
@@ -679,33 +945,40 @@ static int write32(unsigned int v, FILE *f)
679 return fwrite(buf, 1, 4, f) == 4 ? 0 : -1; 945 return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
680} 946}
681 947
948static int write32_as_text(uint32_t v, FILE *f)
949{
950 return fprintf(f, "\t.long 0x%08"PRIx32"\n", v) > 0 ? 0 : -1;
951}
952
682static void emit_relocs(int as_text, int use_real_mode) 953static void emit_relocs(int as_text, int use_real_mode)
683{ 954{
684 int i; 955 int i;
685 /* Count how many relocations I have and allocate space for them. */ 956 int (*write_reloc)(uint32_t, FILE *) = write32;
686 reloc_count = 0; 957 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
687 walk_relocs(count_reloc, use_real_mode); 958 const char *symname);
688 relocs = malloc(reloc_count * sizeof(relocs[0])); 959
689 if (!relocs) { 960#if ELF_BITS == 64
690 die("malloc of %d entries for relocs failed\n", 961 if (!use_real_mode)
691 reloc_count); 962 do_reloc = do_reloc64;
692 } 963 else
964 die("--realmode not valid for a 64-bit ELF file");
965#else
966 if (!use_real_mode)
967 do_reloc = do_reloc32;
968 else
969 do_reloc = do_reloc_real;
970#endif
693 971
694 relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
695 if (!relocs16) {
696 die("malloc of %d entries for relocs16 failed\n",
697 reloc16_count);
698 }
699 /* Collect up the relocations */ 972 /* Collect up the relocations */
700 reloc_idx = 0; 973 walk_relocs(do_reloc);
701 walk_relocs(collect_reloc, use_real_mode);
702 974
703 if (reloc16_count && !use_real_mode) 975 if (relocs16.count && !use_real_mode)
704 die("Segment relocations found but --realmode not specified\n"); 976 die("Segment relocations found but --realmode not specified\n");
705 977
706 /* Order the relocations for more efficient processing */ 978 /* Order the relocations for more efficient processing */
707 qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs); 979 sort_relocs(&relocs16);
708 qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs); 980 sort_relocs(&relocs32);
981 sort_relocs(&relocs64);
709 982
710 /* Print the relocations */ 983 /* Print the relocations */
711 if (as_text) { 984 if (as_text) {
@@ -714,114 +987,60 @@ static void emit_relocs(int as_text, int use_real_mode)
714 */ 987 */
715 printf(".section \".data.reloc\",\"a\"\n"); 988 printf(".section \".data.reloc\",\"a\"\n");
716 printf(".balign 4\n"); 989 printf(".balign 4\n");
717 if (use_real_mode) { 990 write_reloc = write32_as_text;
718 printf("\t.long %lu\n", reloc16_count);
719 for (i = 0; i < reloc16_count; i++)
720 printf("\t.long 0x%08lx\n", relocs16[i]);
721 printf("\t.long %lu\n", reloc_count);
722 for (i = 0; i < reloc_count; i++) {
723 printf("\t.long 0x%08lx\n", relocs[i]);
724 }
725 } else {
726 /* Print a stop */
727 printf("\t.long 0x%08lx\n", (unsigned long)0);
728 for (i = 0; i < reloc_count; i++) {
729 printf("\t.long 0x%08lx\n", relocs[i]);
730 }
731 }
732
733 printf("\n");
734 } 991 }
735 else {
736 if (use_real_mode) {
737 write32(reloc16_count, stdout);
738 for (i = 0; i < reloc16_count; i++)
739 write32(relocs16[i], stdout);
740 write32(reloc_count, stdout);
741 992
742 /* Now print each relocation */ 993 if (use_real_mode) {
743 for (i = 0; i < reloc_count; i++) 994 write_reloc(relocs16.count, stdout);
744 write32(relocs[i], stdout); 995 for (i = 0; i < relocs16.count; i++)
745 } else { 996 write_reloc(relocs16.offset[i], stdout);
997
998 write_reloc(relocs32.count, stdout);
999 for (i = 0; i < relocs32.count; i++)
1000 write_reloc(relocs32.offset[i], stdout);
1001 } else {
1002 if (ELF_BITS == 64) {
746 /* Print a stop */ 1003 /* Print a stop */
747 write32(0, stdout); 1004 write_reloc(0, stdout);
748 1005
749 /* Now print each relocation */ 1006 /* Now print each relocation */
750 for (i = 0; i < reloc_count; i++) { 1007 for (i = 0; i < relocs64.count; i++)
751 write32(relocs[i], stdout); 1008 write_reloc(relocs64.offset[i], stdout);
752 }
753 } 1009 }
1010
1011 /* Print a stop */
1012 write_reloc(0, stdout);
1013
1014 /* Now print each relocation */
1015 for (i = 0; i < relocs32.count; i++)
1016 write_reloc(relocs32.offset[i], stdout);
754 } 1017 }
755} 1018}
756 1019
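For orientation, emit_relocs produces a flat stream of 32-bit words: with --realmode, a count followed by the 16-bit relocation offsets and then a count followed by the 32-bit relocation offsets; otherwise, on 64-bit builds a zero marker followed by the 64-bit relocation offsets, then in all cases a zero marker followed by the 32-bit relocation offsets. With --text the same words become .long directives; in binary mode they are presumably little-endian, as the inclusion of tools/le_byteshift.h suggests. A hypothetical reader for one such word, shown only to pin down the layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counterpart to write32 above: read one little-endian
 * 32-bit word from the emitted relocation stream. */
static int read_le32(FILE *f, uint32_t *out)
{
	unsigned char b[4];

	if (fread(b, 1, 4, f) != 4)
		return -1;
	*out = (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
	return 0;
}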
757static void usage(void) 1020#if ELF_BITS == 64
758{ 1021# define process process_64
759 die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n"); 1022#else
760} 1023# define process process_32
1024#endif
761 1025
762int main(int argc, char **argv) 1026void process(FILE *fp, int use_real_mode, int as_text,
1027 int show_absolute_syms, int show_absolute_relocs)
763{ 1028{
764 int show_absolute_syms, show_absolute_relocs;
765 int as_text, use_real_mode;
766 const char *fname;
767 FILE *fp;
768 int i;
769
770 show_absolute_syms = 0;
771 show_absolute_relocs = 0;
772 as_text = 0;
773 use_real_mode = 0;
774 fname = NULL;
775 for (i = 1; i < argc; i++) {
776 char *arg = argv[i];
777 if (*arg == '-') {
778 if (strcmp(arg, "--abs-syms") == 0) {
779 show_absolute_syms = 1;
780 continue;
781 }
782 if (strcmp(arg, "--abs-relocs") == 0) {
783 show_absolute_relocs = 1;
784 continue;
785 }
786 if (strcmp(arg, "--text") == 0) {
787 as_text = 1;
788 continue;
789 }
790 if (strcmp(arg, "--realmode") == 0) {
791 use_real_mode = 1;
792 continue;
793 }
794 }
795 else if (!fname) {
796 fname = arg;
797 continue;
798 }
799 usage();
800 }
801 if (!fname) {
802 usage();
803 }
804 regex_init(use_real_mode); 1029 regex_init(use_real_mode);
805 fp = fopen(fname, "r");
806 if (!fp) {
807 die("Cannot open %s: %s\n",
808 fname, strerror(errno));
809 }
810 read_ehdr(fp); 1030 read_ehdr(fp);
811 read_shdrs(fp); 1031 read_shdrs(fp);
812 read_strtabs(fp); 1032 read_strtabs(fp);
813 read_symtabs(fp); 1033 read_symtabs(fp);
814 read_relocs(fp); 1034 read_relocs(fp);
1035 if (ELF_BITS == 64)
1036 percpu_init();
815 if (show_absolute_syms) { 1037 if (show_absolute_syms) {
816 print_absolute_symbols(); 1038 print_absolute_symbols();
817 goto out; 1039 return;
818 } 1040 }
819 if (show_absolute_relocs) { 1041 if (show_absolute_relocs) {
820 print_absolute_relocs(); 1042 print_absolute_relocs();
821 goto out; 1043 return;
822 } 1044 }
823 emit_relocs(as_text, use_real_mode); 1045 emit_relocs(as_text, use_real_mode);
824out:
825 fclose(fp);
826 return 0;
827} 1046}
diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h
new file mode 100644
index 000000000000..07cdb1eca4fa
--- /dev/null
+++ b/arch/x86/tools/relocs.h
@@ -0,0 +1,36 @@
1#ifndef RELOCS_H
2#define RELOCS_H
3
4#include <stdio.h>
5#include <stdarg.h>
6#include <stdlib.h>
7#include <stdint.h>
8#include <inttypes.h>
9#include <string.h>
10#include <errno.h>
11#include <unistd.h>
12#include <elf.h>
13#include <byteswap.h>
14#define USE_BSD
15#include <endian.h>
16#include <regex.h>
17#include <tools/le_byteshift.h>
18
19void die(char *fmt, ...);
20
21#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
22
23enum symtype {
24 S_ABS,
25 S_REL,
26 S_SEG,
27 S_LIN,
28 S_NSYMTYPES
29};
30
31void process_32(FILE *fp, int use_real_mode, int as_text,
32 int show_absolute_syms, int show_absolute_relocs);
33void process_64(FILE *fp, int use_real_mode, int as_text,
34 int show_absolute_syms, int show_absolute_relocs);
35
36#endif /* RELOCS_H */
diff --git a/arch/x86/tools/relocs_32.c b/arch/x86/tools/relocs_32.c
new file mode 100644
index 000000000000..b2ade2bb4162
--- /dev/null
+++ b/arch/x86/tools/relocs_32.c
@@ -0,0 +1,17 @@
1#include "relocs.h"
2
3#define ELF_BITS 32
4
5#define ELF_MACHINE EM_386
6#define ELF_MACHINE_NAME "i386"
7#define SHT_REL_TYPE SHT_REL
8#define Elf_Rel ElfW(Rel)
9
10#define ELF_CLASS ELFCLASS32
11#define ELF_R_SYM(val) ELF32_R_SYM(val)
12#define ELF_R_TYPE(val) ELF32_R_TYPE(val)
13#define ELF_ST_TYPE(o) ELF32_ST_TYPE(o)
14#define ELF_ST_BIND(o) ELF32_ST_BIND(o)
15#define ELF_ST_VISIBILITY(o) ELF32_ST_VISIBILITY(o)
16
17#include "relocs.c"
diff --git a/arch/x86/tools/relocs_64.c b/arch/x86/tools/relocs_64.c
new file mode 100644
index 000000000000..56b61b743c4c
--- /dev/null
+++ b/arch/x86/tools/relocs_64.c
@@ -0,0 +1,17 @@
1#include "relocs.h"
2
3#define ELF_BITS 64
4
5#define ELF_MACHINE EM_X86_64
6#define ELF_MACHINE_NAME "x86_64"
7#define SHT_REL_TYPE SHT_RELA
8#define Elf_Rel Elf64_Rela
9
10#define ELF_CLASS ELFCLASS64
11#define ELF_R_SYM(val) ELF64_R_SYM(val)
12#define ELF_R_TYPE(val) ELF64_R_TYPE(val)
13#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
14#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
15#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
16
17#include "relocs.c"
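relocs_32.c and relocs_64.c are thin wrappers around one shared implementation: each defines the class-specific macros (ELF_BITS, ELF_MACHINE, the Elf_Rel and ELF_R_* aliases) and then textually includes relocs.c, so the same source is compiled once per ELF class. A minimal stand-alone sketch of that include-with-macros pattern, using hypothetical names:

#include <stdio.h>
#include <stdint.h>

#ifndef ELF_BITS
#define ELF_BITS 64	/* a wrapper file would define this before inclusion */
#endif

#if ELF_BITS == 64
typedef uint64_t demo_addr_t;
#else
typedef uint32_t demo_addr_t;
#endif

int main(void)
{
	demo_addr_t addr = 0x1234;

	printf("ELF_BITS=%d sizeof(addr)=%zu value=0x%llx\n",
	       ELF_BITS, sizeof(addr), (unsigned long long)addr);
	return 0;
}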
diff --git a/arch/x86/tools/relocs_common.c b/arch/x86/tools/relocs_common.c
new file mode 100644
index 000000000000..44d396823a53
--- /dev/null
+++ b/arch/x86/tools/relocs_common.c
@@ -0,0 +1,76 @@
1#include "relocs.h"
2
3void die(char *fmt, ...)
4{
5 va_list ap;
6 va_start(ap, fmt);
7 vfprintf(stderr, fmt, ap);
8 va_end(ap);
9 exit(1);
10}
11
12static void usage(void)
13{
14 die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
15}
16
17int main(int argc, char **argv)
18{
19 int show_absolute_syms, show_absolute_relocs;
20 int as_text, use_real_mode;
21 const char *fname;
22 FILE *fp;
23 int i;
24 unsigned char e_ident[EI_NIDENT];
25
26 show_absolute_syms = 0;
27 show_absolute_relocs = 0;
28 as_text = 0;
29 use_real_mode = 0;
30 fname = NULL;
31 for (i = 1; i < argc; i++) {
32 char *arg = argv[i];
33 if (*arg == '-') {
34 if (strcmp(arg, "--abs-syms") == 0) {
35 show_absolute_syms = 1;
36 continue;
37 }
38 if (strcmp(arg, "--abs-relocs") == 0) {
39 show_absolute_relocs = 1;
40 continue;
41 }
42 if (strcmp(arg, "--text") == 0) {
43 as_text = 1;
44 continue;
45 }
46 if (strcmp(arg, "--realmode") == 0) {
47 use_real_mode = 1;
48 continue;
49 }
50 }
51 else if (!fname) {
52 fname = arg;
53 continue;
54 }
55 usage();
56 }
57 if (!fname) {
58 usage();
59 }
60 fp = fopen(fname, "r");
61 if (!fp) {
62 die("Cannot open %s: %s\n", fname, strerror(errno));
63 }
64 if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT) {
65 die("Cannot read %s: %s", fname, strerror(errno));
66 }
67 rewind(fp);
68 if (e_ident[EI_CLASS] == ELFCLASS64)
69 process_64(fp, use_real_mode, as_text,
70 show_absolute_syms, show_absolute_relocs);
71 else
72 process_32(fp, use_real_mode, as_text,
73 show_absolute_syms, show_absolute_relocs);
74 fclose(fp);
75 return 0;
76}
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index 5f5feff3d24c..80ffa5b9982d 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/syscalls.h>
8#include <asm/uaccess.h> 9#include <asm/uaccess.h>
9#include <os.h> 10#include <os.h>
10#include <skas.h> 11#include <skas.h>
@@ -274,7 +275,7 @@ clear:
274 goto out; 275 goto out;
275} 276}
276 277
277int sys_set_thread_area(struct user_desc __user *user_desc) 278SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
278{ 279{
279 struct user_desc info; 280 struct user_desc info;
280 int idx, ret; 281 int idx, ret;
@@ -322,7 +323,7 @@ int ptrace_set_thread_area(struct task_struct *child, int idx,
322 return set_tls_entry(child, &info, idx, 0); 323 return set_tls_entry(child, &info, idx, 0);
323} 324}
324 325
325int sys_get_thread_area(struct user_desc __user *user_desc) 326SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
326{ 327{
327 struct user_desc info; 328 struct user_desc info;
328 int idx, ret; 329 int idx, ret;
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 131dacd2748a..1a3c76505649 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -4,7 +4,7 @@
4 4
5config XEN 5config XEN
6 bool "Xen guest support" 6 bool "Xen guest support"
7 select PARAVIRT 7 depends on PARAVIRT
8 select PARAVIRT_CLOCK 8 select PARAVIRT_CLOCK
9 select XEN_HAVE_PVMMU 9 select XEN_HAVE_PVMMU
10 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) 10 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8e1c7b95c3b..53d4f680c9b5 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/edd.h>
34 35
35#include <xen/xen.h> 36#include <xen/xen.h>
36#include <xen/events.h> 37#include <xen/events.h>
@@ -1220,7 +1221,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1220 .alloc_ldt = xen_alloc_ldt, 1221 .alloc_ldt = xen_alloc_ldt,
1221 .free_ldt = xen_free_ldt, 1222 .free_ldt = xen_free_ldt,
1222 1223
1223 .store_gdt = native_store_gdt,
1224 .store_idt = native_store_idt, 1224 .store_idt = native_store_idt,
1225 .store_tr = xen_store_tr, 1225 .store_tr = xen_store_tr,
1226 1226
@@ -1306,6 +1306,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
1306 .emergency_restart = xen_emergency_restart, 1306 .emergency_restart = xen_emergency_restart,
1307}; 1307};
1308 1308
1309static void __init xen_boot_params_init_edd(void)
1310{
1311#if IS_ENABLED(CONFIG_EDD)
1312 struct xen_platform_op op;
1313 struct edd_info *edd_info;
1314 u32 *mbr_signature;
1315 unsigned nr;
1316 int ret;
1317
1318 edd_info = boot_params.eddbuf;
1319 mbr_signature = boot_params.edd_mbr_sig_buffer;
1320
1321 op.cmd = XENPF_firmware_info;
1322
1323 op.u.firmware_info.type = XEN_FW_DISK_INFO;
1324 for (nr = 0; nr < EDDMAXNR; nr++) {
1325 struct edd_info *info = edd_info + nr;
1326
1327 op.u.firmware_info.index = nr;
1328 info->params.length = sizeof(info->params);
1329 set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
1330 &info->params);
1331 ret = HYPERVISOR_dom0_op(&op);
1332 if (ret)
1333 break;
1334
1335#define C(x) info->x = op.u.firmware_info.u.disk_info.x
1336 C(device);
1337 C(version);
1338 C(interface_support);
1339 C(legacy_max_cylinder);
1340 C(legacy_max_head);
1341 C(legacy_sectors_per_track);
1342#undef C
1343 }
1344 boot_params.eddbuf_entries = nr;
1345
1346 op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
1347 for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
1348 op.u.firmware_info.index = nr;
1349 ret = HYPERVISOR_dom0_op(&op);
1350 if (ret)
1351 break;
1352 mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
1353 }
1354 boot_params.edd_mbr_sig_buf_entries = nr;
1355#endif
1356}
1357
1309/* 1358/*
1310 * Set up the GDT and segment registers for -fstack-protector. Until 1359 * Set up the GDT and segment registers for -fstack-protector. Until
1311 * we do this, we have to be careful not to call any stack-protected 1360 * we do this, we have to be careful not to call any stack-protected
@@ -1508,6 +1557,8 @@ asmlinkage void __init xen_start_kernel(void)
1508 /* Avoid searching for BIOS MP tables */ 1557 /* Avoid searching for BIOS MP tables */
1509 x86_init.mpparse.find_smp_config = x86_init_noop; 1558 x86_init.mpparse.find_smp_config = x86_init_noop;
1510 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 1559 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
1560
1561 xen_boot_params_init_edd();
1511 } 1562 }
1512#ifdef CONFIG_PCI 1563#ifdef CONFIG_PCI
1513 /* PCI BIOS service won't work from a PV guest. */ 1564 /* PCI BIOS service won't work from a PV guest. */
@@ -1589,8 +1640,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
1589 switch (action) { 1640 switch (action) {
1590 case CPU_UP_PREPARE: 1641 case CPU_UP_PREPARE:
1591 xen_vcpu_setup(cpu); 1642 xen_vcpu_setup(cpu);
1592 if (xen_have_vector_callback) 1643 if (xen_have_vector_callback) {
1593 xen_init_lock_cpu(cpu); 1644 xen_init_lock_cpu(cpu);
1645 if (xen_feature(XENFEAT_hvm_safe_pvclock))
1646 xen_setup_timer(cpu);
1647 }
1594 break; 1648 break;
1595 default: 1649 default:
1596 break; 1650 break;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e006c18d288a..fdc3ba28ca38 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2043,9 +2043,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2043 2043
2044 switch (idx) { 2044 switch (idx) {
2045 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2045 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2046#ifdef CONFIG_X86_F00F_BUG 2046 case FIX_RO_IDT:
2047 case FIX_F00F_IDT:
2048#endif
2049#ifdef CONFIG_X86_32 2047#ifdef CONFIG_X86_32
2050 case FIX_WP_TEST: 2048 case FIX_WP_TEST:
2051 case FIX_VDSO: 2049 case FIX_VDSO:
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 09ea61d2e02f..8ff37995d54e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
95static void __cpuinit cpu_bringup_and_idle(void) 95static void __cpuinit cpu_bringup_and_idle(void)
96{ 96{
97 cpu_bringup(); 97 cpu_bringup();
98 cpu_idle(); 98 cpu_startup_entry(CPUHP_ONLINE);
99} 99}
100 100
101static int xen_smp_intr_init(unsigned int cpu) 101static int xen_smp_intr_init(unsigned int cpu)
@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
144 goto fail; 144 goto fail;
145 per_cpu(xen_callfuncsingle_irq, cpu) = rc; 145 per_cpu(xen_callfuncsingle_irq, cpu) = rc;
146 146
147 /*
148 * The IRQ worker on PVHVM goes through the native path and uses the
149 * IPI mechanism.
150 */
151 if (xen_hvm_domain())
152 return 0;
153
147 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); 154 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
148 rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, 155 rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
149 cpu, 156 cpu,
@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
167 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) 174 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
168 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), 175 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
169 NULL); 176 NULL);
177 if (xen_hvm_domain())
178 return rc;
179
170 if (per_cpu(xen_irq_work, cpu) >= 0) 180 if (per_cpu(xen_irq_work, cpu) >= 0)
171 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); 181 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
172 182
@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
418 428
419static void xen_cpu_die(unsigned int cpu) 429static void xen_cpu_die(unsigned int cpu)
420{ 430{
421 while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { 431 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
422 current->state = TASK_UNINTERRUPTIBLE; 432 current->state = TASK_UNINTERRUPTIBLE;
423 schedule_timeout(HZ/10); 433 schedule_timeout(HZ/10);
424 } 434 }
@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
426 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); 436 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
427 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); 437 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
428 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); 438 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
429 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); 439 if (!xen_hvm_domain())
440 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
430 xen_uninit_lock_cpu(cpu); 441 xen_uninit_lock_cpu(cpu);
431 xen_teardown_timer(cpu); 442 xen_teardown_timer(cpu);
432} 443}
@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
657 668
658static void xen_hvm_cpu_die(unsigned int cpu) 669static void xen_hvm_cpu_die(unsigned int cpu)
659{ 670{
660 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); 671 xen_cpu_die(cpu);
661 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
662 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
663 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
664 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
665 native_cpu_die(cpu); 672 native_cpu_die(cpu);
666} 673}
667 674
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f7a080ef0354..8b54603ce816 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
364 int irq; 364 int irq;
365 const char *name; 365 const char *name;
366 366
367 WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
368 cpu, per_cpu(lock_kicker_irq, cpu));
369
370 /*
371 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
372 * (xen: disable PV spinlocks on HVM)
373 */
374 if (xen_hvm_domain())
375 return;
376
367 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); 377 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
368 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, 378 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
369 cpu, 379 cpu,
@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
382 392
383void xen_uninit_lock_cpu(int cpu) 393void xen_uninit_lock_cpu(int cpu)
384{ 394{
395 /*
396 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
397 * (xen: disable PV spinlocks on HVM)
398 */
399 if (xen_hvm_domain())
400 return;
401
385 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); 402 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
403 per_cpu(lock_kicker_irq, cpu) = -1;
386} 404}
387 405
388void __init xen_init_spinlocks(void) 406void __init xen_init_spinlocks(void)
389{ 407{
408 /*
409 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
410 * (xen: disable PV spinlocks on HVM)
411 */
412 if (xen_hvm_domain())
413 return;
414
390 BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t)); 415 BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
391 416
392 pv_lock_ops.spin_is_locked = xen_spin_is_locked; 417 pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0296a9522501..3d88bfdf9e1c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
377 377
378static const struct clock_event_device *xen_clockevent = 378static const struct clock_event_device *xen_clockevent =
379 &xen_timerop_clockevent; 379 &xen_timerop_clockevent;
380static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events); 380static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
381 381
382static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) 382static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
383{ 383{
@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
401 struct clock_event_device *evt; 401 struct clock_event_device *evt;
402 int irq; 402 int irq;
403 403
404 evt = &per_cpu(xen_clock_events, cpu);
405 WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
406
404 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); 407 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
405 408
406 name = kasprintf(GFP_KERNEL, "timer%d", cpu); 409 name = kasprintf(GFP_KERNEL, "timer%d", cpu);
@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
413 IRQF_FORCE_RESUME, 416 IRQF_FORCE_RESUME,
414 name, NULL); 417 name, NULL);
415 418
416 evt = &per_cpu(xen_clock_events, cpu);
417 memcpy(evt, xen_clockevent, sizeof(*evt)); 419 memcpy(evt, xen_clockevent, sizeof(*evt));
418 420
419 evt->cpumask = cpumask_of(cpu); 421 evt->cpumask = cpumask_of(cpu);
@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
426 BUG_ON(cpu == 0); 428 BUG_ON(cpu == 0);
427 evt = &per_cpu(xen_clock_events, cpu); 429 evt = &per_cpu(xen_clock_events, cpu);
428 unbind_from_irqhandler(evt->irq, NULL); 430 unbind_from_irqhandler(evt->irq, NULL);
431 evt->irq = -1;
429} 432}
430 433
431void xen_setup_cpu_clockevents(void) 434void xen_setup_cpu_clockevents(void)
@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
497{ 500{
498 int cpu = smp_processor_id(); 501 int cpu = smp_processor_id();
499 xen_setup_runstate_info(cpu); 502 xen_setup_runstate_info(cpu);
500 xen_setup_timer(cpu); 503 /*
504 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
505 * doing it xen_hvm_cpu_notify (which gets called by smp_init during
506 * early bootup and also during CPU hotplug events).
507 */
501 xen_setup_cpu_clockevents(); 508 xen_setup_cpu_clockevents();
502} 509}
503 510
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index c38834de9ac7..cb4c2ce8d447 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -4,14 +4,6 @@
4#define __ARCH_WANT_SYS_CLONE 4#define __ARCH_WANT_SYS_CLONE
5#include <uapi/asm/unistd.h> 5#include <uapi/asm/unistd.h>
6 6
7/*
8 * "Conditional" syscalls
9 *
10 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
11 * but it doesn't work on all toolchains, so we just do it by hand
12 */
13#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
14
15#define __ARCH_WANT_STAT64 7#define __ARCH_WANT_STAT64
16#define __ARCH_WANT_SYS_UTIME 8#define __ARCH_WANT_SYS_UTIME
17#define __ARCH_WANT_SYS_LLSEEK 9#define __ARCH_WANT_SYS_LLSEEK
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 5cd82e9f601c..1c85323f01d7 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -105,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti)
105/* 105/*
106 * Powermanagement idle function, if any is provided by the platform. 106 * Powermanagement idle function, if any is provided by the platform.
107 */ 107 */
108 108void arch_cpu_idle(void)
109void cpu_idle(void)
110{ 109{
111 local_irq_enable(); 110 platform_idle();
112
113 /* endless idle loop with no priority at all */
114 while (1) {
115 rcu_idle_enter();
116 while (!need_resched())
117 platform_idle();
118 rcu_idle_exit();
119 schedule_preempt_disabled();
120 }
121} 111}
122 112
123/* 113/*
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 923db5c15278..458186dab5dc 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -383,6 +383,8 @@ void show_regs(struct pt_regs * regs)
383{ 383{
384 int i, wmask; 384 int i, wmask;
385 385
386 show_regs_print_info(KERN_DEFAULT);
387
386 wmask = regs->wmask & ~1; 388 wmask = regs->wmask & ~1;
387 389
388 for (i = 0; i < 16; i++) { 390 for (i = 0; i < 16; i++) {
@@ -481,14 +483,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
481 show_trace(task, stack); 483 show_trace(task, stack);
482} 484}
483 485
484void dump_stack(void)
485{
486 show_stack(current, NULL);
487}
488
489EXPORT_SYMBOL(dump_stack);
490
491
492void show_code(unsigned int *pc) 486void show_code(unsigned int *pc)
493{ 487{
494 long i; 488 long i;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 7a5156ffebb6..bba125b4bb06 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -208,32 +208,17 @@ void __init mem_init(void)
208 highmemsize >> 10); 208 highmemsize >> 10);
209} 209}
210 210
211void
212free_reserved_mem(void *start, void *end)
213{
214 for (; start < end; start += PAGE_SIZE) {
215 ClearPageReserved(virt_to_page(start));
216 init_page_count(virt_to_page(start));
217 free_page((unsigned long)start);
218 totalram_pages++;
219 }
220}
221
222#ifdef CONFIG_BLK_DEV_INITRD 211#ifdef CONFIG_BLK_DEV_INITRD
223extern int initrd_is_mapped; 212extern int initrd_is_mapped;
224 213
225void free_initrd_mem(unsigned long start, unsigned long end) 214void free_initrd_mem(unsigned long start, unsigned long end)
226{ 215{
227 if (initrd_is_mapped) { 216 if (initrd_is_mapped)
228 free_reserved_mem((void*)start, (void*)end); 217 free_reserved_area(start, end, 0, "initrd");
229 printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
230 }
231} 218}
232#endif 219#endif
233 220
234void free_initmem(void) 221void free_initmem(void)
235{ 222{
236 free_reserved_mem(__init_begin, __init_end); 223 free_initmem_default(0);
237 printk("Freeing unused kernel memory: %zuk freed\n",
238 (__init_end - __init_begin) >> 10);
239} 224}