Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/Makefile2
-rw-r--r--arch/alpha/include/asm/floppy.h2
-rw-r--r--arch/alpha/include/asm/thread_info.h2
-rw-r--r--arch/alpha/kernel/irq.c7
-rw-r--r--arch/alpha/kernel/irq_alpha.c10
-rw-r--r--arch/alpha/kernel/process.c19
-rw-r--r--arch/alpha/kernel/smp.c3
-rw-r--r--arch/alpha/kernel/sys_nautilus.c10
-rw-r--r--arch/alpha/kernel/sys_titan.c14
-rw-r--r--arch/alpha/mm/init.c24
-rw-r--r--arch/alpha/mm/numa.c3
-rw-r--r--arch/arc/include/asm/dma-mapping.h2
-rw-r--r--arch/arc/include/asm/elf.h3
-rw-r--r--arch/arc/include/asm/entry.h2
-rw-r--r--arch/arc/include/asm/irqflags.h12
-rw-r--r--arch/arc/include/asm/kgdb.h6
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/syscalls.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h4
-rw-r--r--arch/arc/kernel/disasm.c2
-rw-r--r--arch/arc/kernel/entry.S27
-rw-r--r--arch/arc/kernel/kgdb.c1
-rw-r--r--arch/arc/kernel/process.c27
-rw-r--r--arch/arc/kernel/setup.c4
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arc/kernel/sys.c2
-rw-r--r--arch/arc/mm/init.c23
-rw-r--r--arch/arc/plat-arcfpga/Kconfig2
-rw-r--r--arch/arm/Kconfig39
-rw-r--r--arch/arm/Kconfig.debug3
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/dts/armada-370-mirabox.dts2
-rw-r--r--arch/arm/boot/dts/armada-370-rd.dts8
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi5
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi6
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi10
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi68
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi10
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi2
-rw-r--r--arch/arm/boot/dts/dbx5x0.dtsi7
-rw-r--r--arch/arm/boot/dts/dove.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi9
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi6
-rw-r--r--arch/arm/boot/dts/href.dtsi2
-rw-r--r--arch/arm/boot/dts/hrefv60plus.dts2
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts1
-rw-r--r--arch/arm/boot/dts/imx28-sps1.dts1
-rw-r--r--arch/arm/boot/dts/imx53-mba53.dts3
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-dns320.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-dns325.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-dockstar.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-dreamplug.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-ib62x0.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts15
-rw-r--r--arch/arm/boot/dts/kirkwood-km_kirkwood.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-lschlv2.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-lsxhl.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-mplcec4.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-ns2-common.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-nsa310.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-topkick.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi5
-rw-r--r--arch/arm/boot/dts/msm8660-surf.dts6
-rw-r--r--arch/arm/boot/dts/msm8960-cdp.dts6
-rw-r--r--arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts2
-rw-r--r--arch/arm/boot/dts/orion5x.dtsi9
-rw-r--r--arch/arm/boot/dts/snowball.dts2
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi3
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi4
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi4
-rw-r--r--arch/arm/boot/dts/spear310.dtsi4
-rw-r--r--arch/arm/boot/dts/spear320.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi3
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi3
-rw-r--r--arch/arm/boot/dts/vt8500-bv07.dts34
-rw-r--r--arch/arm/boot/dts/vt8500.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8505-ref.dts34
-rw-r--r--arch/arm/boot/dts/wm8505.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8650-mid.dts36
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8850-w70v2.dts40
-rw-r--r--arch/arm/boot/dts/wm8850.dtsi4
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/lpc32xx_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/asm/delay.h2
-rw-r--r--arch/arm/include/asm/glue-cache.h8
-rw-r--r--arch/arm/include/asm/hardware/iop3xx.h2
-rw-r--r--arch/arm/include/asm/highmem.h7
-rw-r--r--arch/arm/include/asm/mmu_context.h2
-rw-r--r--arch/arm/include/asm/pgtable-3level.h2
-rw-r--r--arch/arm/include/asm/pgtable.h9
-rw-r--r--arch/arm/include/asm/system_misc.h3
-rw-r--r--arch/arm/include/asm/tlbflush.h26
-rw-r--r--arch/arm/include/asm/xen/events.h25
-rw-r--r--arch/arm/kernel/early_printk.c17
-rw-r--r--arch/arm/kernel/entry-common.S12
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/hw_breakpoint.c8
-rw-r--r--arch/arm/kernel/perf_event.c5
-rw-r--r--arch/arm/kernel/process.c100
-rw-r--r--arch/arm/kernel/sched_clock.c4
-rw-r--r--arch/arm/kernel/setup.c27
-rw-r--r--arch/arm/kernel/smp.c7
-rw-r--r--arch/arm/kernel/smp_tlb.c66
-rw-r--r--arch/arm/kernel/tcm.c1
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/vgic.c35
-rw-r--r--arch/arm/lib/delay.c8
-rw-r--r--arch/arm/lib/memset.S33
-rw-r--r--arch/arm/mach-at91/at91sam9260.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c2
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c2
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c2
-rw-r--r--arch/arm/mach-at91/board-foxg20.c1
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c1
-rw-r--r--arch/arm/mach-at91/include/mach/gpio.h8
-rw-r--r--arch/arm/mach-at91/irq.c20
-rw-r--r--arch/arm/mach-at91/pm.c10
-rw-r--r--arch/arm/mach-bcm/Kconfig1
-rw-r--r--arch/arm/mach-bcm/board_bcm.c7
-rw-r--r--arch/arm/mach-cns3xxx/core.c16
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/cns3xxx.h16
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c71
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c166
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c8
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-davinci/davinci.h11
-rw-r--r--arch/arm/mach-davinci/dm355.c174
-rw-r--r--arch/arm/mach-davinci/dm365.c195
-rw-r--r--arch/arm/mach-davinci/dm644x.c11
-rw-r--r--arch/arm/mach-davinci/dma.c3
-rw-r--r--arch/arm/mach-davinci/pm_domain.c2
-rw-r--r--arch/arm/mach-ep93xx/include/mach/uncompress.h10
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c27
-rw-r--r--arch/arm/mach-exynos/setup-usb-phy.c8
-rw-r--r--arch/arm/mach-footbridge/Kconfig1
-rw-r--r--arch/arm/mach-gemini/idle.c4
-rw-r--r--arch/arm/mach-gemini/irq.c4
-rw-r--r--arch/arm/mach-highbank/hotplug.c10
-rw-r--r--arch/arm/mach-imx/clk-busy.c2
-rw-r--r--arch/arm/mach-imx/clk-imx35.c3
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c5
-rw-r--r--arch/arm/mach-imx/common.h2
-rw-r--r--arch/arm/mach-imx/headsmp.S18
-rw-r--r--arch/arm/mach-imx/hotplug.c12
-rw-r--r--arch/arm/mach-imx/imx25-dt.c5
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c15
-rw-r--r--arch/arm/mach-imx/src.c12
-rw-r--r--arch/arm/mach-ixp4xx/common.c3
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c25
-rw-r--r--arch/arm/mach-kirkwood/board-iomega_ix2_200.c7
-rw-r--r--arch/arm/mach-kirkwood/guruplug-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/openrd-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c1
-rw-r--r--arch/arm/mach-mmp/aspenite.c6
-rw-r--r--arch/arm/mach-mmp/gplugd.c1
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c6
-rw-r--r--arch/arm/mach-msm/timer.c5
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c26
-rw-r--r--arch/arm/mach-mxs/icoll.c2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c34
-rw-r--r--arch/arm/mach-mxs/mm.c1
-rw-r--r--arch/arm/mach-mxs/ocotp.c1
-rw-r--r--arch/arm/mach-omap1/clock_data.c12
-rw-r--r--arch/arm/mach-omap1/common.h2
-rw-r--r--arch/arm/mach-omap1/pm.c6
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/board-generic.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51.c2
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c20
-rw-r--r--arch/arm/mach-omap2/common.h4
-rw-r--r--arch/arm/mach-omap2/gpmc.c6
-rw-r--r--arch/arm/mach-omap2/io.c18
-rw-r--r--arch/arm/mach-omap2/mux.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c12
-rw-r--r--arch/arm/mach-omap2/pm.c5
-rw-r--r--arch/arm/mach-omap2/pm44xx.c4
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/mach-orion5x/board-dt.c3
-rw-r--r--arch/arm/mach-orion5x/common.c2
-rw-r--r--arch/arm/mach-pxa/raumfeld.c1
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c5
-rw-r--r--arch/arm/mach-s3c24xx/common.c5
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/irqs.h4
-rw-r--r--arch/arm/mach-s3c24xx/irq.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-usb-phy.c4
-rw-r--r--arch/arm/mach-s5pv210/clock.c36
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c2
-rw-r--r--arch/arm/mach-s5pv210/setup-usb-phy.c4
-rw-r--r--arch/arm/mach-shark/core.c3
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c8
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c8
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c12
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/suspend.c6
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c2
-rw-r--r--arch/arm/mach-tegra/Kconfig8
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c774
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h7
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c1
-rw-r--r--arch/arm/mach-ux500/board-mop500.c17
-rw-r--r--arch/arm/mach-ux500/board-mop500.h1
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c5
-rw-r--r--arch/arm/mach-vexpress/v2m.c8
-rw-r--r--arch/arm/mach-w90x900/dev.c3
-rw-r--r--arch/arm/mm/Kconfig5
-rw-r--r--arch/arm/mm/Makefile1
-rw-r--r--arch/arm/mm/cache-feroceon-l2.c1
-rw-r--r--arch/arm/mm/cache-l2x0.c11
-rw-r--r--arch/arm/mm/cache-v3.S137
-rw-r--r--arch/arm/mm/cache-v4.S2
-rw-r--r--arch/arm/mm/context.c3
-rw-r--r--arch/arm/mm/dma-mapping.c5
-rw-r--r--arch/arm/mm/init.c50
-rw-r--r--arch/arm/mm/mmu.c75
-rw-r--r--arch/arm/mm/proc-arm740.S30
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-syms.c2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S19
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/mm/tcm.h (renamed from arch/arm/kernel/tcm.h)0
-rw-r--r--arch/arm/net/bpf_jit_32.c2
-rw-r--r--arch/arm/plat-orion/addr-map.c7
-rw-r--r--arch/arm/plat-samsung/devs.c10
-rw-r--r--arch/arm/plat-samsung/include/plat/fb.h50
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h282
-rw-r--r--arch/arm/plat-samsung/include/plat/usb-phy.h5
-rw-r--r--arch/arm/plat-spear/Kconfig2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/Kconfig.debug11
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/ucontext.h2
-rw-r--r--arch/arm64/kernel/arm64ksyms.c2
-rw-r--r--arch/arm64/kernel/process.c43
-rw-r--r--arch/arm64/kernel/signal32.c1
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/mm/init.c26
-rw-r--r--arch/arm64/mm/mmu.c15
-rw-r--r--arch/avr32/Kconfig2
-rw-r--r--arch/avr32/include/asm/io.h4
-rw-r--r--arch/avr32/kernel/process.c13
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/avr32/mach-at32ap/include/mach/pm.h24
-rw-r--r--arch/avr32/mach-at32ap/pm-at32ap700x.S7
-rw-r--r--arch/avr32/mm/init.c24
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/blackfin/include/asm/bfin_sport3.h2
-rw-r--r--arch/blackfin/kernel/early_printk.c2
-rw-r--r--arch/blackfin/kernel/process.c32
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c8
-rw-r--r--arch/blackfin/mach-common/smp.c2
-rw-r--r--arch/blackfin/mm/init.c22
-rw-r--r--arch/c6x/include/asm/irqflags.h2
-rw-r--r--arch/c6x/kernel/process.c28
-rw-r--r--arch/c6x/mm/init.c30
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/cris/arch-v10/kernel/process.c3
-rw-r--r--arch/cris/arch-v32/kernel/process.c12
-rw-r--r--arch/cris/arch-v32/kernel/smp.c4
-rw-r--r--arch/cris/include/asm/processor.h7
-rw-r--r--arch/cris/kernel/process.c49
-rw-r--r--arch/cris/mm/init.c16
-rw-r--r--arch/frv/Kconfig2
-rw-r--r--arch/frv/kernel/process.c27
-rw-r--r--arch/frv/mm/init.c38
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/h8300/kernel/process.c35
-rw-r--r--arch/h8300/mm/init.c30
-rw-r--r--arch/hexagon/kernel/process.c23
-rw-r--r--arch/hexagon/kernel/smp.c2
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/hp/sim/simserial.c16
-rw-r--r--arch/ia64/include/asm/futex.h5
-rw-r--r--arch/ia64/include/asm/hugetlb.h1
-rw-r--r--arch/ia64/include/asm/irqflags.h1
-rw-r--r--arch/ia64/include/asm/mca.h1
-rw-r--r--arch/ia64/include/asm/numa.h5
-rw-r--r--arch/ia64/include/asm/thread_info.h2
-rw-r--r--arch/ia64/kernel/fsys.S49
-rw-r--r--arch/ia64/kernel/iosapic.c34
-rw-r--r--arch/ia64/kernel/irq.c8
-rw-r--r--arch/ia64/kernel/mca.c37
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/palinfo.c77
-rw-r--r--arch/ia64/kernel/perfmon.c14
-rw-r--r--arch/ia64/kernel/process.c83
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kvm/vtlb.c2
-rw-r--r--arch/ia64/mm/contig.c2
-rw-r--r--arch/ia64/mm/discontig.c9
-rw-r--r--arch/ia64/mm/init.c23
-rw-r--r--arch/ia64/mm/ioremap.c14
-rw-r--r--arch/ia64/mm/numa.c20
-rw-r--r--arch/ia64/pci/pci.c11
-rw-r--r--arch/ia64/sn/kernel/tiocx.c5
-rw-r--r--arch/m32r/Kconfig2
-rw-r--r--arch/m32r/include/uapi/asm/stat.h4
-rw-r--r--arch/m32r/kernel/process.c18
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/m32r/kernel/traps.c6
-rw-r--r--arch/m32r/mm/init.c26
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/Kconfig.bus10
-rw-r--r--arch/m68k/Kconfig.devices24
-rw-r--r--arch/m68k/Kconfig.machine1
-rw-r--r--arch/m68k/atari/ataints.c152
-rw-r--r--arch/m68k/atari/config.c239
-rw-r--r--arch/m68k/include/asm/MC68328.h10
-rw-r--r--arch/m68k/include/asm/atarihw.h6
-rw-r--r--arch/m68k/include/asm/atariints.h11
-rw-r--r--arch/m68k/include/asm/cmpxchg.h3
-rw-r--r--arch/m68k/include/asm/delay.h23
-rw-r--r--arch/m68k/include/asm/gpio.h20
-rw-r--r--arch/m68k/include/asm/io_mm.h136
-rw-r--r--arch/m68k/include/asm/irq.h6
-rw-r--r--arch/m68k/include/asm/raw_io.h109
-rw-r--r--arch/m68k/include/asm/string.h14
-rw-r--r--arch/m68k/kernel/process.c32
-rw-r--r--arch/m68k/kernel/setup_mm.c6
-rw-r--r--arch/m68k/kernel/setup_no.c3
-rw-r--r--arch/m68k/lib/string.c2
-rw-r--r--arch/m68k/mm/init.c26
-rw-r--r--arch/m68k/platform/coldfire/m528x.c2
-rw-r--r--arch/metag/include/asm/thread_info.h2
-rw-r--r--arch/metag/kernel/process.c35
-rw-r--r--arch/metag/kernel/smp.c2
-rw-r--r--arch/metag/mm/init.c31
-rw-r--r--arch/microblaze/Kconfig3
-rw-r--r--arch/microblaze/include/asm/processor.h5
-rw-r--r--arch/microblaze/include/asm/setup.h1
-rw-r--r--arch/microblaze/include/asm/thread_info.h1
-rw-r--r--arch/microblaze/kernel/early_printk.c26
-rw-r--r--arch/microblaze/kernel/process.c65
-rw-r--r--arch/microblaze/mm/init.c34
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c5
-rw-r--r--arch/mips/bcm63xx/dev-spi.c11
-rw-r--r--arch/mips/bcm63xx/nvram.c7
-rw-r--r--arch/mips/bcm63xx/setup.c2
-rw-r--r--arch/mips/cavium-octeon/setup.c5
-rw-r--r--arch/mips/include/asm/hugetlb.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h4
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h209
-rw-r--r--arch/mips/include/asm/page.h2
-rw-r--r--arch/mips/include/asm/signal.h2
-rw-r--r--arch/mips/include/uapi/asm/signal.h8
-rw-r--r--arch/mips/kernel/Makefile25
-rw-r--r--arch/mips/kernel/cpu-probe.c13
-rw-r--r--arch/mips/kernel/early_printk.c12
-rw-r--r--arch/mips/kernel/linux32.c2
-rw-r--r--arch/mips/kernel/mcount.S11
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/kernel/process.c48
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/lib/bitops.c16
-rw-r--r--arch/mips/lib/csum_partial.S4
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/init.c37
-rw-r--r--arch/mips/mm/sc-mips.c6
-rw-r--r--arch/mips/pci/pci-alchemy.c4
-rw-r--r--arch/mips/pci/pci.c8
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c4
-rw-r--r--arch/mn10300/Kconfig2
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/process.c70
-rw-r--r--arch/mn10300/kernel/smp.c7
-rw-r--r--arch/mn10300/mm/init.c23
-rw-r--r--arch/openrisc/Kconfig3
-rw-r--r--arch/openrisc/include/asm/thread_info.h2
-rw-r--r--arch/openrisc/kernel/Makefile2
-rw-r--r--arch/openrisc/kernel/idle.c73
-rw-r--r--arch/openrisc/mm/init.c27
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/Makefile6
-rw-r--r--arch/parisc/include/asm/cacheflush.h5
-rw-r--r--arch/parisc/include/asm/pgtable.h47
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h14
-rw-r--r--arch/parisc/kernel/cache.c5
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/process.c22
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/parisc/lib/Makefile3
-rw-r--r--arch/parisc/lib/ucmpdi2.c25
-rw-r--r--arch/parisc/mm/init.c25
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/include/asm/hugetlb.h1
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h128
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/include/asm/uprobes.h1
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/crash_dump.c5
-rw-r--r--arch/powerpc/kernel/entry_64.S4
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S178
-rw-r--r--arch/powerpc/kernel/fadump.c5
-rw-r--r--arch/powerpc/kernel/idle.c89
-rw-r--r--arch/powerpc/kernel/kvm.c7
-rw-r--r--arch/powerpc/kernel/nvram_64.c3
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c14
-rw-r--r--arch/powerpc/kernel/ptrace.c1
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/tm.S2
-rw-r--r--arch/powerpc/kernel/udbg.c6
-rw-r--r--arch/powerpc/kernel/uprobes.c29
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/book3s_pr.c2
-rw-r--r--arch/powerpc/kvm/e500.h24
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c84
-rw-r--r--arch/powerpc/kvm/e500mc.c7
-rw-r--r--arch/powerpc/mm/hash_utils_64.c22
-rw-r--r--arch/powerpc/mm/init_64.c11
-rw-r--r--arch/powerpc/mm/mem.c35
-rw-r--r--arch/powerpc/mm/mmu_context_hash64.c11
-rw-r--r--arch/powerpc/mm/numa.c10
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/mm/slb_low.S50
-rw-r--r--arch/powerpc/mm/tlb_hash64.c2
-rw-r--r--arch/powerpc/perf/power7-pmu.c13
-rw-r--r--arch/powerpc/platforms/44x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c5
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c6
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype6
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c12
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c8
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c4
-rw-r--r--arch/s390/include/asm/bitops.h117
-rw-r--r--arch/s390/include/asm/ccwdev.h3
-rw-r--r--arch/s390/include/asm/cio.h2
-rw-r--r--arch/s390/include/asm/compat.h57
-rw-r--r--arch/s390/include/asm/cpu_mf.h1
-rw-r--r--arch/s390/include/asm/eadm.h6
-rw-r--r--arch/s390/include/asm/elf.h23
-rw-r--r--arch/s390/include/asm/hugetlb.h56
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/pci_debug.h9
-rw-r--r--arch/s390/include/asm/pci_insn.h203
-rw-r--r--arch/s390/include/asm/pci_io.h16
-rw-r--r--arch/s390/include/asm/pgtable.h105
-rw-r--r--arch/s390/include/asm/processor.h3
-rw-r--r--arch/s390/include/asm/ptrace.h6
-rw-r--r--arch/s390/include/asm/syscall.h1
-rw-r--r--arch/s390/include/asm/thread_info.h6
-rw-r--r--arch/s390/include/asm/tlbflush.h2
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h20
-rw-r--r--arch/s390/include/uapi/asm/statfs.h63
-rw-r--r--arch/s390/kernel/Makefile17
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/dis.c9
-rw-r--r--arch/s390/kernel/dumpstack.c236
-rw-r--r--arch/s390/kernel/entry.S42
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/entry64.S48
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/machine_kexec.c30
-rw-r--r--arch/s390/kernel/process.c32
-rw-r--r--arch/s390/kernel/setup.c11
-rw-r--r--arch/s390/kernel/smp.c18
-rw-r--r--arch/s390/kernel/suspend.c31
-rw-r--r--arch/s390/kernel/swsusp_asm64.S29
-rw-r--r--arch/s390/kernel/traps.c250
-rw-r--r--arch/s390/kernel/vtime.c5
-rw-r--r--arch/s390/kvm/trace.h4
-rw-r--r--arch/s390/lib/uaccess_pt.c83
-rw-r--r--arch/s390/mm/cmm.c8
-rw-r--r--arch/s390/mm/fault.c9
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/init.c45
-rw-r--r--arch/s390/mm/pageattr.c24
-rw-r--r--arch/s390/mm/pgtable.c235
-rw-r--r--arch/s390/mm/vmem.c15
-rw-r--r--arch/s390/net/bpf_jit_comp.c3
-rw-r--r--arch/s390/oprofile/init.c1
-rw-r--r--arch/s390/pci/Makefile4
-rw-r--r--arch/s390/pci/pci.c153
-rw-r--r--arch/s390/pci/pci_clp.c13
-rw-r--r--arch/s390/pci/pci_debug.c7
-rw-r--r--arch/s390/pci/pci_dma.c9
-rw-r--r--arch/s390/pci/pci_insn.c202
-rw-r--r--arch/s390/pci/pci_msi.c10
-rw-r--r--arch/score/Kconfig2
-rw-r--r--arch/score/kernel/process.c18
-rw-r--r--arch/score/mm/init.c33
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c4
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c2
-rw-r--r--arch/sh/include/asm/hugetlb.h1
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/kernel/idle.c101
-rw-r--r--arch/sh/kernel/sh_bios.c4
-rw-r--r--arch/sh/kernel/smp.c2
-rw-r--r--arch/sh/mm/init.c26
-rw-r--r--arch/sparc/Kconfig10
-rw-r--r--arch/sparc/include/asm/Kbuild5
-rw-r--r--arch/sparc/include/asm/cputime.h6
-rw-r--r--arch/sparc/include/asm/emergency-restart.h6
-rw-r--r--arch/sparc/include/asm/hugetlb.h1
-rw-r--r--arch/sparc/include/asm/mutex.h9
-rw-r--r--arch/sparc/include/asm/pgtable_64.h1
-rw-r--r--arch/sparc/include/asm/serial.h6
-rw-r--r--arch/sparc/include/asm/smp_32.h5
-rw-r--r--arch/sparc/include/asm/spitfire.h1
-rw-r--r--arch/sparc/include/asm/switch_to_64.h3
-rw-r--r--arch/sparc/include/asm/thread_info_32.h2
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h37
-rw-r--r--arch/sparc/include/uapi/asm/Kbuild1
-rw-r--r--arch/sparc/include/uapi/asm/types.h17
-rw-r--r--arch/sparc/kernel/cpu.c6
-rw-r--r--arch/sparc/kernel/head_64.S25
-rw-r--r--arch/sparc/kernel/hvtramp.S3
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c41
-rw-r--r--arch/sparc/kernel/process_32.c21
-rw-r--r--arch/sparc/kernel/process_64.c49
-rw-r--r--arch/sparc/kernel/smp_32.c2
-rw-r--r--arch/sparc/kernel/smp_64.c43
-rw-r--r--arch/sparc/kernel/trampoline_64.S3
-rw-r--r--arch/sparc/lib/bitext.c6
-rw-r--r--arch/sparc/mm/init_32.c12
-rw-r--r--arch/sparc/mm/init_64.c7
-rw-r--r--arch/sparc/mm/iommu.c2
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/sparc/mm/tlb.c39
-rw-r--r--arch/sparc/mm/tsb.c57
-rw-r--r--arch/sparc/mm/ultra.S119
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/configs/tilegx_defconfig1
-rw-r--r--arch/tile/configs/tilepro_defconfig1
-rw-r--r--arch/tile/include/asm/hugetlb.h1
-rw-r--r--arch/tile/include/asm/irqflags.h10
-rw-r--r--arch/tile/include/asm/thread_info.h2
-rw-r--r--arch/tile/kernel/early_printk.c27
-rw-r--r--arch/tile/kernel/process.c65
-rw-r--r--arch/tile/kernel/setup.c25
-rw-r--r--arch/tile/kernel/smpboot.c4
-rw-r--r--arch/tile/mm/pgtable.c7
-rw-r--r--arch/um/drivers/chan.h2
-rw-r--r--arch/um/drivers/chan_kern.c10
-rw-r--r--arch/um/drivers/chan_user.c12
-rw-r--r--arch/um/drivers/chan_user.h6
-rw-r--r--arch/um/drivers/line.c50
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/um/drivers/ssl.c1
-rw-r--r--arch/um/drivers/stdio_console.c1
-rw-r--r--arch/um/kernel/early_printk.c8
-rw-r--r--arch/um/kernel/mem.c26
-rw-r--r--arch/um/kernel/process.c27
-rw-r--r--arch/um/os-Linux/signal.c2
-rw-r--r--arch/um/os-Linux/start_up.c2
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/kernel/early_printk.c12
-rw-r--r--arch/unicore32/kernel/process.c21
-rw-r--r--arch/unicore32/mm/init.c31
-rw-r--r--arch/unicore32/mm/ioremap.c17
-rw-r--r--arch/x86/Kconfig96
-rw-r--r--arch/x86/Kconfig.debug2
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/boot/compressed/eboot.c47
-rw-r--r--arch/x86/boot/compressed/head_64.S2
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/context_tracking.h21
-rw-r--r--arch/x86/include/asm/cpufeature.h22
-rw-r--r--arch/x86/include/asm/efi.h7
-rw-r--r--arch/x86/include/asm/fixmap.h7
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/include/asm/hypervisor.h16
-rw-r--r--arch/x86/include/asm/kprobes.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/mce.h4
-rw-r--r--arch/x86/include/asm/msr.h14
-rw-r--r--arch/x86/include/asm/page_64_types.h1
-rw-r--r--arch/x86/include/asm/paravirt.h9
-rw-r--r--arch/x86/include/asm/paravirt_types.h4
-rw-r--r--arch/x86/include/asm/perf_event_p4.h62
-rw-r--r--arch/x86/include/asm/pgtable_types.h1
-rw-r--r--arch/x86/include/asm/processor.h25
-rw-r--r--arch/x86/include/asm/suspend_32.h1
-rw-r--r--arch/x86/include/asm/suspend_64.h3
-rw-r--r--arch/x86/include/asm/syscall.h4
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/asm/tlb.h2
-rw-r--r--arch/x86/include/asm/uprobes.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h6
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S5
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/amd_nb.c5
-rw-r--r--arch/x86/kernel/aperture_64.c2
-rw-r--r--arch/x86/kernel/cpu/Makefile9
-rw-r--r--arch/x86/kernel/cpu/amd.c48
-rw-r--r--arch/x86/kernel/cpu/bugs.c34
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/cyrix.c5
-rw-r--r--arch/x86/kernel/cpu/intel.c34
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c39
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c25
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.pl48
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh41
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c18
-rw-r--r--arch/x86/kernel/cpu/perf_event.c89
-rw-r--r--arch/x86/kernel/cpu/perf_event.h56
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c138
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c547
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c62
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c195
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c876
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h64
-rw-r--r--arch/x86/kernel/cpu/perf_event_knc.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c2
-rw-r--r--arch/x86/kernel/cpu/proc.c6
-rw-r--r--arch/x86/kernel/doublefault_32.c2
-rw-r--r--arch/x86/kernel/early_printk.c21
-rw-r--r--arch/x86/kernel/head64.c6
-rw-r--r--arch/x86/kernel/kprobes/core.c11
-rw-r--r--arch/x86/kernel/kvm.c8
-rw-r--r--arch/x86/kernel/microcode_core_early.c38
-rw-r--r--arch/x86/kernel/microcode_intel_early.c30
-rw-r--r--arch/x86/kernel/paravirt.c26
-rw-r--r--arch/x86/kernel/process.c107
-rw-r--r--arch/x86/kernel/quirks.c18
-rw-r--r--arch/x86/kernel/rtc.c69
-rw-r--r--arch/x86/kernel/setup.c48
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/traps.c77
-rw-r--r--arch/x86/kernel/tsc.c6
-rw-r--r--arch/x86/kernel/uprobes.c29
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c69
-rw-r--r--arch/x86/lguest/Kconfig3
-rw-r--r--arch/x86/lguest/boot.c1
-rw-r--r--arch/x86/lib/checksum_32.S2
-rw-r--r--arch/x86/lib/memcpy_32.c6
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S6
-rw-r--r--arch/x86/lib/usercopy_64.c4
-rw-r--r--arch/x86/mm/amdtopology.c3
-rw-r--r--arch/x86/mm/fault.c16
-rw-r--r--arch/x86/mm/highmem_32.c1
-rw-r--r--arch/x86/mm/init.c5
-rw-r--r--arch/x86/mm/init_32.c10
-rw-r--r--arch/x86/mm/init_64.c75
-rw-r--r--arch/x86/mm/ioremap.c7
-rw-r--r--arch/x86/mm/numa.c9
-rw-r--r--arch/x86/mm/pageattr-test.c7
-rw-r--r--arch/x86/mm/pageattr.c21
-rw-r--r--arch/x86/mm/pgtable.c7
-rw-r--r--arch/x86/pci/common.c11
-rw-r--r--arch/x86/pci/xen.c6
-rw-r--r--arch/x86/platform/efi/efi.c192
-rw-r--r--arch/x86/platform/mrst/mrst.c3
-rw-r--r--arch/x86/platform/mrst/vrtc.c44
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c4
-rw-r--r--arch/x86/platform/uv/uv_time.c3
-rw-r--r--arch/x86/power/cpu.c15
-rw-r--r--arch/x86/tools/Makefile1
-rw-r--r--arch/x86/tools/relocs.c783
-rw-r--r--arch/x86/tools/relocs.h36
-rw-r--r--arch/x86/tools/relocs_32.c17
-rw-r--r--arch/x86/tools/relocs_64.c17
-rw-r--r--arch/x86/tools/relocs_common.c76
-rw-r--r--arch/x86/xen/Kconfig2
-rw-r--r--arch/x86/xen/enlighten.c58
-rw-r--r--arch/x86/xen/mmu.c20
-rw-r--r--arch/x86/xen/smp.c23
-rw-r--r--arch/x86/xen/spinlock.c25
-rw-r--r--arch/x86/xen/time.c13
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/kernel/process.c14
-rw-r--r--arch/xtensa/mm/init.c21
715 files changed, 9645 insertions, 6523 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 5a1779c93940..1455579791ec 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -319,13 +319,6 @@ config ARCH_WANT_OLD_COMPAT_IPC
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	bool
 
-config HAVE_VIRT_TO_BUS
-	bool
-	help
-	  An architecture should select this if it implements the
-	  deprecated interface virt_to_bus(). All new architectures
-	  should probably not select this.
-
 config HAVE_ARCH_SECCOMP_FILTER
 	bool
 	help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5833aa441481..8a33ba01301f 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,7 +9,7 @@ config ALPHA
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	select HAVE_GENERIC_HARDIRQS
-	select HAVE_VIRT_TO_BUS
+	select VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 4759fe751aa1..2cc3cc519c54 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
 
 LDFLAGS_vmlinux := -static -N #-relax
 CHECKFLAGS += -D__alpha__ -m64
-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y := -pipe -mno-fp-regs -ffixed-8
 cflags-y += $(call cc-option, -fno-jump-tables)
 
 cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 46cefbd50e73..bae97eb19d26 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
 #define fd_disable_irq() disable_irq(FLOPPY_IRQ)
 #define fd_cacheflush(addr,size) /* nothing */
 #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
-				IRQF_DISABLED, "floppy", NULL)
+				0, "floppy", NULL)
 #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
 
 #ifdef CONFIG_PCI
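
Note on the IRQF_DISABLED removals in this and the later alpha hunks: the flag has been a no-op since genirq started running all handlers with interrupts disabled (around 2.6.35), so callers now simply pass 0. A hedged, illustrative call site that mirrors the converted fd_request_irq() above (the wrapper function name is hypothetical, not part of this patch):

	/* Illustrative only -- mirrors the converted fd_request_irq() macro. */
	static int example_claim_floppy_irq(void)
	{
		/* 0 replaces IRQF_DISABLED, which no longer has any effect */
		return request_irq(FLOPPY_IRQ, floppy_interrupt, 0, "floppy", NULL);
	}
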
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 1f8c72959fb6..52cd2a4a3ff4 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -95,8 +95,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TS_POLLING 0x0010 /* idle task polling need_resched,
 			     skip sending interrupt */
 
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK 1
 static inline void set_restore_sigmask(void)
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 2872accd2215..7b2be251c30f 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -117,13 +117,6 @@ handle_irq(int irq)
 		return;
 	}
 
-	/*
-	 * From here we must proceed with IPL_MAX. Note that we do not
-	 * explicitly enable interrupts afterwards - some MILO PALcode
-	 * (namely LX164 one) seems to have severe problems with RTI
-	 * at IPL 0.
-	 */
-	local_irq_disable();
 	irq_enter();
 	generic_handle_irq_desc(irq, desc);
 	irq_exit();
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 772ddfdb71a8..f433fc11877a 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
 	  unsigned long la_ptr, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
+
+	/*
+	 * Disable interrupts during IRQ handling.
+	 * Note that there is no matching local_irq_enable() due to
+	 * severe problems with RTI at IPL0 and some MILO PALcode
+	 * (namely LX164).
+	 */
+	local_irq_disable();
 	switch (type) {
 	case 0:
 #ifdef CONFIG_SMP
@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
 	{
 		long cpu;
 
-		local_irq_disable();
 		smp_percpu_timer_interrupt(regs);
 		cpu = smp_processor_id();
 		if (cpu != boot_cpuid) {
@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
 
 struct irqaction timer_irqaction = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED,
 	.name = "timer",
 };
 
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 63d27fb9b023..a3fd8a29ccac 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -46,25 +46,6 @@
 void (*pm_power_off)(void) = machine_power_off;
 EXPORT_SYMBOL(pm_power_off);
 
-void
-cpu_idle(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		/* FIXME -- EV6 and LCA45 know how to power down
-		   the CPU. */
-
-		rcu_idle_enter();
-		while (!need_resched())
-			cpu_relax();
-
-		rcu_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
-
 struct halt_info {
 	int mode;
 	char *restart_cmd;
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9603bc234b47..7b60834fb4b2 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -167,8 +167,7 @@ smp_callin(void)
 	      cpuid, current, current->active_mm));
 
 	preempt_disable();
-	/* Do nothing. */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
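
The alpha process.c and smp.c hunks above are one instance of the tree-wide idle-loop consolidation: the hand-rolled cpu_idle() is deleted and secondary CPUs fall into the scheduler's generic idle loop via cpu_startup_entry(CPUHP_ONLINE). A minimal sketch of the converted shape, assuming the generic-idle API of this kernel series (the arch_cpu_idle() hook is optional; alpha relies on the weak default). Illustrative only, not code from the patch, and the secondary-entry function name is hypothetical:

	#include <linux/cpu.h>		/* cpu_startup_entry(), CPUHP_ONLINE */
	#include <linux/irqflags.h>	/* local_irq_enable() */

	/*
	 * Optional per-arch hook: the generic idle loop calls it with
	 * interrupts disabled and expects them re-enabled on return.
	 */
	void arch_cpu_idle(void)
	{
		/* arch-specific wait-for-interrupt would go here */
		local_irq_enable();
	}

	/* Secondary-CPU bring-up now ends in the common idle loop. */
	static void example_secondary_callin(void)
	{
		preempt_disable();
		cpu_startup_entry(CPUHP_ONLINE);	/* does not return */
	}
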
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 4d4c046f708d..1d4aabfcf9a1 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -185,9 +185,12 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
 	mb();
 }
 
-extern void free_reserved_mem(void *, void *);
 extern void pcibios_claim_one_bus(struct pci_bus *);
 
+static struct resource irongate_io = {
+	.name = "Irongate PCI IO",
+	.flags = IORESOURCE_IO,
+};
 static struct resource irongate_mem = {
 	.name = "Irongate PCI MEM",
 	.flags = IORESOURCE_MEM,
@@ -209,6 +212,7 @@ nautilus_init_pci(void)
 
 	irongate = pci_get_bus_and_slot(0, 0);
 	bus->self = irongate;
+	bus->resource[0] = &irongate_io;
 	bus->resource[1] = &irongate_mem;
 
 	pci_bus_size_bridges(bus);
@@ -234,8 +238,8 @@ nautilus_init_pci(void)
 	if (pci_mem < memtop)
 		memtop = pci_mem;
 	if (memtop > alpha_mv.min_mem_address) {
-		free_reserved_mem(__va(alpha_mv.min_mem_address),
-				  __va(memtop));
+		free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address),
+				   (unsigned long)__va(memtop), 0, NULL);
 		printk("nautilus_init_pci: %ldk freed\n",
 			(memtop - alpha_mv.min_mem_address) >> 10);
 	}
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5cf4a481b8c5..a53cf03f49d5 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -280,15 +280,15 @@ titan_late_init(void)
 	 * all reported to the kernel as machine checks, so the handler
 	 * is a nop so it can be called to count the individual events.
 	 */
-	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(63+16, titan_intr_nop, 0,
 		    "CChip Error", NULL);
-	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(62+16, titan_intr_nop, 0,
 		    "PChip 0 H_Error", NULL);
-	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(61+16, titan_intr_nop, 0,
 		    "PChip 1 H_Error", NULL);
-	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(60+16, titan_intr_nop, 0,
 		    "PChip 0 C_Error", NULL);
-	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(59+16, titan_intr_nop, 0,
 		    "PChip 1 C_Error", NULL);
 
 	/*
@@ -348,9 +348,9 @@ privateer_init_pci(void)
 	 * Hook a couple of extra err interrupts that the
 	 * common titan code won't.
 	 */
-	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(53+16, titan_intr_nop, 0,
 		    "NMI", NULL);
-	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(50+16, titan_intr_nop, 0,
 		    "Temperature Warning", NULL);
 
 	/*
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 1ad6ca74bed2..0ba85ee4a466 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -31,6 +31,7 @@
 #include <asm/console.h>
 #include <asm/tlb.h>
 #include <asm/setup.h>
+#include <asm/sections.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long);
36 37
@@ -281,8 +282,6 @@ printk_memory_info(void)
281{ 282{
282 unsigned long codesize, reservedpages, datasize, initsize, tmp; 283 unsigned long codesize, reservedpages, datasize, initsize, tmp;
283 extern int page_is_ram(unsigned long) __init; 284 extern int page_is_ram(unsigned long) __init;
284 extern char _text, _etext, _data, _edata;
285 extern char __init_begin, __init_end;
286 285
287 /* printk all informations */ 286 /* printk all informations */
288 reservedpages = 0; 287 reservedpages = 0;
@@ -318,32 +317,15 @@ mem_init(void)
318#endif /* CONFIG_DISCONTIGMEM */ 317#endif /* CONFIG_DISCONTIGMEM */
319 318
320void 319void
321free_reserved_mem(void *start, void *end)
322{
323 void *__start = start;
324 for (; __start < end; __start += PAGE_SIZE) {
325 ClearPageReserved(virt_to_page(__start));
326 init_page_count(virt_to_page(__start));
327 free_page((long)__start);
328 totalram_pages++;
329 }
330}
331
332void
333free_initmem(void) 320free_initmem(void)
334{ 321{
335 extern char __init_begin, __init_end; 322 free_initmem_default(0);
336
337 free_reserved_mem(&__init_begin, &__init_end);
338 printk ("Freeing unused kernel memory: %ldk freed\n",
339 (&__init_end - &__init_begin) >> 10);
340} 323}
341 324
342#ifdef CONFIG_BLK_DEV_INITRD 325#ifdef CONFIG_BLK_DEV_INITRD
343void 326void
344free_initrd_mem(unsigned long start, unsigned long end) 327free_initrd_mem(unsigned long start, unsigned long end)
345{ 328{
346 free_reserved_mem((void *)start, (void *)end); 329 free_reserved_area(start, end, 0, "initrd");
347 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
348} 330}
349#endif 331#endif
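
The mm/init.c conversion above drops alpha's private free_reserved_mem() in favour of the common mm helpers; the same pattern appears again in arch/arc/mm/init.c further down. As a hedged reference based only on the calls visible in this diff, the new entry points are used like this (sketch, not new code in the patch):

	/* Sketch mirroring the hunk above. */
	void free_initmem(void)
	{
		/* frees [__init_begin, __init_end) and logs the amount freed */
		free_initmem_default(0);		/* 0 = no poison pattern */
	}

	#ifdef CONFIG_BLK_DEV_INITRD
	void free_initrd_mem(unsigned long start, unsigned long end)
	{
		/* generic helper does the ClearPageReserved/free_page bookkeeping */
		free_reserved_area(start, end, 0, "initrd");
	}
	#endif
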
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 3973ae395772..33885048fa36 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -17,6 +17,7 @@
 
 #include <asm/hwrpb.h>
 #include <asm/pgalloc.h>
+#include <asm/sections.h>
 
 pg_data_t node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
@@ -325,8 +326,6 @@ void __init mem_init(void)
 {
 	unsigned long codesize, reservedpages, datasize, initsize, pfn;
 	extern int page_is_ram(unsigned long) __init;
-	extern char _text, _etext, _data, _edata;
-	extern char __init_begin, __init_end;
 	unsigned long nid, i;
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 31f77aec0823..45b8e0cea176 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
 					       s->length, dir);
 
 	return nents;
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index f4c8d36ebecb..a26282857683 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
  */
 #define ELF_PLATFORM (NULL)
 
-#define SET_PERSONALITY(ex) \
-	set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23daa326fc9b..eb2ae53187d9 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -415,7 +415,7 @@
  *-------------------------------------------------------------*/
 .macro SAVE_ALL_EXCEPTION marker
 
-	st \marker, [sp, 8]
+	st \marker, [sp, 8]	/* orig_r8 */
 	st r0, [sp, 4]	/* orig_r0, needed only for sys calls */
 
 	/* Restore r9 used to code the early prologue */
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index ccd84806b62f..eac071668201 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
 	" flag.nz %0 \n"
 	: "=r"(temp), "=r"(flags)
 	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
-	: "cc");
+	: "memory", "cc");
 
 	return flags;
 }
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 	__asm__ __volatile__(
 	" flag %0 \n"
 	:
-	: "r"(flags));
+	: "r"(flags)
+	: "memory");
 }
 
 /*
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
 	" and %0, %0, %1 \n"
 	" flag %0 \n"
 	: "=&r"(temp)
-	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+	: "memory");
 }
 
 /*
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)
 
 	__asm__ __volatile__(
 	" lr %0, [status32] \n"
-	: "=&r"(temp));
+	: "=&r"(temp)
+	:
+	: "memory");
 
 	return temp;
 }
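
All four irqflags.h hunks above add a "memory" clobber to the ARC interrupt-control asm. The clobber is what makes these helpers behave as compiler barriers: without it, GCC may keep values cached in registers or move loads and stores across the irq-disable/enable boundary. A generic GCC inline-asm sketch of the idiom, purely for illustration (placeholder templates, not ARC instructions):

	/* Illustrative only: the "memory" clobber keeps accesses inside the critical section. */
	static inline unsigned long example_irq_save(void)
	{
		unsigned long flags;

		asm volatile("/* read status and disable interrupts here */"
			     : "=r" (flags)
			     :
			     : "memory");	/* compiler barrier */
		return flags;
	}

	static inline void example_irq_restore(unsigned long flags)
	{
		asm volatile("/* write flags back to the status register here */"
			     :
			     : "r" (flags)
			     : "memory");	/* pairs with the barrier above */
	}
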
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index f3c4934f0ca9..4930957ca3d3 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_KGDB
 
-#include <asm/user.h>
+#include <asm/ptrace.h>
 
 /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
  * register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
 };
 
 #else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs, param)
 #endif
 
 #endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 8ae783d20a81..6179de7e07c2 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 #define orig_r8_IS_SCALL		0x0001
 #define orig_r8_IS_SCALL_RESTARTED	0x0002
 #define orig_r8_IS_BRKPT		0x0004
-#define orig_r8_IS_EXCPN		0x0004
+#define orig_r8_IS_EXCPN		0x0008
 #define orig_r8_IS_IRQ1			0x0010
 #define orig_r8_IS_IRQ2			0x0020
 
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e53a5340ba4f..dd785befe7fd 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -16,8 +16,6 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 6afa4f702075..30333cec0fef 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -28,14 +28,14 @@
 */
 struct user_regs_struct {
 
-	struct scratch {
+	struct {
 		long pad;
 		long bta, lp_start, lp_end, lp_count;
 		long status32, ret, blink, fp, gp;
 		long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 		long sp;
 	} scratch;
-	struct callee {
+	struct {
 		long pad;
 		long r25, r24, r23, r22, r21, r20;
 		long r19, r18, r17, r16, r15, r14, r13;
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index 2f390289a792..d14764ae2c60 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -535,4 +535,4 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
 	return instr.is_branch;
 }
 
-#endif /* CONFIG_KGDB || CONFIG_MISALIGN_ACCESS || CONFIG_KPROBES */
+#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index ef6800ba2f03..91eeab81f52d 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -452,7 +452,7 @@ tracesys:
 	; using ERET won't work since next-PC has already committed
 	lr r12, [efa]
 	GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
-	st r12, [r11, THREAD_FAULT_ADDR]
+	st r12, [r11, THREAD_FAULT_ADDR]	; thread.fault_address
 
 	; PRE Sys Call Ptrace hook
 	mov r0, sp	; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
 
 ;################### Special Sys Call Wrappers ##########################
 
-; TBD: call do_fork directly from here
-ARC_ENTRY sys_fork_wrapper
-	SAVE_CALLEE_SAVED_USER
-	bl @sys_fork
-	DISCARD_CALLEE_SAVED_USER
-
-	GET_CURR_THR_INFO_FLAGS r10
-	btst r10, TIF_SYSCALL_TRACE
-	bnz tracesys_exit
-
-	b ret_from_system_call
-ARC_EXIT sys_fork_wrapper
-
-ARC_ENTRY sys_vfork_wrapper
-	SAVE_CALLEE_SAVED_USER
-	bl @sys_vfork
-	DISCARD_CALLEE_SAVED_USER
-
-	GET_CURR_THR_INFO_FLAGS r10
-	btst r10, TIF_SYSCALL_TRACE
-	bnz tracesys_exit
-
-	b ret_from_system_call
-ARC_EXIT sys_vfork_wrapper
-
 ARC_ENTRY sys_clone_wrapper
 	SAVE_CALLEE_SAVED_USER
 	bl @sys_clone
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 2888ba5be47e..52bdc83c1495 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kgdb.h>
+#include <linux/sched.h>
 #include <asm/disasm.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0a7531d99294..cad66851e0c4 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,37 +41,12 @@ SYSCALL_DEFINE0(arc_gettls)
 	return task_thread_info(current)->thr_ptr;
 }
 
-static inline void arch_idle(void)
+void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
 	__asm__("sleep 0x3");
 }
 
-void cpu_idle(void)
-{
-	/* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-doze:
-		local_irq_disable();
-		if (!need_resched()) {
-			arch_idle();
-			goto doze;
-		} else {
-			local_irq_enable();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
-		schedule_preempt_disabled();
-	}
-}
-
 asmlinkage void ret_from_fork(void);
 
 /* Layout of Child kernel mode stack as setup at the end of this function is
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index dc0f968dae0a..2d95ac07df7b 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
 	n += scnprintf(buf + n, len - n, "\n");
 
-#ifdef _ASM_GENERIC_UNISTD_H
 	n += scnprintf(buf + n, len - n,
-		       "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
-#endif
+		       "OS ABI [v3]\t: no-legacy-syscalls\n");
 
 	return buf;
 }
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 3af3e06dcf02..5c7fd603d216 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -141,7 +141,7 @@ void __cpuinit start_kernel_secondary(void)
 
 	local_irq_enable();
 	preempt_disable();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 /*
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index f6bdd07583f3..9d6c1ca26af6 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -6,8 +6,6 @@
 #include <asm/syscalls.h>
 
 #define sys_clone sys_clone_wrapper
-#define sys_fork sys_fork_wrapper
-#define sys_vfork sys_vfork_wrapper
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index caf797de23fc..727d4794ea0f 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -144,37 +144,18 @@ void __init mem_init(void)
 			PAGES_TO_KB(reserved_pages));
 }
 
-static void __init free_init_pages(const char *what, unsigned long begin,
-				   unsigned long end)
-{
-	unsigned long addr;
-
-	pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
-		what, TO_KB(end - begin), begin, end);
-
-	/* need to check that the page we free is not a partial page */
-	for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		free_page(addr);
-		totalram_pages++;
-	}
-}
-
 /*
  * free_initmem: Free all the __init memory.
  */
 void __init_refok free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
-			(unsigned long)__init_begin,
-			(unsigned long)__init_end);
+	free_initmem_default(0);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	free_reserved_area(start, end, 0, "initrd");
 }
 #endif
 
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
index b41e786cdbc0..295cefeb25d3 100644
--- a/arch/arc/plat-arcfpga/Kconfig
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -53,7 +53,7 @@ menuconfig ARC_HAS_BVCI_LAT_UNIT
 	bool "BVCI Bus Latency Unit"
 	depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
 	help
-	  IP to add artifical latency to BVCI Bus Based FPGA builds.
+	  IP to add artificial latency to BVCI Bus Based FPGA builds.
 	  The default latency (even worst case) for FPGA is non-realistic
 	  (~10 SDRAM, ~5 SSRAM).
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5b714695b01b..a39e3214ea3d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,6 +15,7 @@ config ARM
15 select GENERIC_IRQ_SHOW 15 select GENERIC_IRQ_SHOW
16 select GENERIC_PCI_IOMAP 16 select GENERIC_PCI_IOMAP
17 select GENERIC_SMP_IDLE_THREAD 17 select GENERIC_SMP_IDLE_THREAD
18 select GENERIC_IDLE_POLL_SETUP
18 select GENERIC_STRNCPY_FROM_USER 19 select GENERIC_STRNCPY_FROM_USER
19 select GENERIC_STRNLEN_USER 20 select GENERIC_STRNLEN_USER
20 select HARDIRQS_SW_RESEND 21 select HARDIRQS_SW_RESEND
@@ -49,7 +50,6 @@ config ARM
49 select HAVE_REGS_AND_STACK_ACCESS_API 50 select HAVE_REGS_AND_STACK_ACCESS_API
50 select HAVE_SYSCALL_TRACEPOINTS 51 select HAVE_SYSCALL_TRACEPOINTS
51 select HAVE_UID16 52 select HAVE_UID16
52 select HAVE_VIRT_TO_BUS
53 select KTIME_SCALAR 53 select KTIME_SCALAR
54 select PERF_USE_VMALLOC 54 select PERF_USE_VMALLOC
55 select RTC_LIB 55 select RTC_LIB
@@ -550,13 +550,14 @@ config ARCH_IXP4XX
550 select GENERIC_CLOCKEVENTS 550 select GENERIC_CLOCKEVENTS
551 select MIGHT_HAVE_PCI 551 select MIGHT_HAVE_PCI
552 select NEED_MACH_IO_H 552 select NEED_MACH_IO_H
553 select USB_EHCI_BIG_ENDIAN_MMIO
554 select USB_EHCI_BIG_ENDIAN_DESC
553 help 555 help
554 Support for Intel's IXP4XX (XScale) family of processors. 556 Support for Intel's IXP4XX (XScale) family of processors.
555 557
556config ARCH_DOVE 558config ARCH_DOVE
557 bool "Marvell Dove" 559 bool "Marvell Dove"
558 select ARCH_REQUIRE_GPIOLIB 560 select ARCH_REQUIRE_GPIOLIB
559 select COMMON_CLK_DOVE
560 select CPU_V7 561 select CPU_V7
561 select GENERIC_CLOCKEVENTS 562 select GENERIC_CLOCKEVENTS
562 select MIGHT_HAVE_PCI 563 select MIGHT_HAVE_PCI
@@ -744,6 +745,7 @@ config ARCH_RPC
744 select NEED_MACH_IO_H 745 select NEED_MACH_IO_H
745 select NEED_MACH_MEMORY_H 746 select NEED_MACH_MEMORY_H
746 select NO_IOPORT 747 select NO_IOPORT
748 select VIRT_TO_BUS
747 help 749 help
748 On the Acorn Risc-PC, Linux can support the internal IDE disk and 750 On the Acorn Risc-PC, Linux can support the internal IDE disk and
749 CD-ROM interface, serial and parallel port, and the floppy drive. 751 CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -879,6 +881,7 @@ config ARCH_SHARK
879 select ISA_DMA 881 select ISA_DMA
880 select NEED_MACH_MEMORY_H 882 select NEED_MACH_MEMORY_H
881 select PCI 883 select PCI
884 select VIRT_TO_BUS
882 select ZONE_DMA 885 select ZONE_DMA
883 help 886 help
884 Support for the StrongARM based Digital DNARD machine, also known 887 Support for the StrongARM based Digital DNARD machine, also known
@@ -1006,12 +1009,12 @@ config ARCH_MULTI_V4_V5
1006 bool 1009 bool
1007 1010
1008config ARCH_MULTI_V6 1011config ARCH_MULTI_V6
1009 bool "ARMv6 based platforms (ARM11, Scorpion, ...)" 1012 bool "ARMv6 based platforms (ARM11)"
1010 select ARCH_MULTI_V6_V7 1013 select ARCH_MULTI_V6_V7
1011 select CPU_V6 1014 select CPU_V6
1012 1015
1013config ARCH_MULTI_V7 1016config ARCH_MULTI_V7
1014 bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" 1017 bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
1015 default y 1018 default y
1016 select ARCH_MULTI_V6_V7 1019 select ARCH_MULTI_V6_V7
1017 select ARCH_VEXPRESS 1020 select ARCH_VEXPRESS
@@ -1183,9 +1186,9 @@ config ARM_NR_BANKS
1183 default 8 1186 default 8
1184 1187
1185config IWMMXT 1188config IWMMXT
1186 bool "Enable iWMMXt support" 1189 bool "Enable iWMMXt support" if !CPU_PJ4
1187 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 1190 depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
1188 default y if PXA27x || PXA3xx || ARCH_MMP 1191 default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
1189 help 1192 help
1190 Enable support for iWMMXt context switching at run time if 1193 Enable support for iWMMXt context switching at run time if
1191 running on a CPU that supports it. 1194 running on a CPU that supports it.
@@ -1439,6 +1442,16 @@ config ARM_ERRATA_775420
1439 to deadlock. This workaround puts DSB before executing ISB if 1442 to deadlock. This workaround puts DSB before executing ISB if
1440 an abort may occur on cache maintenance. 1443 an abort may occur on cache maintenance.
1441 1444
1445config ARM_ERRATA_798181
1446 bool "ARM errata: TLBI/DSB failure on Cortex-A15"
1447 depends on CPU_V7 && SMP
1448 help
1449 On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
1450 adequately shooting down all use of the old entries. This
1451 option enables the Linux kernel workaround for this erratum
1452 which sends an IPI to the CPUs that are running the same ASID
1453 as the one being invalidated.
1454
1442endmenu 1455endmenu
1443 1456
1444source "arch/arm/common/Kconfig" 1457source "arch/arm/common/Kconfig"
@@ -1462,10 +1475,6 @@ config ISA_DMA
1462 bool 1475 bool
1463 select ISA_DMA_API 1476 select ISA_DMA_API
1464 1477
1465config ARCH_NO_VIRT_TO_BUS
1466 def_bool y
1467 depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
1468
1469# Select ISA DMA interface 1478# Select ISA DMA interface
1470config ISA_DMA_API 1479config ISA_DMA_API
1471 bool 1480 bool
@@ -1657,13 +1666,16 @@ config LOCAL_TIMERS
1657 accounting to be spread across the timer interval, preventing a 1666 accounting to be spread across the timer interval, preventing a
1658 "thundering herd" at every timer tick. 1667 "thundering herd" at every timer tick.
1659 1668
1669# The GPIO number here must be sorted by descending number. In case of
1670# a multiplatform kernel, we just want the highest value required by the
1671# selected platforms.
1660config ARCH_NR_GPIO 1672config ARCH_NR_GPIO
1661 int 1673 int
1662 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA 1674 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
1663 default 355 if ARCH_U8500
1664 default 264 if MACH_H4700
1665 default 512 if SOC_OMAP5 1675 default 512 if SOC_OMAP5
1676 default 355 if ARCH_U8500
1666 default 288 if ARCH_VT8500 || ARCH_SUNXI 1677 default 288 if ARCH_VT8500 || ARCH_SUNXI
1678 default 264 if MACH_H4700
1667 default 0 1679 default 0
1668 help 1680 help
1669 Maximum number of GPIOs in the system. 1681 Maximum number of GPIOs in the system.
@@ -1887,8 +1899,9 @@ config XEN_DOM0
1887 1899
1888config XEN 1900config XEN
1889 bool "Xen guest support on ARM (EXPERIMENTAL)" 1901 bool "Xen guest support on ARM (EXPERIMENTAL)"
1890 depends on ARM && OF 1902 depends on ARM && AEABI && OF
1891 depends on CPU_V7 && !CPU_V6 1903 depends on CPU_V7 && !CPU_V6
1904 depends on !GENERIC_ATOMIC64
1892 help 1905 help
1893 Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. 1906 Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
1894 1907
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index acddddac7ee4..9b31f4311ea2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -492,9 +492,10 @@ config DEBUG_IMX_UART_PORT
492 DEBUG_IMX31_UART || \ 492 DEBUG_IMX31_UART || \
493 DEBUG_IMX35_UART || \ 493 DEBUG_IMX35_UART || \
494 DEBUG_IMX51_UART || \ 494 DEBUG_IMX51_UART || \
495 DEBUG_IMX50_IMX53_UART || \ 495 DEBUG_IMX53_UART || \
496 DEBUG_IMX6Q_UART 496 DEBUG_IMX6Q_UART
497 default 1 497 default 1
498 depends on ARCH_MXC
498 help 499 help
499 Choose UART port on which kernel low-level debug messages 500 Choose UART port on which kernel low-level debug messages
500 should be output. 501 should be output.
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 71768b8a1ab9..84aa2caf07ed 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -115,4 +115,4 @@ i:
115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
116 $(obj)/Image System.map "$(INSTALL_PATH)" 116 $(obj)/Image System.map "$(INSTALL_PATH)"
117 117
118subdir- := bootp compressed 118subdir- := bootp compressed dts
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index dd0c57dd9f30..3234875824dc 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
54 }; 54 };
55 55
56 mvsdio@d00d4000 { 56 mvsdio@d00d4000 {
57 pinctrl-0 = <&sdio_pins2>; 57 pinctrl-0 = <&sdio_pins3>;
58 pinctrl-names = "default"; 58 pinctrl-names = "default";
59 status = "okay"; 59 status = "okay";
60 /* 60 /*
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index f8e4855bc9a5..070bba4f2585 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -64,5 +64,13 @@
64 status = "okay"; 64 status = "okay";
65 /* No CD or WP GPIOs */ 65 /* No CD or WP GPIOs */
66 }; 66 };
67
68 usb@d0050000 {
69 status = "okay";
70 };
71
72 usb@d0051000 {
73 status = "okay";
74 };
67 }; 75 };
68}; 76};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 6f1acc75e155..5b708208b607 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -31,7 +31,6 @@
31 mpic: interrupt-controller@d0020000 { 31 mpic: interrupt-controller@d0020000 {
32 compatible = "marvell,mpic"; 32 compatible = "marvell,mpic";
33 #interrupt-cells = <1>; 33 #interrupt-cells = <1>;
34 #address-cells = <1>;
35 #size-cells = <1>; 34 #size-cells = <1>;
36 interrupt-controller; 35 interrupt-controller;
37 }; 36 };
@@ -54,7 +53,7 @@
54 reg = <0xd0012000 0x100>; 53 reg = <0xd0012000 0x100>;
55 reg-shift = <2>; 54 reg-shift = <2>;
56 interrupts = <41>; 55 interrupts = <41>;
57 reg-io-width = <4>; 56 reg-io-width = <1>;
58 status = "disabled"; 57 status = "disabled";
59 }; 58 };
60 serial@d0012100 { 59 serial@d0012100 {
@@ -62,7 +61,7 @@
62 reg = <0xd0012100 0x100>; 61 reg = <0xd0012100 0x100>;
63 reg-shift = <2>; 62 reg-shift = <2>;
64 interrupts = <42>; 63 interrupts = <42>;
65 reg-io-width = <4>; 64 reg-io-width = <1>;
66 status = "disabled"; 65 status = "disabled";
67 }; 66 };
68 67
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 8188d138020e..a195debb67d3 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -59,6 +59,12 @@
59 "mpp50", "mpp51", "mpp52"; 59 "mpp50", "mpp51", "mpp52";
60 marvell,function = "sd0"; 60 marvell,function = "sd0";
61 }; 61 };
62
63 sdio_pins3: sdio-pins3 {
64 marvell,pins = "mpp48", "mpp49", "mpp50",
65 "mpp51", "mpp52", "mpp53";
66 marvell,function = "sd0";
67 };
62 }; 68 };
63 69
64 gpio0: gpio@d0018100 { 70 gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 1443949c165e..ca00d8326c87 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -46,7 +46,7 @@
46 reg = <0xd0012200 0x100>; 46 reg = <0xd0012200 0x100>;
47 reg-shift = <2>; 47 reg-shift = <2>;
48 interrupts = <43>; 48 interrupts = <43>;
49 reg-io-width = <4>; 49 reg-io-width = <1>;
50 status = "disabled"; 50 status = "disabled";
51 }; 51 };
52 serial@d0012300 { 52 serial@d0012300 {
@@ -54,7 +54,7 @@
54 reg = <0xd0012300 0x100>; 54 reg = <0xd0012300 0x100>;
55 reg-shift = <2>; 55 reg-shift = <2>;
56 interrupts = <44>; 56 interrupts = <44>;
57 reg-io-width = <4>; 57 reg-io-width = <1>;
58 status = "disabled"; 58 status = "disabled";
59 }; 59 };
60 60
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb7bcc51608d..39253b9aedd1 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -322,6 +322,24 @@
322 }; 322 };
323 }; 323 };
324 324
325 spi0 {
326 pinctrl_spi0: spi0-0 {
327 atmel,pins =
328 <0 0 0x1 0x0 /* PA0 periph A SPI0_MISO pin */
329 0 1 0x1 0x0 /* PA1 periph A SPI0_MOSI pin */
330 0 2 0x1 0x0>; /* PA2 periph A SPI0_SPCK pin */
331 };
332 };
333
334 spi1 {
335 pinctrl_spi1: spi1-0 {
336 atmel,pins =
337 <1 0 0x1 0x0 /* PB0 periph A SPI1_MISO pin */
338 1 1 0x1 0x0 /* PB1 periph A SPI1_MOSI pin */
339 1 2 0x1 0x0>; /* PB2 periph A SPI1_SPCK pin */
340 };
341 };
342
325 pioA: gpio@fffff400 { 343 pioA: gpio@fffff400 {
326 compatible = "atmel,at91rm9200-gpio"; 344 compatible = "atmel,at91rm9200-gpio";
327 reg = <0xfffff400 0x200>; 345 reg = <0xfffff400 0x200>;
@@ -471,6 +489,28 @@
471 status = "disabled"; 489 status = "disabled";
472 }; 490 };
473 491
492 spi0: spi@fffc8000 {
493 #address-cells = <1>;
494 #size-cells = <0>;
495 compatible = "atmel,at91rm9200-spi";
496 reg = <0xfffc8000 0x200>;
497 interrupts = <12 4 3>;
498 pinctrl-names = "default";
499 pinctrl-0 = <&pinctrl_spi0>;
500 status = "disabled";
501 };
502
503 spi1: spi@fffcc000 {
504 #address-cells = <1>;
505 #size-cells = <0>;
506 compatible = "atmel,at91rm9200-spi";
507 reg = <0xfffcc000 0x200>;
508 interrupts = <13 4 3>;
509 pinctrl-names = "default";
510 pinctrl-0 = <&pinctrl_spi1>;
511 status = "disabled";
512 };
513
474 adc0: adc@fffe0000 { 514 adc0: adc@fffe0000 {
475 compatible = "atmel,at91sam9260-adc"; 515 compatible = "atmel,at91sam9260-adc";
476 reg = <0xfffe0000 0x100>; 516 reg = <0xfffe0000 0x100>;
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 271d4de026e9..94b58ab2cc08 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -303,6 +303,24 @@
303 }; 303 };
304 }; 304 };
305 305
306 spi0 {
307 pinctrl_spi0: spi0-0 {
308 atmel,pins =
309 <0 0 0x2 0x0 /* PA0 periph B SPI0_MISO pin */
310 0 1 0x2 0x0 /* PA1 periph B SPI0_MOSI pin */
311 0 2 0x2 0x0>; /* PA2 periph B SPI0_SPCK pin */
312 };
313 };
314
315 spi1 {
316 pinctrl_spi1: spi1-0 {
317 atmel,pins =
318 <1 12 0x1 0x0 /* PB12 periph A SPI1_MISO pin */
319 1 13 0x1 0x0 /* PB13 periph A SPI1_MOSI pin */
320 1 14 0x1 0x0>; /* PB14 periph A SPI1_SPCK pin */
321 };
322 };
323
306 pioA: gpio@fffff200 { 324 pioA: gpio@fffff200 {
307 compatible = "atmel,at91rm9200-gpio"; 325 compatible = "atmel,at91rm9200-gpio";
308 reg = <0xfffff200 0x200>; 326 reg = <0xfffff200 0x200>;
@@ -462,6 +480,28 @@
462 reg = <0xfffffd40 0x10>; 480 reg = <0xfffffd40 0x10>;
463 status = "disabled"; 481 status = "disabled";
464 }; 482 };
483
484 spi0: spi@fffa4000 {
485 #address-cells = <1>;
486 #size-cells = <0>;
487 compatible = "atmel,at91rm9200-spi";
488 reg = <0xfffa4000 0x200>;
489 interrupts = <14 4 3>;
490 pinctrl-names = "default";
491 pinctrl-0 = <&pinctrl_spi0>;
492 status = "disabled";
493 };
494
495 spi1: spi@fffa8000 {
496 #address-cells = <1>;
497 #size-cells = <0>;
498 compatible = "atmel,at91rm9200-spi";
499 reg = <0xfffa8000 0x200>;
500 interrupts = <15 4 3>;
501 pinctrl-names = "default";
502 pinctrl-0 = <&pinctrl_spi1>;
503 status = "disabled";
504 };
465 }; 505 };
466 506
467 nand0: nand@40000000 { 507 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 1eb08728f527..a14e424b2e81 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -79,6 +79,16 @@
79 }; 79 };
80 }; 80 };
81 }; 81 };
82
83 spi0: spi@fffa4000 {
84 status = "okay";
85 cs-gpios = <&pioA 5 0>, <0>, <0>, <0>;
86 mtd_dataflash@0 {
87 compatible = "atmel,at45", "atmel,dataflash";
88 spi-max-frequency = <50000000>;
89 reg = <0>;
90 };
91 };
82 }; 92 };
83 93
84 nand0: nand@40000000 { 94 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index da15e83e7f17..23d1f468f27f 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -96,6 +96,16 @@
96 status = "okay"; 96 status = "okay";
97 pinctrl-0 = <&pinctrl_ssc0_tx>; 97 pinctrl-0 = <&pinctrl_ssc0_tx>;
98 }; 98 };
99
100 spi0: spi@fffc8000 {
101 status = "okay";
102 cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
103 mtd_dataflash@0 {
104 compatible = "atmel,at45", "atmel,dataflash";
105 spi-max-frequency = <50000000>;
106 reg = <1>;
107 };
108 };
99 }; 109 };
100 110
101 nand0: nand@40000000 { 111 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 6b1d4cab24c2..cfdf429578b5 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -322,6 +322,24 @@
322 }; 322 };
323 }; 323 };
324 324
325 spi0 {
326 pinctrl_spi0: spi0-0 {
327 atmel,pins =
328 <1 0 0x1 0x0 /* PB0 periph A SPI0_MISO pin */
329 1 1 0x1 0x0 /* PB1 periph A SPI0_MOSI pin */
330 1 2 0x1 0x0>; /* PB2 periph A SPI0_SPCK pin */
331 };
332 };
333
334 spi1 {
335 pinctrl_spi1: spi1-0 {
336 atmel,pins =
337 <1 14 0x1 0x0 /* PB14 periph A SPI1_MISO pin */
338 1 15 0x1 0x0 /* PB15 periph A SPI1_MOSI pin */
339 1 16 0x1 0x0>; /* PB16 periph A SPI1_SPCK pin */
340 };
341 };
342
325 pioA: gpio@fffff200 { 343 pioA: gpio@fffff200 {
326 compatible = "atmel,at91rm9200-gpio"; 344 compatible = "atmel,at91rm9200-gpio";
327 reg = <0xfffff200 0x200>; 345 reg = <0xfffff200 0x200>;
@@ -531,6 +549,28 @@
531 reg = <0xfffffd40 0x10>; 549 reg = <0xfffffd40 0x10>;
532 status = "disabled"; 550 status = "disabled";
533 }; 551 };
552
553 spi0: spi@fffa4000 {
554 #address-cells = <1>;
555 #size-cells = <0>;
556 compatible = "atmel,at91rm9200-spi";
557 reg = <0xfffa4000 0x200>;
558 interrupts = <14 4 3>;
559 pinctrl-names = "default";
560 pinctrl-0 = <&pinctrl_spi0>;
561 status = "disabled";
562 };
563
564 spi1: spi@fffa8000 {
565 #address-cells = <1>;
566 #size-cells = <0>;
567 compatible = "atmel,at91rm9200-spi";
568 reg = <0xfffa8000 0x200>;
569 interrupts = <15 4 3>;
570 pinctrl-names = "default";
571 pinctrl-0 = <&pinctrl_spi1>;
572 status = "disabled";
573 };
534 }; 574 };
535 575
536 nand0: nand@40000000 { 576 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 20c31913c270..92c52a7d70bc 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -102,6 +102,16 @@
102 }; 102 };
103 }; 103 };
104 }; 104 };
105
106 spi0: spi@fffa4000{
107 status = "okay";
108 cs-gpios = <&pioB 3 0>, <0>, <0>, <0>;
109 mtd_dataflash@0 {
110 compatible = "atmel,at45", "atmel,dataflash";
111 spi-max-frequency = <13000000>;
112 reg = <0>;
113 };
114 };
105 }; 115 };
106 116
107 nand0: nand@40000000 { 117 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 7750f98dd764..b2961f1ea51b 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -261,6 +261,24 @@
261 }; 261 };
262 }; 262 };
263 263
264 spi0 {
265 pinctrl_spi0: spi0-0 {
266 atmel,pins =
267 <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */
268 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */
269 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */
270 };
271 };
272
273 spi1 {
274 pinctrl_spi1: spi1-0 {
275 atmel,pins =
276 <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */
277 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */
278 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */
279 };
280 };
281
264 pioA: gpio@fffff400 { 282 pioA: gpio@fffff400 {
265 compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; 283 compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
266 reg = <0xfffff400 0x200>; 284 reg = <0xfffff400 0x200>;
@@ -373,6 +391,28 @@
373 #size-cells = <0>; 391 #size-cells = <0>;
374 status = "disabled"; 392 status = "disabled";
375 }; 393 };
394
395 spi0: spi@f0000000 {
396 #address-cells = <1>;
397 #size-cells = <0>;
398 compatible = "atmel,at91rm9200-spi";
399 reg = <0xf0000000 0x100>;
400 interrupts = <13 4 3>;
401 pinctrl-names = "default";
402 pinctrl-0 = <&pinctrl_spi0>;
403 status = "disabled";
404 };
405
406 spi1: spi@f0004000 {
407 #address-cells = <1>;
408 #size-cells = <0>;
409 compatible = "atmel,at91rm9200-spi";
410 reg = <0xf0004000 0x100>;
411 interrupts = <14 4 3>;
412 pinctrl-names = "default";
413 pinctrl-0 = <&pinctrl_spi1>;
414 status = "disabled";
415 };
376 }; 416 };
377 417
378 nand0: nand@40000000 { 418 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d400f8de4387..34c842b1efb2 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -67,6 +67,16 @@
67 }; 67 };
68 }; 68 };
69 }; 69 };
70
71 spi0: spi@f0000000 {
72 status = "okay";
73 cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
74 m25p80@0 {
75 compatible = "atmel,at25df321a";
76 spi-max-frequency = <50000000>;
77 reg = <0>;
78 };
79 };
70 }; 80 };
71 81
72 nand0: nand@40000000 { 82 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e641931f..347b438d47fa 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
238 nand { 238 nand {
239 pinctrl_nand: nand-0 { 239 pinctrl_nand: nand-0 {
240 atmel,pins = 240 atmel,pins =
241 <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ 241 <3 0 0x1 0x0 /* PD0 periph A Read Enable */
242 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ 242 3 1 0x1 0x0 /* PD1 periph A Write Enable */
243 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
244 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
245 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
246 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
247 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
248 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
249 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
250 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
251 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
252 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
253 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
254 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
255 };
256
257 pinctrl_nand_16bits: nand_16bits-0 {
258 atmel,pins =
259 <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
260 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
261 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
262 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
263 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
264 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
265 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
266 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
243 }; 267 };
244 }; 268 };
245 269
@@ -319,6 +343,24 @@
319 }; 343 };
320 }; 344 };
321 345
346 spi0 {
347 pinctrl_spi0: spi0-0 {
348 atmel,pins =
349 <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */
350 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */
351 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */
352 };
353 };
354
355 spi1 {
356 pinctrl_spi1: spi1-0 {
357 atmel,pins =
358 <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */
359 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */
360 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */
361 };
362 };
363
322 pioA: gpio@fffff400 { 364 pioA: gpio@fffff400 {
323 compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; 365 compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
324 reg = <0xfffff400 0x200>; 366 reg = <0xfffff400 0x200>;
@@ -505,6 +547,28 @@
505 trigger-value = <0x6>; 547 trigger-value = <0x6>;
506 }; 548 };
507 }; 549 };
550
551 spi0: spi@f0000000 {
552 #address-cells = <1>;
553 #size-cells = <0>;
554 compatible = "atmel,at91rm9200-spi";
555 reg = <0xf0000000 0x100>;
556 interrupts = <13 4 3>;
557 pinctrl-names = "default";
558 pinctrl-0 = <&pinctrl_spi0>;
559 status = "disabled";
560 };
561
562 spi1: spi@f0004000 {
563 #address-cells = <1>;
564 #size-cells = <0>;
565 compatible = "atmel,at91rm9200-spi";
566 reg = <0xf0004000 0x100>;
567 interrupts = <14 4 3>;
568 pinctrl-names = "default";
569 pinctrl-0 = <&pinctrl_spi1>;
570 status = "disabled";
571 };
508 }; 572 };
509 573
510 nand0: nand@40000000 { 574 nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 8a7cf1d9cf5d..09f5e667ca7a 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -84,6 +84,16 @@
84 }; 84 };
85 }; 85 };
86 }; 86 };
87
88 spi0: spi@f0000000 {
89 status = "okay";
90 cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
91 m25p80@0 {
92 compatible = "atmel,at25df321a";
93 spi-max-frequency = <50000000>;
94 reg = <0>;
95 };
96 };
87 }; 97 };
88 98
89 usb0: ohci@00600000 { 99 usb0: ohci@00600000 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 4bf2a8774aa7..7e0481e2441a 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -105,7 +105,7 @@
105 compatible = "fixed-clock"; 105 compatible = "fixed-clock";
106 reg = <1>; 106 reg = <1>;
107 #clock-cells = <0>; 107 #clock-cells = <0>;
108 clock-frequency = <150000000>; 108 clock-frequency = <250000000>;
109 }; 109 };
110 }; 110 };
111}; 111};
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 69140ba99f46..aaa63d0a8096 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -191,8 +191,8 @@
191 191
192 prcmu: prcmu@80157000 { 192 prcmu: prcmu@80157000 {
193 compatible = "stericsson,db8500-prcmu"; 193 compatible = "stericsson,db8500-prcmu";
194 reg = <0x80157000 0x1000>; 194 reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
195 reg-names = "prcmu"; 195 reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
196 interrupts = <0 47 0x4>; 196 interrupts = <0 47 0x4>;
197 #address-cells = <1>; 197 #address-cells = <1>;
198 #size-cells = <1>; 198 #size-cells = <1>;
@@ -319,9 +319,8 @@
319 }; 319 };
320 }; 320 };
321 321
322 ab8500@5 { 322 ab8500 {
323 compatible = "stericsson,ab8500"; 323 compatible = "stericsson,ab8500";
324 reg = <5>; /* mailbox 5 is i2c */
325 interrupt-parent = <&intc>; 324 interrupt-parent = <&intc>;
326 interrupts = <0 40 0x4>; 325 interrupts = <0 40 0x4>;
327 interrupt-controller; 326 interrupt-controller;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 67dbe20868a2..f7509cafc377 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -197,6 +197,11 @@
197 status = "disabled"; 197 status = "disabled";
198 }; 198 };
199 199
200 rtc@d8500 {
201 compatible = "marvell,orion-rtc";
202 reg = <0xd8500 0x20>;
203 };
204
200 crypto: crypto@30000 { 205 crypto: crypto@30000 {
201 compatible = "marvell,orion-crypto"; 206 compatible = "marvell,orion-crypto";
202 reg = <0x30000 0x10000>, 207 reg = <0x30000 0x10000>,
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index e1347fceb5bc..1a62bcf18aa3 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -275,18 +275,27 @@
275 compatible = "arm,pl330", "arm,primecell"; 275 compatible = "arm,pl330", "arm,primecell";
276 reg = <0x12680000 0x1000>; 276 reg = <0x12680000 0x1000>;
277 interrupts = <0 35 0>; 277 interrupts = <0 35 0>;
278 #dma-cells = <1>;
279 #dma-channels = <8>;
280 #dma-requests = <32>;
278 }; 281 };
279 282
280 pdma1: pdma@12690000 { 283 pdma1: pdma@12690000 {
281 compatible = "arm,pl330", "arm,primecell"; 284 compatible = "arm,pl330", "arm,primecell";
282 reg = <0x12690000 0x1000>; 285 reg = <0x12690000 0x1000>;
283 interrupts = <0 36 0>; 286 interrupts = <0 36 0>;
287 #dma-cells = <1>;
288 #dma-channels = <8>;
289 #dma-requests = <32>;
284 }; 290 };
285 291
286 mdma1: mdma@12850000 { 292 mdma1: mdma@12850000 {
287 compatible = "arm,pl330", "arm,primecell"; 293 compatible = "arm,pl330", "arm,primecell";
288 reg = <0x12850000 0x1000>; 294 reg = <0x12850000 0x1000>;
289 interrupts = <0 34 0>; 295 interrupts = <0 34 0>;
296 #dma-cells = <1>;
297 #dma-channels = <8>;
298 #dma-requests = <1>;
290 }; 299 };
291 }; 300 };
292}; 301};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5f3562ad6746..9a99755920c0 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -142,12 +142,18 @@
142 compatible = "arm,pl330", "arm,primecell"; 142 compatible = "arm,pl330", "arm,primecell";
143 reg = <0x120000 0x1000>; 143 reg = <0x120000 0x1000>;
144 interrupts = <0 34 0>; 144 interrupts = <0 34 0>;
145 #dma-cells = <1>;
146 #dma-channels = <8>;
147 #dma-requests = <32>;
145 }; 148 };
146 149
147 pdma1: pdma@121B0000 { 150 pdma1: pdma@121B0000 {
148 compatible = "arm,pl330", "arm,primecell"; 151 compatible = "arm,pl330", "arm,primecell";
149 reg = <0x121000 0x1000>; 152 reg = <0x121000 0x1000>;
150 interrupts = <0 35 0>; 153 interrupts = <0 35 0>;
154 #dma-cells = <1>;
155 #dma-channels = <8>;
156 #dma-requests = <32>;
151 }; 157 };
152 }; 158 };
153 159
diff --git a/arch/arm/boot/dts/href.dtsi b/arch/arm/boot/dts/href.dtsi
index 592fb9dc35bd..379128eb9d98 100644
--- a/arch/arm/boot/dts/href.dtsi
+++ b/arch/arm/boot/dts/href.dtsi
@@ -221,7 +221,7 @@
221 }; 221 };
222 }; 222 };
223 223
224 ab8500@5 { 224 ab8500 {
225 ab8500-regulators { 225 ab8500-regulators {
226 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { 226 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
227 regulator-name = "V-DISPLAY"; 227 regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/hrefv60plus.dts b/arch/arm/boot/dts/hrefv60plus.dts
index 55f4191a626e..2b587a74b813 100644
--- a/arch/arm/boot/dts/hrefv60plus.dts
+++ b/arch/arm/boot/dts/hrefv60plus.dts
@@ -158,7 +158,7 @@
158 }; 158 };
159 }; 159 };
160 160
161 ab8500@5 { 161 ab8500 {
162 ab8500-regulators { 162 ab8500-regulators {
163 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { 163 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
164 regulator-name = "V-DISPLAY"; 164 regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 6ce3d17c3a29..fd36e1cca104 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,7 +152,6 @@
152 i2c0: i2c@80058000 { 152 i2c0: i2c@80058000 {
153 pinctrl-names = "default"; 153 pinctrl-names = "default";
154 pinctrl-0 = <&i2c0_pins_a>; 154 pinctrl-0 = <&i2c0_pins_a>;
155 clock-frequency = <400000>;
156 status = "okay"; 155 status = "okay";
157 156
158 sgtl5000: codec@0a { 157 sgtl5000: codec@0a {
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
index e6cde8aa7fff..6c6a5442800a 100644
--- a/arch/arm/boot/dts/imx28-sps1.dts
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,7 +70,6 @@
70 i2c0: i2c@80058000 { 70 i2c0: i2c@80058000 {
71 pinctrl-names = "default"; 71 pinctrl-names = "default";
72 pinctrl-0 = <&i2c0_pins_a>; 72 pinctrl-0 = <&i2c0_pins_a>;
73 clock-frequency = <400000>;
74 status = "okay"; 73 status = "okay";
75 74
76 rtc: rtc@51 { 75 rtc: rtc@51 {
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index e54fffd48369..468c0a1d48d9 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -42,10 +42,9 @@
42 fsl,pins = <689 0x10000 /* DISP1_DRDY */ 42 fsl,pins = <689 0x10000 /* DISP1_DRDY */
43 482 0x10000 /* DISP1_HSYNC */ 43 482 0x10000 /* DISP1_HSYNC */
44 489 0x10000 /* DISP1_VSYNC */ 44 489 0x10000 /* DISP1_VSYNC */
45 684 0x10000 /* DISP1_DAT_0 */
46 515 0x10000 /* DISP1_DAT_22 */ 45 515 0x10000 /* DISP1_DAT_22 */
47 523 0x10000 /* DISP1_DAT_23 */ 46 523 0x10000 /* DISP1_DAT_23 */
48 543 0x10000 /* DISP1_DAT_21 */ 47 545 0x10000 /* DISP1_DAT_21 */
49 553 0x10000 /* DISP1_DAT_20 */ 48 553 0x10000 /* DISP1_DAT_20 */
50 558 0x10000 /* DISP1_DAT_19 */ 49 558 0x10000 /* DISP1_DAT_19 */
51 564 0x10000 /* DISP1_DAT_18 */ 50 564 0x10000 /* DISP1_DAT_18 */
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 06ec460b4581..281a223591ff 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,6 +91,7 @@
91 compatible = "arm,cortex-a9-twd-timer"; 91 compatible = "arm,cortex-a9-twd-timer";
92 reg = <0x00a00600 0x20>; 92 reg = <0x00a00600 0x20>;
93 interrupts = <1 13 0xf01>; 93 interrupts = <1 13 0xf01>;
94 clocks = <&clks 15>;
94 }; 95 };
95 96
96 L2: l2-cache@00a02000 { 97 L2: l2-cache@00a02000 {
diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts
index 5bb0bf39d3b8..c9c44b2f62d7 100644
--- a/arch/arm/boot/dts/kirkwood-dns320.dts
+++ b/arch/arm/boot/dts/kirkwood-dns320.dts
@@ -42,12 +42,10 @@
42 42
43 ocp@f1000000 { 43 ocp@f1000000 {
44 serial@12000 { 44 serial@12000 {
45 clock-frequency = <166666667>;
46 status = "okay"; 45 status = "okay";
47 }; 46 };
48 47
49 serial@12100 { 48 serial@12100 {
50 clock-frequency = <166666667>;
51 status = "okay"; 49 status = "okay";
52 }; 50 };
53 }; 51 };
diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts
index d430713ea9b9..e4e4930dc5cf 100644
--- a/arch/arm/boot/dts/kirkwood-dns325.dts
+++ b/arch/arm/boot/dts/kirkwood-dns325.dts
@@ -50,7 +50,6 @@
50 }; 50 };
51 }; 51 };
52 serial@12000 { 52 serial@12000 {
53 clock-frequency = <200000000>;
54 status = "okay"; 53 status = "okay";
55 }; 54 };
56 }; 55 };
diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts
index 2e3dd34e21a5..0196cf6b0ef2 100644
--- a/arch/arm/boot/dts/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/kirkwood-dockstar.dts
@@ -37,7 +37,6 @@
37 }; 37 };
38 }; 38 };
39 serial@12000 { 39 serial@12000 {
40 clock-frequency = <200000000>;
41 status = "ok"; 40 status = "ok";
42 }; 41 };
43 42
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index ef2d8c705709..289e51d86372 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -38,7 +38,6 @@
38 }; 38 };
39 }; 39 };
40 serial@12000 { 40 serial@12000 {
41 clock-frequency = <200000000>;
42 status = "ok"; 41 status = "ok";
43 }; 42 };
44 43
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index 1b133e0c566e..c3573be7b92c 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -73,11 +73,11 @@
73 }; 73 };
74 }; 74 };
75 serial@12000 { 75 serial@12000 {
76 clock-frequency = <200000000>;
77 status = "ok"; 76 status = "ok";
78 }; 77 };
79 78
80 nand@3000000 { 79 nand@3000000 {
80 chip-delay = <40>;
81 status = "okay"; 81 status = "okay";
82 82
83 partition@0 { 83 partition@0 {
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index 71902da33d63..5335b1aa8601 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -51,7 +51,6 @@
51 }; 51 };
52 }; 52 };
53 serial@12000 { 53 serial@12000 {
54 clock-frequency = <200000000>;
55 status = "okay"; 54 status = "okay";
56 }; 55 };
57 56
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 504f16be8b54..12ccf74ac3c4 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -78,7 +78,6 @@
78 }; 78 };
79 }; 79 };
80 serial@12000 { 80 serial@12000 {
81 clock-frequency = <200000000>;
82 status = "ok"; 81 status = "ok";
83 }; 82 };
84 83
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 6cae4599c4b3..3694e94f6e99 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
96 marvell,function = "gpio"; 96 marvell,function = "gpio";
97 }; 97 };
98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { 98 pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
99 marvell,pins = "mpp44"; 99 marvell,pins = "mpp46";
100 marvell,function = "gpio"; 100 marvell,function = "gpio";
101 }; 101 };
102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { 102 pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
103 marvell,pins = "mpp45"; 103 marvell,pins = "mpp47";
104 marvell,function = "gpio"; 104 marvell,function = "gpio";
105 }; 105 };
106 106
@@ -115,7 +115,6 @@
115 }; 115 };
116 116
117 serial@12000 { 117 serial@12000 {
118 clock-frequency = <200000000>;
119 status = "ok"; 118 status = "ok";
120 }; 119 };
121 120
@@ -158,14 +157,14 @@
158 gpios = <&gpio0 16 0>; 157 gpios = <&gpio0 16 0>;
159 linux,default-trigger = "default-on"; 158 linux,default-trigger = "default-on";
160 }; 159 };
161 health_led1 { 160 rebuild_led {
161 label = "status:white:rebuild_led";
162 gpios = <&gpio1 4 0>;
163 };
164 health_led {
162 label = "status:red:health_led"; 165 label = "status:red:health_led";
163 gpios = <&gpio1 5 0>; 166 gpios = <&gpio1 5 0>;
164 }; 167 };
165 health_led2 {
166 label = "status:white:health_led";
167 gpios = <&gpio1 4 0>;
168 };
169 backup_led { 168 backup_led {
170 label = "status:blue:backup_led"; 169 label = "status:blue:backup_led";
171 gpios = <&gpio0 15 0>; 170 gpios = <&gpio0 15 0>;
diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
index 8db3123ac80f..5bbd0542cdd3 100644
--- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
+++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
@@ -34,7 +34,6 @@
34 }; 34 };
35 35
36 serial@12000 { 36 serial@12000 {
37 clock-frequency = <200000000>;
38 status = "ok"; 37 status = "ok";
39 }; 38 };
40 39
diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts
index 9510c9ea666c..9f55d95f35f5 100644
--- a/arch/arm/boot/dts/kirkwood-lschlv2.dts
+++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts
@@ -13,7 +13,6 @@
13 13
14 ocp@f1000000 { 14 ocp@f1000000 {
15 serial@12000 { 15 serial@12000 {
16 clock-frequency = <166666667>;
17 status = "okay"; 16 status = "okay";
18 }; 17 };
19 }; 18 };
diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts
index 739019c4cba9..5c84c118ed8d 100644
--- a/arch/arm/boot/dts/kirkwood-lsxhl.dts
+++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts
@@ -13,7 +13,6 @@
13 13
14 ocp@f1000000 { 14 ocp@f1000000 {
15 serial@12000 { 15 serial@12000 {
16 clock-frequency = <200000000>;
17 status = "okay"; 16 status = "okay";
18 }; 17 };
19 }; 18 };
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index 662dfd81b1ce..758824118a9a 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -90,7 +90,6 @@
90 }; 90 };
91 91
92 serial@12000 { 92 serial@12000 {
93 clock-frequency = <200000000>;
94 status = "ok"; 93 status = "ok";
95 }; 94 };
96 95
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index e8e7ecef1650..6affd924fe11 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -23,7 +23,6 @@
23 }; 23 };
24 24
25 serial@12000 { 25 serial@12000 {
26 clock-frequency = <166666667>;
27 status = "okay"; 26 status = "okay";
28 }; 27 };
29 28
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index 3a178cf708d7..a7412b937a8a 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -117,7 +117,6 @@
117 }; 117 };
118 118
119 serial@12000 { 119 serial@12000 {
120 clock-frequency = <200000000>;
121 status = "ok"; 120 status = "ok";
122 }; 121 };
123 122
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index ede7fe0d7a87..d27f7245f8e7 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -18,12 +18,10 @@
18 18
19 ocp@f1000000 { 19 ocp@f1000000 {
20 serial@12000 { 20 serial@12000 {
21 clock-frequency = <200000000>;
22 status = "ok"; 21 status = "ok";
23 }; 22 };
24 23
25 serial@12100 { 24 serial@12100 {
26 clock-frequency = <200000000>;
27 status = "ok"; 25 status = "ok";
28 }; 26 };
29 27
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index 842ff95d60df..66eb45b00b25 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -108,7 +108,6 @@
108 }; 108 };
109 109
110 serial@12000 { 110 serial@12000 {
111 clock-frequency = <200000000>;
112 status = "ok"; 111 status = "ok";
113 }; 112 };
114 113
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 2c738d9dc82a..fada7e6d24d8 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -38,6 +38,7 @@
38 interrupt-controller; 38 interrupt-controller;
39 #interrupt-cells = <2>; 39 #interrupt-cells = <2>;
40 interrupts = <35>, <36>, <37>, <38>; 40 interrupts = <35>, <36>, <37>, <38>;
41 clocks = <&gate_clk 7>;
41 }; 42 };
42 43
43 gpio1: gpio@10140 { 44 gpio1: gpio@10140 {
@@ -49,6 +50,7 @@
49 interrupt-controller; 50 interrupt-controller;
50 #interrupt-cells = <2>; 51 #interrupt-cells = <2>;
51 interrupts = <39>, <40>, <41>; 52 interrupts = <39>, <40>, <41>;
53 clocks = <&gate_clk 7>;
52 }; 54 };
53 55
54 serial@12000 { 56 serial@12000 {
@@ -57,7 +59,6 @@
57 reg-shift = <2>; 59 reg-shift = <2>;
58 interrupts = <33>; 60 interrupts = <33>;
59 clocks = <&gate_clk 7>; 61 clocks = <&gate_clk 7>;
60 /* set clock-frequency in board dts */
61 status = "disabled"; 62 status = "disabled";
62 }; 63 };
63 64
@@ -67,7 +68,6 @@
67 reg-shift = <2>; 68 reg-shift = <2>;
68 interrupts = <34>; 69 interrupts = <34>;
69 clocks = <&gate_clk 7>; 70 clocks = <&gate_clk 7>;
70 /* set clock-frequency in board dts */
71 status = "disabled"; 71 status = "disabled";
72 }; 72 };
73 73
@@ -75,6 +75,7 @@
75 compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc"; 75 compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc";
76 reg = <0x10300 0x20>; 76 reg = <0x10300 0x20>;
77 interrupts = <53>; 77 interrupts = <53>;
78 clocks = <&gate_clk 7>;
78 }; 79 };
79 80
80 spi@10600 { 81 spi@10600 {
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index 31f2157cd7d7..67f8670c4d6a 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -38,4 +38,10 @@
38 <0x19c00000 0x1000>; 38 <0x19c00000 0x1000>;
39 interrupts = <0 195 0x0>; 39 interrupts = <0 195 0x0>;
40 }; 40 };
41
42 qcom,ssbi@500000 {
43 compatible = "qcom,ssbi";
44 reg = <0x500000 0x1000>;
45 qcom,controller-type = "pmic-arbiter";
46 };
41}; 47};
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index 9e621b5ad3dd..c9b09a813a4b 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -38,4 +38,10 @@
38 <0x16400000 0x1000>; 38 <0x16400000 0x1000>;
39 interrupts = <0 154 0x0>; 39 interrupts = <0 154 0x0>;
40 }; 40 };
41
42 qcom,ssbi@500000 {
43 compatible = "qcom,ssbi";
44 reg = <0x500000 0x1000>;
45 qcom,controller-type = "pmic-arbiter";
46 };
41}; 47};
diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
index 5a3a58b7e18f..0077fc8510b7 100644
--- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
+++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
@@ -11,7 +11,7 @@
11 11
12/ { 12/ {
13 model = "LaCie Ethernet Disk mini V2"; 13 model = "LaCie Ethernet Disk mini V2";
14 compatible = "lacie,ethernet-disk-mini-v2", "marvell-orion5x-88f5182", "marvell,orion5x"; 14 compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x";
15 15
16 memory { 16 memory {
17 reg = <0x00000000 0x4000000>; /* 64 MB */ 17 reg = <0x00000000 0x4000000>; /* 64 MB */
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi
index 8aad00f81ed9..f7bec3b1ba32 100644
--- a/arch/arm/boot/dts/orion5x.dtsi
+++ b/arch/arm/boot/dts/orion5x.dtsi
@@ -13,6 +13,9 @@
13 compatible = "marvell,orion5x"; 13 compatible = "marvell,orion5x";
14 interrupt-parent = <&intc>; 14 interrupt-parent = <&intc>;
15 15
16 aliases {
17 gpio0 = &gpio0;
18 };
16 intc: interrupt-controller { 19 intc: interrupt-controller {
17 compatible = "marvell,orion-intc", "marvell,intc"; 20 compatible = "marvell,orion-intc", "marvell,intc";
18 interrupt-controller; 21 interrupt-controller;
@@ -32,7 +35,9 @@
32 #gpio-cells = <2>; 35 #gpio-cells = <2>;
33 gpio-controller; 36 gpio-controller;
34 reg = <0x10100 0x40>; 37 reg = <0x10100 0x40>;
35 ngpio = <32>; 38 ngpios = <32>;
39 interrupt-controller;
40 #interrupt-cells = <2>;
36 interrupts = <6>, <7>, <8>, <9>; 41 interrupts = <6>, <7>, <8>, <9>;
37 }; 42 };
38 43
@@ -91,7 +96,7 @@
91 reg = <0x90000 0x10000>, 96 reg = <0x90000 0x10000>,
92 <0xf2200000 0x800>; 97 <0xf2200000 0x800>;
93 reg-names = "regs", "sram"; 98 reg-names = "regs", "sram";
94 interrupts = <22>; 99 interrupts = <28>;
95 status = "okay"; 100 status = "okay";
96 }; 101 };
97 }; 102 };
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index 27f31a5fa494..d3ec32f6b790 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -298,7 +298,7 @@
298 }; 298 };
299 }; 299 };
300 300
301 ab8500@5 { 301 ab8500 {
302 ab8500-regulators { 302 ab8500-regulators {
303 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { 303 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
304 regulator-name = "V-DISPLAY"; 304 regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 936d2306e7e1..7e8769bd5977 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -75,6 +75,9 @@
75 compatible = "arm,pl330", "arm,primecell"; 75 compatible = "arm,pl330", "arm,primecell";
76 reg = <0xffe01000 0x1000>; 76 reg = <0xffe01000 0x1000>;
77 interrupts = <0 180 4>; 77 interrupts = <0 180 4>;
78 #dma-cells = <1>;
79 #dma-channels = <8>;
80 #dma-requests = <32>;
78 }; 81 };
79 }; 82 };
80 83
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 1513c1927cc8..122ae94076c8 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -89,7 +89,7 @@
89 pinmux: pinmux@e0700000 { 89 pinmux: pinmux@e0700000 {
90 compatible = "st,spear1310-pinmux"; 90 compatible = "st,spear1310-pinmux";
91 reg = <0xe0700000 0x1000>; 91 reg = <0xe0700000 0x1000>;
92 #gpio-range-cells = <2>; 92 #gpio-range-cells = <3>;
93 }; 93 };
94 94
95 apb { 95 apb {
@@ -212,7 +212,7 @@
212 interrupt-controller; 212 interrupt-controller;
213 gpio-controller; 213 gpio-controller;
214 #gpio-cells = <2>; 214 #gpio-cells = <2>;
215 gpio-ranges = <&pinmux 0 246>; 215 gpio-ranges = <&pinmux 0 0 246>;
216 status = "disabled"; 216 status = "disabled";
217 217
218 st-plgpio,ngpio = <246>; 218 st-plgpio,ngpio = <246>;
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 34da11aa6795..c511c4772efd 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -63,7 +63,7 @@
63 pinmux: pinmux@e0700000 { 63 pinmux: pinmux@e0700000 {
64 compatible = "st,spear1340-pinmux"; 64 compatible = "st,spear1340-pinmux";
65 reg = <0xe0700000 0x1000>; 65 reg = <0xe0700000 0x1000>;
66 #gpio-range-cells = <2>; 66 #gpio-range-cells = <3>;
67 }; 67 };
68 68
69 pwm: pwm@e0180000 { 69 pwm: pwm@e0180000 {
@@ -127,7 +127,7 @@
127 interrupt-controller; 127 interrupt-controller;
128 gpio-controller; 128 gpio-controller;
129 #gpio-cells = <2>; 129 #gpio-cells = <2>;
130 gpio-ranges = <&pinmux 0 252>; 130 gpio-ranges = <&pinmux 0 0 252>;
131 status = "disabled"; 131 status = "disabled";
132 132
133 st-plgpio,ngpio = <250>; 133 st-plgpio,ngpio = <250>;
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index ab45b8c81982..95372080eea6 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -25,7 +25,7 @@
25 pinmux: pinmux@b4000000 { 25 pinmux: pinmux@b4000000 {
26 compatible = "st,spear310-pinmux"; 26 compatible = "st,spear310-pinmux";
27 reg = <0xb4000000 0x1000>; 27 reg = <0xb4000000 0x1000>;
28 #gpio-range-cells = <2>; 28 #gpio-range-cells = <3>;
29 }; 29 };
30 30
31 fsmc: flash@44000000 { 31 fsmc: flash@44000000 {
@@ -102,7 +102,7 @@
102 interrupt-controller; 102 interrupt-controller;
103 gpio-controller; 103 gpio-controller;
104 #gpio-cells = <2>; 104 #gpio-cells = <2>;
105 gpio-ranges = <&pinmux 0 102>; 105 gpio-ranges = <&pinmux 0 0 102>;
106 status = "disabled"; 106 status = "disabled";
107 107
108 st-plgpio,ngpio = <102>; 108 st-plgpio,ngpio = <102>;
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index caa5520b1fd4..ffea342aeec9 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -24,7 +24,7 @@
24 pinmux: pinmux@b3000000 { 24 pinmux: pinmux@b3000000 {
25 compatible = "st,spear320-pinmux"; 25 compatible = "st,spear320-pinmux";
26 reg = <0xb3000000 0x1000>; 26 reg = <0xb3000000 0x1000>;
27 #gpio-range-cells = <2>; 27 #gpio-range-cells = <3>;
28 }; 28 };
29 29
30 clcd@90000000 { 30 clcd@90000000 {
@@ -130,7 +130,7 @@
130 interrupt-controller; 130 interrupt-controller;
131 gpio-controller; 131 gpio-controller;
132 #gpio-cells = <2>; 132 #gpio-cells = <2>;
133 gpio-ranges = <&pinmux 0 102>; 133 gpio-ranges = <&pinmux 0 0 102>;
134 status = "disabled"; 134 status = "disabled";
135 135
136 st-plgpio,ngpio = <102>; 136 st-plgpio,ngpio = <102>;
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9a428931d042..3d3f64d2111a 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -118,6 +118,7 @@
118 compatible = "arm,cortex-a9-twd-timer"; 118 compatible = "arm,cortex-a9-twd-timer";
119 reg = <0x50040600 0x20>; 119 reg = <0x50040600 0x20>;
120 interrupts = <1 13 0x304>; 120 interrupts = <1 13 0x304>;
121 clocks = <&tegra_car 132>;
121 }; 122 };
122 123
123 intc: interrupt-controller { 124 intc: interrupt-controller {
@@ -384,7 +385,7 @@
384 385
385 spi@7000d800 { 386 spi@7000d800 {
386 compatible = "nvidia,tegra20-slink"; 387 compatible = "nvidia,tegra20-slink";
387 reg = <0x7000d480 0x200>; 388 reg = <0x7000d800 0x200>;
388 interrupts = <0 83 0x04>; 389 interrupts = <0 83 0x04>;
389 nvidia,dma-request-selector = <&apbdma 17>; 390 nvidia,dma-request-selector = <&apbdma 17>;
390 #address-cells = <1>; 391 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 767803e1fd55..dbf46c272562 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -119,6 +119,7 @@
119 compatible = "arm,cortex-a9-twd-timer"; 119 compatible = "arm,cortex-a9-twd-timer";
120 reg = <0x50040600 0x20>; 120 reg = <0x50040600 0x20>;
121 interrupts = <1 13 0xf04>; 121 interrupts = <1 13 0xf04>;
122 clocks = <&tegra_car 214>;
122 }; 123 };
123 124
124 intc: interrupt-controller { 125 intc: interrupt-controller {
@@ -371,7 +372,7 @@
371 372
372 spi@7000d800 { 373 spi@7000d800 {
373 compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; 374 compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
374 reg = <0x7000d480 0x200>; 375 reg = <0x7000d800 0x200>;
375 interrupts = <0 83 0x04>; 376 interrupts = <0 83 0x04>;
376 nvidia,dma-request-selector = <&apbdma 17>; 377 nvidia,dma-request-selector = <&apbdma 17>;
377 #address-cells = <1>; 378 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/vt8500-bv07.dts b/arch/arm/boot/dts/vt8500-bv07.dts
index 567cf4e8ab84..877b33afa7ed 100644
--- a/arch/arm/boot/dts/vt8500-bv07.dts
+++ b/arch/arm/boot/dts/vt8500-bv07.dts
@@ -11,26 +11,22 @@
11 11
12/ { 12/ {
13 model = "Benign BV07 Netbook"; 13 model = "Benign BV07 Netbook";
14};
14 15
15 /* 16&fb {
16 * Display node is based on Sascha Hauer's patch on dri-devel. 17 bits-per-pixel = <16>;
17 * Added a bpp property to calculate the size of the framebuffer 18 display-timings {
18 * until the binding is formalized. 19 native-mode = <&timing0>;
19 */ 20 timing0: 800x480 {
20 display: display@0 { 21 clock-frequency = <0>; /* unused but required */
21 modes { 22 hactive = <800>;
22 mode0: mode@0 { 23 vactive = <480>;
23 hactive = <800>; 24 hfront-porch = <40>;
24 vactive = <480>; 25 hback-porch = <88>;
25 hback-porch = <88>; 26 hsync-len = <0>;
26 hfront-porch = <40>; 27 vback-porch = <32>;
27 hsync-len = <0>; 28 vfront-porch = <11>;
28 vback-porch = <32>; 29 vsync-len = <1>;
29 vfront-porch = <11>;
30 vsync-len = <1>;
31 clock = <0>; /* unused but required */
32 bpp = <16>; /* non-standard but required */
33 };
34 }; 30 };
35 }; 31 };
36}; 32};
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index cf31ced46602..68c8dc644383 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -98,12 +98,10 @@
98 interrupts = <43>; 98 interrupts = <43>;
99 }; 99 };
100 100
101 fb@d800e400 { 101 fb: fb@d8050800 {
102 compatible = "via,vt8500-fb"; 102 compatible = "via,vt8500-fb";
103 reg = <0xd800e400 0x400>; 103 reg = <0xd800e400 0x400>;
104 interrupts = <12>; 104 interrupts = <12>;
105 display = <&display>;
106 default-mode = <&mode0>;
107 }; 105 };
108 106
109 ge_rops@d8050400 { 107 ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8505-ref.dts b/arch/arm/boot/dts/wm8505-ref.dts
index fd4e248074c6..edd2cec3d37f 100644
--- a/arch/arm/boot/dts/wm8505-ref.dts
+++ b/arch/arm/boot/dts/wm8505-ref.dts
@@ -11,26 +11,22 @@
11 11
12/ { 12/ {
13 model = "Wondermedia WM8505 Netbook"; 13 model = "Wondermedia WM8505 Netbook";
14};
14 15
15 /* 16&fb {
16 * Display node is based on Sascha Hauer's patch on dri-devel. 17 bits-per-pixel = <32>;
17 * Added a bpp property to calculate the size of the framebuffer 18 display-timings {
18 * until the binding is formalized. 19 native-mode = <&timing0>;
19 */ 20 timing0: 800x480 {
20 display: display@0 { 21 clock-frequency = <0>; /* unused but required */
21 modes { 22 hactive = <800>;
22 mode0: mode@0 { 23 vactive = <480>;
23 hactive = <800>; 24 hfront-porch = <40>;
24 vactive = <480>; 25 hback-porch = <88>;
25 hback-porch = <88>; 26 hsync-len = <0>;
26 hfront-porch = <40>; 27 vback-porch = <32>;
27 hsync-len = <0>; 28 vfront-porch = <11>;
28 vback-porch = <32>; 29 vsync-len = <1>;
29 vfront-porch = <11>;
30 vsync-len = <1>;
31 clock = <0>; /* unused but required */
32 bpp = <32>; /* non-standard but required */
33 };
34 }; 30 };
35 }; 31 };
36}; 32};
diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi
index e74a1c0fb9a2..bcf668d31b28 100644
--- a/arch/arm/boot/dts/wm8505.dtsi
+++ b/arch/arm/boot/dts/wm8505.dtsi
@@ -128,11 +128,9 @@
128 interrupts = <0>; 128 interrupts = <0>;
129 }; 129 };
130 130
131 fb@d8050800 { 131 fb: fb@d8050800 {
132 compatible = "wm,wm8505-fb"; 132 compatible = "wm,wm8505-fb";
133 reg = <0xd8050800 0x200>; 133 reg = <0xd8050800 0x200>;
134 display = <&display>;
135 default-mode = <&mode0>;
136 }; 134 };
137 135
138 ge_rops@d8050400 { 136 ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8650-mid.dts b/arch/arm/boot/dts/wm8650-mid.dts
index cefd938f842f..61671a0d9ede 100644
--- a/arch/arm/boot/dts/wm8650-mid.dts
+++ b/arch/arm/boot/dts/wm8650-mid.dts
@@ -11,26 +11,24 @@
11 11
12/ { 12/ {
13 model = "Wondermedia WM8650-MID Tablet"; 13 model = "Wondermedia WM8650-MID Tablet";
14};
15
16&fb {
17 bits-per-pixel = <16>;
14 18
15 /* 19 display-timings {
16 * Display node is based on Sascha Hauer's patch on dri-devel. 20 native-mode = <&timing0>;
17 * Added a bpp property to calculate the size of the framebuffer 21 timing0: 800x480 {
18 * until the binding is formalized. 22 clock-frequency = <0>; /* unused but required */
19 */ 23 hactive = <800>;
20 display: display@0 { 24 vactive = <480>;
21 modes { 25 hfront-porch = <40>;
22 mode0: mode@0 { 26 hback-porch = <88>;
23 hactive = <800>; 27 hsync-len = <0>;
24 vactive = <480>; 28 vback-porch = <32>;
25 hback-porch = <88>; 29 vfront-porch = <11>;
26 hfront-porch = <40>; 30 vsync-len = <1>;
27 hsync-len = <0>;
28 vback-porch = <32>;
29 vfront-porch = <11>;
30 vsync-len = <1>;
31 clock = <0>; /* unused but required */
32 bpp = <16>; /* non-standard but required */
33 };
34 }; 31 };
35 }; 32 };
36}; 33};
34
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index db3c0a12e052..9313407bbc30 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -128,11 +128,9 @@
128 interrupts = <43>; 128 interrupts = <43>;
129 }; 129 };
130 130
131 fb@d8050800 { 131 fb: fb@d8050800 {
132 compatible = "wm,wm8505-fb"; 132 compatible = "wm,wm8505-fb";
133 reg = <0xd8050800 0x200>; 133 reg = <0xd8050800 0x200>;
134 display = <&display>;
135 default-mode = <&mode0>;
136 }; 134 };
137 135
138 ge_rops@d8050400 { 136 ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts
index fcc660c89540..32d22532cd6c 100644
--- a/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -15,28 +15,6 @@
 / {
 	model = "Wondermedia WM8850-W70v2 Tablet";
 
-	/*
-	 * Display node is based on Sascha Hauer's patch on dri-devel.
-	 * Added a bpp property to calculate the size of the framebuffer
-	 * until the binding is formalized.
-	 */
-	display: display@0 {
-		modes {
-			mode0: mode@0 {
-				hactive = <800>;
-				vactive = <480>;
-				hback-porch = <88>;
-				hfront-porch = <40>;
-				hsync-len = <0>;
-				vback-porch = <32>;
-				vfront-porch = <11>;
-				vsync-len = <1>;
-				clock = <0>; /* unused but required */
-				bpp = <16>; /* non-standard but required */
-			};
-		};
-	};
-
 	backlight {
 		compatible = "pwm-backlight";
 		pwms = <&pwm 0 50000 1>; /* duty inverted */
@@ -45,3 +23,21 @@
 		default-brightness-level = <5>;
 	};
 };
+
+&fb {
+	bits-per-pixel = <16>;
+	display-timings {
+		native-mode = <&timing0>;
+		timing0: 800x480 {
+			clock-frequency = <0>; /* unused but required */
+			hactive = <800>;
+			vactive = <480>;
+			hfront-porch = <40>;
+			hback-porch = <88>;
+			hsync-len = <0>;
+			vback-porch = <32>;
+			vfront-porch = <11>;
+			vsync-len = <1>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index e8cbfdc87bba..7149cd13e3b9 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -135,11 +135,9 @@
 			};
 		};
 
-		fb@d8051700 {
+		fb: fb@d8051700 {
 			compatible = "wm,wm8505-fb";
 			reg = <0xd8051700 0x200>;
-			display = <&display>;
-			default-mode = <&mode0>;
 		};
 
 		ge_rops@d8050400 {
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e36b01025321..088d6c11a0fa 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -188,6 +188,7 @@ CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MXC=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_PHY=y
 CONFIG_USB_MXS_PHY=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 92386b20bd09..afa7249fac6e 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -134,6 +134,7 @@ CONFIG_SND_DEBUG_VERBOSE=y
 # CONFIG_SND_SPI is not set
 CONFIG_SND_SOC=y
 CONFIG_USB=y
+CONFIG_USB_PHY=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_GADGET=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index fbbc5bb022d5..87924d671115 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -116,9 +116,11 @@ CONFIG_SND_SOC=y
 CONFIG_SND_MXS_SOC=y
 CONFIG_SND_SOC_MXS_SGTL5000=y
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_PHY=y
 CONFIG_USB_MXS_PHY=y
 CONFIG_MMC=y
 CONFIG_MMC_MXS=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 42eab9a2a0fd..7e0ebb64a7f9 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -195,6 +195,7 @@ CONFIG_SND_SOC=y
 CONFIG_SND_OMAP_SOC=y
 # CONFIG_USB_HID is not set
 CONFIG_USB=y
+CONFIG_USB_PHY=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_DEVICE_CLASS is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b16bae2c9a60..bd07864f14a0 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -126,6 +126,8 @@ CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TWL4030_PWRBUTTON=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=32
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 720799fd3a81..dff714d886d5 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
 	void (*delay)(unsigned long);
 	void (*const_udelay)(unsigned long);
 	void (*udelay)(unsigned long);
-	bool const_clock;
+	unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)		arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704ed..ea289e1435e7 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
 #if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe2477..ed94b1a366ae 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE		0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
 					IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484d..91b99abe7a95 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 863a6611323c..a7b85e0d0cc1 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else	/* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64c..86b8fe398b95 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
 #define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
-#define L_PTE_S2_RDWR			(_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4a..9bcd262a9008 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
 /*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING	TASK_SIZE
+#endif
+
+/*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
  * as well as any architecture dependent bits like global/ASID and SMP
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 5a85f148b607..21a23e378bbe 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void);
 
 extern unsigned int user_debug;
 
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 4db8c8820f0d..ab865e65a84c 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
 
 #include <asm/glue.h>
 
-#define TLB_V3_PAGE	(1 << 0)
 #define TLB_V4_U_PAGE	(1 << 1)
 #define TLB_V4_D_PAGE	(1 << 2)
 #define TLB_V4_I_PAGE	(1 << 3)
@@ -22,7 +21,6 @@
 #define TLB_V6_D_PAGE	(1 << 5)
 #define TLB_V6_I_PAGE	(1 << 6)
 
-#define TLB_V3_FULL	(1 << 8)
 #define TLB_V4_U_FULL	(1 << 9)
 #define TLB_V4_D_FULL	(1 << 10)
 #define TLB_V4_I_FULL	(1 << 11)
@@ -52,7 +50,6 @@
  *	=============
  *
  *	We have the following to choose from:
- *	  v3    - ARMv3
  *	  v4    - ARMv4 without write buffer
  *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
 	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
 	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
 	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
 		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
 			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
 			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
 			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
 	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
 		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
 		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
 		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
 	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
 	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void)
 	isb();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+	/*
+	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+	 */
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  *	flush_pmd_entry
  *
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 5c27696de14f..8b1f37bfeeec 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -2,6 +2,7 @@
 #define _ASM_ARM_XEN_EVENTS_H
 
 #include <asm/ptrace.h>
+#include <asm/atomic.h>
 
 enum ipi_vector {
 	XEN_PLACEHOLDER_VECTOR,
@@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return raw_irqs_disabled_flags(regs->ARM_cpsr);
 }
 
-/*
- * We cannot use xchg because it does not support 8-byte
- * values. However it is safe to use {ldr,dtd}exd directly because all
- * platforms which Xen can run on support those instructions.
- */
-static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
-{
-	xen_ulong_t oldval;
-	unsigned int tmp;
-
-	wmb();
-	asm volatile("@ xchg_xen_ulong\n"
-		"1:     ldrexd  %0, %H0, [%3]\n"
-		"       strexd  %1, %2, %H2, [%3]\n"
-		"       teq     %1, #0\n"
-		"       bne     1b"
-		: "=&r" (oldval), "=&r" (tmp)
-		: "r" (val), "r" (ptr)
-		: "memory", "cc");
-	return oldval;
-}
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr),	\
+							    atomic64_t,	\
+							    counter), (val))
 
 #endif /* _ASM_ARM_XEN_EVENTS_H */
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 85aa2b292692..43076536965c 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
 	early_write(s, n);
 }
 
-static struct console early_console = {
+static struct console early_console_dev = {
 	.name =		"earlycon",
 	.write =	early_console_write,
 	.flags =	CON_PRINTBUFFER | CON_BOOT,
 	.index =	-1,
 };
 
-asmlinkage void early_printk(const char *fmt, ...)
-{
-	char buf[512];
-	int n;
-	va_list ap;
-
-	va_start(ap, fmt);
-	n = vscnprintf(buf, sizeof(buf), fmt, ap);
-	early_write(buf, n);
-	va_end(ap);
-}
-
 static int __init setup_early_printk(char *buf)
 {
-	register_console(&early_console);
+	early_console = &early_console_dev;
+	register_console(&early_console_dev);
 	return 0;
 }
 
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504ed..fefd7f971437 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
  */
 
 .macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad	#4)
 	stmdb	sp!, {r0-r3, lr}
+ UNWIND(.save	{r0-r3, lr})
 .endm
 
 .macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
 .endm
 
 ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	mov	ip, lr
 	ldmia	sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
 #else
 	__mcount
 #endif
+UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
+UNWIND(.fnstart)
 	__ftrace_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_caller)
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
 	__ftrace_graph_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
 #endif
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index e0eb9a1cae77..8bac553fe213 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -267,7 +267,7 @@ __create_page_tables:
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
 	sub	r4, r4, #4			@ Fixup page table pointer
 						@ for 64-bit descriptors
 #endif
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 96093b75ab90..1fd749ee4a1b 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
 	}
 
 	if (err) {
-		pr_warning("CPU %d debug is powered down!\n", cpu);
+		pr_warn_once("CPU %d debug is powered down!\n", cpu);
 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 		return;
 	}
@@ -987,7 +987,7 @@ clear_vcr:
 	isb();
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to disable vector catch\n", cpu);
+		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
 		return;
 	}
 
@@ -1007,7 +1007,7 @@ clear_vcr:
 	}
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
 		return;
 	}
 
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
 	.notifier_call = dbg_cpu_pm_notify,
 };
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 146157dfe27c..8c3094d0f7b7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, event) >= 0;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 047d3e40e470..c9a5e2ce8aa9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,38 +57,6 @@ static const char *isa_modes[] = {
57 "ARM" , "Thumb" , "Jazelle", "ThumbEE" 57 "ARM" , "Thumb" , "Jazelle", "ThumbEE"
58}; 58};
59 59
60static volatile int hlt_counter;
61
62void disable_hlt(void)
63{
64 hlt_counter++;
65}
66
67EXPORT_SYMBOL(disable_hlt);
68
69void enable_hlt(void)
70{
71 hlt_counter--;
72 BUG_ON(hlt_counter < 0);
73}
74
75EXPORT_SYMBOL(enable_hlt);
76
77static int __init nohlt_setup(char *__unused)
78{
79 hlt_counter = 1;
80 return 1;
81}
82
83static int __init hlt_setup(char *__unused)
84{
85 hlt_counter = 0;
86 return 1;
87}
88
89__setup("nohlt", nohlt_setup);
90__setup("hlt", hlt_setup);
91
92extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); 60extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
93typedef void (*phys_reset_t)(unsigned long); 61typedef void (*phys_reset_t)(unsigned long);
94 62
@@ -172,54 +140,38 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	local_fiq_enable();
+}
 
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		ledtrig_cpu(CPU_LED_IDLE_START);
-		while (!need_resched()) {
-#ifdef CONFIG_HOTPLUG_CPU
-			if (cpu_is_offline(smp_processor_id()))
-				cpu_die();
+void arch_cpu_idle_enter(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_START);
+#ifdef CONFIG_PL310_ERRATA_769419
+	wmb();
 #endif
+}
 
-			/*
-			 * We need to disable interrupts here
-			 * to ensure we don't miss a wakeup call.
-			 */
-			local_irq_disable();
-#ifdef CONFIG_PL310_ERRATA_769419
-			wmb();
+void arch_cpu_idle_exit(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_END);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
 #endif
-			if (hlt_counter) {
-				local_irq_enable();
-				cpu_relax();
-			} else if (!need_resched()) {
-				stop_critical_timings();
-				if (cpuidle_idle_call())
-					default_idle();
-				start_critical_timings();
-				/*
-				 * default_idle functions must always
-				 * return with IRQs enabled.
-				 */
-				WARN_ON(irqs_disabled());
-			} else
-				local_irq_enable();
-		}
-		ledtrig_cpu(CPU_LED_IDLE_END);
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+
+/*
+ * Called from the core idle loop.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		default_idle();
 }
 
 static char reboot_mode = 'h';
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b9ec21..59d2adb764a9 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
 
 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
 	u64 epoch_ns;
 	u32 epoch_cyc;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f6cbb2e3eda..234e339196c0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -56,7 +56,6 @@
 #include <asm/virt.h>
 
 #include "atags.h"
-#include "tcm.h"
 
 
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)
 	printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+	unsigned int divide_instrs;
+
+	if (cpu_architecture() < CPU_ARCH_ARMv7)
+		return;
+
+	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+	switch (divide_instrs) {
+	case 2:
+		elf_hwcap |= HWCAP_IDIVA;
+	case 1:
+		elf_hwcap |= HWCAP_IDIVT;
+	}
+}
+
 static void __init feat_v6_fixup(void)
 {
 	int id = read_cpuid_id();
@@ -483,8 +499,11 @@ static void __init setup_processor(void)
 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 		 list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
+
+	cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-	elf_hwcap &= ~HWCAP_THUMB;
+	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
 	feat_v6_fixup();
@@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	size -= start & ~PAGE_MASK;
 	bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
 	if (bank->start + size < bank->start) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 			"32-bit physical address space\n", (long long)start);
@@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_crashkernel();
 
-	tcm_init();
-
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 31644f1978d5..4619177bcfe6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -336,7 +336,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
 			  CLOCK_EVT_FEAT_PERIODIC |
 			  CLOCK_EVT_FEAT_DUMMY;
-	evt->rating	= 400;
+	evt->rating	= 100;
 	evt->mult	= 1;
 	evt->set_mode	= broadcast_timer_set_mode;
 
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (arm_delay_ops.const_clock)
-		return NOTIFY_OK;
-
 	if (!per_cpu(l_p_j_ref, cpu)) {
 		per_cpu(l_p_j_ref, cpu) =
 			per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index bd0300531399..e82e1d248772 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+	unsigned int midr = read_cpuid_id();
+
+	/* Cortex-A15 r0p0..r3p2 affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+		return 0;
+	return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+	return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+	dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+			       NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+	int cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are running
+		 * the same ASID as the one being invalidated. There is no
+		 * need for locking around the active_asids check since the
+		 * switch_mm() function has at least one dmb() (as required by
+		 * this workaround) in case a context switch happens on
+		 * another CPU after the condition below.
+		 */
+		if (atomic64_read(&mm->context.id) ==
+		    atomic64_read(&per_cpu(active_asids, cpu)))
+			cpumask_set_cpu(cpu, &mask);
+	}
+	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 	else
 		local_flush_tlb_all();
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 	else
 		local_flush_tlb_mm(mm);
+	broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 					&ta, 1);
 	} else
 		local_flush_tlb_page(vma, uaddr);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 	} else
 		local_flush_tlb_kernel_page(kaddr);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 					&ta, 1);
 	} else
 		local_flush_tlb_range(vma, start, end);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 	} else
 		local_flush_tlb_kernel_range(start, end);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_bp_all(void)
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30ae6bb4a310..f50f19e5c138 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -17,7 +17,6 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
-#include "tcm.h"
 
 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 79282ebcd939..f10316b4ecdc 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -100,7 +100,7 @@ static void __init parse_dt_topology(void)
 	int alloc_size, cpu = 0;
 
 	alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
-	cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT);
+	cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
 
 	while ((cn = of_find_node_by_type(cn, "cpu"))) {
 		const u32 *rate, *reg;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5a936988eb24..842098d78f58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
 		r = 1;
+		break;
 	case KVM_CAP_NR_VCPUS:
 		r = num_online_cpus();
 		break;
@@ -613,7 +614,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
 	    || !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unkown exception class: %#08lx, "
+		kvm_err("Unknown exception class: %#08lx, "
 			"hsr: %#08x\n", hsr_ec,
 			(unsigned int)vcpu->arch.hsr);
 		BUG();
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4ea9a982269c..7bed7556077a 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 	u32 val;
 	int cpu;
 
-	cpu = get_cpu();
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
+	cpu = get_cpu();
+
 	cpumask_setall(&vcpu->arch.require_dcache_flush);
 	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
 
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index c9a17316e9fe..0e4cfe123b38 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 			 lr, irq, vgic_cpu->vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-		goto out;
+		return true;
 	}
 
 	/* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);
 
-out:
 	if (!vgic_irq_is_edge(vcpu, irq))
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
 
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
 	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
 
-	/*
-	 * We do not need to take the distributor lock here, since the only
-	 * action we perform is clearing the irq_active_bit for an EOIed
-	 * level interrupt. There is a potential race with
-	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-	 * check if the interrupt is already active. Two possibilities:
-	 *
-	 * - The queuing is occurring on the same vcpu: cannot happen,
-	 *   as we're already in the context of this vcpu, and
-	 *   executing the handler
-	 * - The interrupt has been migrated to another vcpu, and we
-	 *   ignore this interrupt for this run. Big deal. It is still
-	 *   pending though, and will get considered when this vcpu
-	 *   exits.
-	 */
 	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			} else {
 				vgic_cpu_irq_clear(vcpu, irq);
 			}
+
+			/*
+			 * Despite being EOIed, the LR may not have
+			 * been marked as empty.
+			 */
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}
 
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
  */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
+	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
+	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 6b93f6a1a3c7..64dbfa57204a 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
 static void __timer_const_udelay(unsigned long xloops)
 {
 	unsigned long long loops = xloops;
-	loops *= loops_per_jiffy;
+	loops *= arm_delay_ops.ticks_per_jiffy;
 	__timer_delay(loops >> UDELAY_SHIFT);
 }
 
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
 		pr_info("Switching to timer-based delay loop\n");
 		delay_timer			= timer;
 		lpj_fine			= timer->freq / HZ;
-		loops_per_jiffy			= lpj_fine;
+
+		/* cpufreq may scale loops_per_jiffy, so keep a private copy */
+		arm_delay_ops.ticks_per_jiffy	= lpj_fine;
 		arm_delay_ops.delay		= __timer_delay;
 		arm_delay_ops.const_udelay	= __timer_const_udelay;
 		arm_delay_ops.udelay		= __timer_udelay;
-		arm_delay_ops.const_clock	= true;
+
 		delay_calibrated		= true;
 	} else {
 		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index d912e7397ecc..94b0650ea98f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,31 +14,15 @@
 
 	.text
 	.align	5
-	.word	0
-
-1:	subs	r2, r2, #4		@ 1 do we have enough
-	blt	5f			@ 1 bytes to align with?
-	cmp	r3, #2			@ 1
-	strltb	r1, [ip], #1		@ 1
-	strleb	r1, [ip], #1		@ 1
-	strb	r1, [ip], #1		@ 1
-	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
 
 ENTRY(memset)
-/*
- * Preserve the contents of r0 for the return value.
- */
-	mov	ip, r0
-	ands	r3, ip, #3		@ 1 unaligned?
-	bne	1b			@ 1
+	ands	r3, r0, #3		@ 1 unaligned?
+	mov	ip, r0			@ preserve r0 as return value
+	bne	6f			@ 1
 /*
  * we know that the pointer in ip is aligned to a word boundary.
  */
-	orr	r1, r1, r1, lsl #8
+1:	orr	r1, r1, r1, lsl #8
 	orr	r1, r1, r1, lsl #16
 	mov	r3, r1
 	cmp	r2, #16
@@ -127,4 +111,13 @@ ENTRY(memset)
 	tst	r2, #1
 	strneb	r1, [ip], #1
 	mov	pc, lr
+
+6:	subs	r2, r2, #4		@ 1 do we have enough
+	blt	5b			@ 1 bytes to align with?
+	cmp	r3, #2			@ 1
+	strltb	r1, [ip], #1		@ 1
+	strleb	r1, [ip], #1		@ 1
+	strb	r1, [ip], #1		@ 1
+	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
+	b	1b
 ENDPROC(memset)
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b67cd5374117..44199bc2c665 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -232,6 +232,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
232 CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk), 232 CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk),
233 CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk), 233 CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk),
234 CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk), 234 CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk),
235 CLKDEV_CON_DEV_ID("spi_clk", "fffc8000.spi", &spi0_clk),
236 CLKDEV_CON_DEV_ID("spi_clk", "fffcc000.spi", &spi1_clk),
235 /* fake hclk clock */ 237 /* fake hclk clock */
236 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk), 238 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
237 CLKDEV_CON_ID("pioA", &pioA_clk), 239 CLKDEV_CON_ID("pioA", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index d3addee43d8d..2ec5efea3f03 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -262,6 +262,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
262 CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk), 262 CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk),
263 CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk), 263 CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk),
264 CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk), 264 CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk),
265 CLKDEV_CON_DEV_ID("spi_clk", "fffa4000.spi", &spi0_clk),
266 CLKDEV_CON_DEV_ID("spi_clk", "fffa8000.spi", &spi1_clk),
265 /* fake hclk clock */ 267 /* fake hclk clock */
266 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk), 268 CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk),
267 CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk), 269 CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index 5dfc8fd87103..ccd078355eed 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -172,6 +172,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
172 CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk), 172 CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk),
173 CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk), 173 CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
174 CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk), 174 CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
175 CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
176 CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
175 CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk), 177 CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
176 CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk), 178 CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
177 CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk), 179 CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 44a9a62dcc13..a200d8a17123 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -237,6 +237,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
 	CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
 	CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk),
+	CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
+	CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
 	CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
 	CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
 	CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index 2ea7059b840b..c20a870ea9c9 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -176,6 +176,7 @@ static struct w1_gpio_platform_data w1_gpio_pdata = {
 	/* If you choose to use a pin other than PB16 it needs to be 3.3V */
 	.pin		= AT91_PIN_PB16,
 	.is_open_drain	= 1,
+	.ext_pullup_enable_pin	= -EINVAL,
 };
 
 static struct platform_device w1_device = {
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index a033b8df9fb2..869cbecf00b7 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -188,6 +188,7 @@ static struct spi_board_info portuxg20_spi_devices[] = {
 static struct w1_gpio_platform_data w1_gpio_pdata = {
 	.pin		= AT91_PIN_PA29,
 	.is_open_drain	= 1,
+	.ext_pullup_enable_pin	= -EINVAL,
 };
 
 static struct platform_device w1_device = {
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465ab0dd7..5fc23771c154 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
 extern void at91_gpio_suspend(void);
 extern void at91_gpio_resume(void);
 
+#ifdef CONFIG_PINCTRL_AT91
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+#else
+static inline void at91_pinctrl_gpio_suspend(void) {}
+static inline void at91_pinctrl_gpio_resume(void) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e210262aeee..e0ca59171022 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
 
 void at91_irq_suspend(void)
 {
-	int i = 0, bit;
+	int bit = -1;
 
 	if (has_aic5()) {
 		/* disable enabled irqs */
-		while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+		while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IDCR, 1);
-			i = bit;
 		}
 		/* enable wakeup irqs */
-		i = 0;
-		while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+		bit = -1;
+		while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IECR, 1);
-			i = bit;
 		}
 	} else {
 		at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
 
 void at91_irq_resume(void)
 {
-	int i = 0, bit;
+	int bit = -1;
 
 	if (has_aic5()) {
 		/* disable wakeup irqs */
-		while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+		while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IDCR, 1);
-			i = bit;
 		}
 		/* enable irqs disabled for suspend */
-		i = 0;
-		while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+		bit = -1;
+		while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
 			at91_aic_write(AT91_AIC5_SSR,
 				       bit & AT91_AIC5_INTSEL_MSK);
 			at91_aic_write(AT91_AIC5_IECR, 1);
-			i = bit;
 		}
 	} else {
 		at91_aic_write(AT91_AIC_IDCR, *wakeups);
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db888a1f..73f1f250403a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
 
 static int at91_pm_enter(suspend_state_t state)
 {
-	at91_gpio_suspend();
+	if (of_have_populated_dt())
+		at91_pinctrl_gpio_suspend();
+	else
+		at91_gpio_suspend();
 	at91_irq_suspend();
 
 	pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
 error:
 	target_state = PM_SUSPEND_ON;
 	at91_irq_resume();
-	at91_gpio_resume();
+	if (of_have_populated_dt())
+		at91_pinctrl_gpio_resume();
+	else
+		at91_gpio_resume();
 	return 0;
 }
 
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index bf02471d7e7c..f11289519c39 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -6,6 +6,7 @@ config ARCH_BCM
 	select ARM_ERRATA_764369 if SMP
 	select ARM_GIC
 	select CPU_V7
+	select CLKSRC_OF
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_TIME
 	select GPIO_BCM
diff --git a/arch/arm/mach-bcm/board_bcm.c b/arch/arm/mach-bcm/board_bcm.c
index f0f9abafad29..259593540477 100644
--- a/arch/arm/mach-bcm/board_bcm.c
+++ b/arch/arm/mach-bcm/board_bcm.c
@@ -16,14 +16,11 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/irqchip.h>
+#include <linux/clocksource.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-static void timer_init(void)
-{
-}
-
 
 static void __init board_init(void)
 {
@@ -35,7 +32,7 @@ static const char * const bcm11351_dt_compat[] = { "bcm,bcm11351", NULL, };
 
 DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor")
 	.init_irq = irqchip_init,
-	.init_time = timer_init,
+	.init_time = clocksource_of_init,
 	.init_machine = board_init,
 	.dt_compat = bcm11351_dt_compat,
 MACHINE_END
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index e698f26cc0cb..52e4bb5cf12d 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -22,19 +22,9 @@
 
 static struct map_desc cns3xxx_io_desc[] __initdata = {
 	{
-		.virtual	= CNS3XXX_TC11MP_TWD_BASE_VIRT,
-		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
-		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
-		.virtual	= CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
-		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
-		.length		= SZ_4K,
+		.virtual	= CNS3XXX_TC11MP_SCU_BASE_VIRT,
+		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
+		.length		= SZ_8K,
 		.type		= MT_DEVICE,
 	}, {
 		.virtual	= CNS3XXX_TIMER1_2_3_BASE_VIRT,
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
index 191c8e57f289..b1021aafa481 100644
--- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -94,10 +94,10 @@
94#define RTC_INTR_STS_OFFSET 0x34 94#define RTC_INTR_STS_OFFSET 0x34
95 95
96#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ 96#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
97#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ 97#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */
98 98
99#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ 99#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
100#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 100#define CNS3XXX_PM_BASE_VIRT 0xFB001000
101 101
102#define PM_CLK_GATE_OFFSET 0x00 102#define PM_CLK_GATE_OFFSET 0x00
103#define PM_SOFT_RST_OFFSET 0x04 103#define PM_SOFT_RST_OFFSET 0x04
@@ -109,7 +109,7 @@
109#define PM_PLL_HM_PD_OFFSET 0x1C 109#define PM_PLL_HM_PD_OFFSET 0x1C
110 110
111#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ 111#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
112#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 112#define CNS3XXX_UART0_BASE_VIRT 0xFB002000
113 113
114#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ 114#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
115#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 115#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
@@ -130,7 +130,7 @@
130#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 130#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
131 131
132#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ 132#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
133#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 133#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000
134 134
135#define TIMER1_COUNTER_OFFSET 0x00 135#define TIMER1_COUNTER_OFFSET 0x00
136#define TIMER1_AUTO_RELOAD_OFFSET 0x04 136#define TIMER1_AUTO_RELOAD_OFFSET 0x04
@@ -227,16 +227,16 @@
227 * Testchip peripheral and fpga gic regions 227 * Testchip peripheral and fpga gic regions
228 */ 228 */
229#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ 229#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
230#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 230#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000
231 231
232#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ 232#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
233#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 233#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
234 234
235#define CNS3XXX_TC11MP_TWD_BASE 0x90000600 235#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
236#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 236#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
237 237
238#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ 238#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
239#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 239#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
240 240
241#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ 241#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
242#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 242#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
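
With the separate 4K windows for the SCU, GIC CPU interface and TWD gone, the single SZ_8K mapping at CNS3XXX_TC11MP_SCU_BASE_VIRT has to cover the GIC CPU interface (+0x100), the TWD (+0x600) and the GIC distributor (+0x1000); deriving the _VIRT macros from the SCU base keeps them consistent with that one map_desc entry by construction. A compile-time check along these lines could make the size assumption explicit (an editorial sketch, not part of the patch; the helper name is made up and it assumes <linux/bug.h> plus the SZ_* constants):

	static void __init cns3xxx_check_static_map(void)
	{
		/* the distributor's 4K register block is the last user of the window */
		BUILD_BUG_ON(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT + SZ_4K >
			     CNS3XXX_TC11MP_SCU_BASE_VIRT + SZ_8K);
	}
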
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 147b8e1a4407..886481c12173 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -242,6 +242,73 @@ static struct vpfe_config vpfe_cfg = {
242 .ccdc = "DM355 CCDC", 242 .ccdc = "DM355 CCDC",
243}; 243};
244 244
245/* venc standards timings */
246static struct vpbe_enc_mode_info dm355evm_enc_preset_timing[] = {
247 {
248 .name = "ntsc",
249 .timings_type = VPBE_ENC_STD,
250 .std_id = V4L2_STD_NTSC,
251 .interlaced = 1,
252 .xres = 720,
253 .yres = 480,
254 .aspect = {11, 10},
255 .fps = {30000, 1001},
256 .left_margin = 0x79,
257 .upper_margin = 0x10,
258 },
259 {
260 .name = "pal",
261 .timings_type = VPBE_ENC_STD,
262 .std_id = V4L2_STD_PAL,
263 .interlaced = 1,
264 .xres = 720,
265 .yres = 576,
266 .aspect = {54, 59},
267 .fps = {25, 1},
268 .left_margin = 0x7E,
269 .upper_margin = 0x16
270 },
271};
272
273#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
274
275/*
276 * The outputs available from the VPBE + encoders. Keep the order the same
277 * as that of the encoders: first those from the venc, then those from the
278 * external encoders. The index in each output refers to the index on a
279 * particular encoder; the driver passes it to the encoder when it supports
280 * more than one output. The application uses the array index to set an output.
281 */
282static struct vpbe_output dm355evm_vpbe_outputs[] = {
283 {
284 .output = {
285 .index = 0,
286 .name = "Composite",
287 .type = V4L2_OUTPUT_TYPE_ANALOG,
288 .std = VENC_STD_ALL,
289 .capabilities = V4L2_OUT_CAP_STD,
290 },
291 .subdev_name = DM355_VPBE_VENC_SUBDEV_NAME,
292 .default_mode = "ntsc",
293 .num_modes = ARRAY_SIZE(dm355evm_enc_preset_timing),
294 .modes = dm355evm_enc_preset_timing,
295 .if_params = V4L2_MBUS_FMT_FIXED,
296 },
297};
298
299static struct vpbe_config dm355evm_display_cfg = {
300 .module_name = "dm355-vpbe-display",
301 .i2c_adapter_id = 1,
302 .osd = {
303 .module_name = DM355_VPBE_OSD_SUBDEV_NAME,
304 },
305 .venc = {
306 .module_name = DM355_VPBE_VENC_SUBDEV_NAME,
307 },
308 .num_outputs = ARRAY_SIZE(dm355evm_vpbe_outputs),
309 .outputs = dm355evm_vpbe_outputs,
310};
311
245static struct platform_device *davinci_evm_devices[] __initdata = { 312static struct platform_device *davinci_evm_devices[] __initdata = {
246 &dm355evm_dm9000, 313 &dm355evm_dm9000,
247 &davinci_nand_device, 314 &davinci_nand_device,
@@ -253,8 +320,6 @@ static struct davinci_uart_config uart_config __initdata = {
253 320
254static void __init dm355_evm_map_io(void) 321static void __init dm355_evm_map_io(void)
255{ 322{
256 /* setup input configuration for VPFE input devices */
257 dm355_set_vpfe_config(&vpfe_cfg);
258 dm355_init(); 323 dm355_init();
259} 324}
260 325
@@ -344,6 +409,8 @@ static __init void dm355_evm_init(void)
344 davinci_setup_mmc(0, &dm355evm_mmc_config); 409 davinci_setup_mmc(0, &dm355evm_mmc_config);
345 davinci_setup_mmc(1, &dm355evm_mmc_config); 410 davinci_setup_mmc(1, &dm355evm_mmc_config);
346 411
412 dm355_init_video(&vpfe_cfg, &dm355evm_display_cfg);
413
347 dm355_init_spi0(BIT(0), dm355_evm_spi_info, 414 dm355_init_spi0(BIT(0), dm355_evm_spi_info,
348 ARRAY_SIZE(dm355_evm_spi_info)); 415 ARRAY_SIZE(dm355_evm_spi_info));
349 416
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index c2d4958a0cb6..2a6674356585 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -27,6 +27,7 @@
27#include <linux/input.h> 27#include <linux/input.h>
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/spi/eeprom.h> 29#include <linux/spi/eeprom.h>
30#include <linux/v4l2-dv-timings.h>
30 31
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 33#include <asm/mach/arch.h>
@@ -39,6 +40,7 @@
39#include <linux/platform_data/mtd-davinci.h> 40#include <linux/platform_data/mtd-davinci.h>
40#include <linux/platform_data/keyscan-davinci.h> 41#include <linux/platform_data/keyscan-davinci.h>
41 42
43#include <media/ths7303.h>
42#include <media/tvp514x.h> 44#include <media/tvp514x.h>
43 45
44#include "davinci.h" 46#include "davinci.h"
@@ -374,6 +376,166 @@ static struct vpfe_config vpfe_cfg = {
374 .ccdc = "ISIF", 376 .ccdc = "ISIF",
375}; 377};
376 378
379/* venc standards timings */
380static struct vpbe_enc_mode_info dm365evm_enc_std_timing[] = {
381 {
382 .name = "ntsc",
383 .timings_type = VPBE_ENC_STD,
384 .std_id = V4L2_STD_NTSC,
385 .interlaced = 1,
386 .xres = 720,
387 .yres = 480,
388 .aspect = {11, 10},
389 .fps = {30000, 1001},
390 .left_margin = 0x79,
391 .upper_margin = 0x10,
392 },
393 {
394 .name = "pal",
395 .timings_type = VPBE_ENC_STD,
396 .std_id = V4L2_STD_PAL,
397 .interlaced = 1,
398 .xres = 720,
399 .yres = 576,
400 .aspect = {54, 59},
401 .fps = {25, 1},
402 .left_margin = 0x7E,
403 .upper_margin = 0x16,
404 },
405};
406
407/* venc dv timings */
408static struct vpbe_enc_mode_info dm365evm_enc_preset_timing[] = {
409 {
410 .name = "480p59_94",
411 .timings_type = VPBE_ENC_DV_TIMINGS,
412 .dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
413 .interlaced = 0,
414 .xres = 720,
415 .yres = 480,
416 .aspect = {1, 1},
417 .fps = {5994, 100},
418 .left_margin = 0x8F,
419 .upper_margin = 0x2D,
420 },
421 {
422 .name = "576p50",
423 .timings_type = VPBE_ENC_DV_TIMINGS,
424 .dv_timings = V4L2_DV_BT_CEA_720X576P50,
425 .interlaced = 0,
426 .xres = 720,
427 .yres = 576,
428 .aspect = {1, 1},
429 .fps = {50, 1},
430 .left_margin = 0x8C,
431 .upper_margin = 0x36,
432 },
433 {
434 .name = "720p60",
435 .timings_type = VPBE_ENC_DV_TIMINGS,
436 .dv_timings = V4L2_DV_BT_CEA_1280X720P60,
437 .interlaced = 0,
438 .xres = 1280,
439 .yres = 720,
440 .aspect = {1, 1},
441 .fps = {60, 1},
442 .left_margin = 0x117,
443 .right_margin = 70,
444 .upper_margin = 38,
445 .lower_margin = 3,
446 .hsync_len = 80,
447 .vsync_len = 5,
448 },
449 {
450 .name = "1080i60",
451 .timings_type = VPBE_ENC_DV_TIMINGS,
452 .dv_timings = V4L2_DV_BT_CEA_1920X1080I60,
453 .interlaced = 1,
454 .xres = 1920,
455 .yres = 1080,
456 .aspect = {1, 1},
457 .fps = {30, 1},
458 .left_margin = 0xc9,
459 .right_margin = 80,
460 .upper_margin = 30,
461 .lower_margin = 3,
462 .hsync_len = 88,
463 .vsync_len = 5,
464 },
465};
466
467#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
468
469/*
470 * The outputs available from the VPBE + encoders. Keep the order the same
471 * as that of the encoders: first those from the venc, then those from the
472 * external encoders. The index in each output refers to the index on a
473 * particular encoder; the driver passes it to the encoder when it supports
474 * more than one output. The application uses the array index to set an output.
475 */
476static struct vpbe_output dm365evm_vpbe_outputs[] = {
477 {
478 .output = {
479 .index = 0,
480 .name = "Composite",
481 .type = V4L2_OUTPUT_TYPE_ANALOG,
482 .std = VENC_STD_ALL,
483 .capabilities = V4L2_OUT_CAP_STD,
484 },
485 .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME,
486 .default_mode = "ntsc",
487 .num_modes = ARRAY_SIZE(dm365evm_enc_std_timing),
488 .modes = dm365evm_enc_std_timing,
489 .if_params = V4L2_MBUS_FMT_FIXED,
490 },
491 {
492 .output = {
493 .index = 1,
494 .name = "Component",
495 .type = V4L2_OUTPUT_TYPE_ANALOG,
496 .capabilities = V4L2_OUT_CAP_DV_TIMINGS,
497 },
498 .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME,
499 .default_mode = "480p59_94",
500 .num_modes = ARRAY_SIZE(dm365evm_enc_preset_timing),
501 .modes = dm365evm_enc_preset_timing,
502 .if_params = V4L2_MBUS_FMT_FIXED,
503 },
504};
505
506/*
507 * Amplifiers on the board
508 */
509struct ths7303_platform_data ths7303_pdata = {
510 .ch_1 = 3,
511 .ch_2 = 3,
512 .ch_3 = 3,
513 .init_enable = 1,
514};
515
516static struct amp_config_info vpbe_amp = {
517 .module_name = "ths7303",
518 .is_i2c = 1,
519 .board_info = {
520 I2C_BOARD_INFO("ths7303", 0x2c),
521 .platform_data = &ths7303_pdata,
522 }
523};
524
525static struct vpbe_config dm365evm_display_cfg = {
526 .module_name = "dm365-vpbe-display",
527 .i2c_adapter_id = 1,
528 .amp = &vpbe_amp,
529 .osd = {
530 .module_name = DM365_VPBE_OSD_SUBDEV_NAME,
531 },
532 .venc = {
533 .module_name = DM365_VPBE_VENC_SUBDEV_NAME,
534 },
535 .num_outputs = ARRAY_SIZE(dm365evm_vpbe_outputs),
536 .outputs = dm365evm_vpbe_outputs,
537};
538
377static void __init evm_init_i2c(void) 539static void __init evm_init_i2c(void)
378{ 540{
379 davinci_init_i2c(&i2c_pdata); 541 davinci_init_i2c(&i2c_pdata);
@@ -564,8 +726,6 @@ static struct davinci_uart_config uart_config __initdata = {
564 726
565static void __init dm365_evm_map_io(void) 727static void __init dm365_evm_map_io(void)
566{ 728{
567 /* setup input configuration for VPFE input devices */
568 dm365_set_vpfe_config(&vpfe_cfg);
569 dm365_init(); 729 dm365_init();
570} 730}
571 731
@@ -597,6 +757,8 @@ static __init void dm365_evm_init(void)
597 757
598 davinci_setup_mmc(0, &dm365evm_mmc_config); 758 davinci_setup_mmc(0, &dm365evm_mmc_config);
599 759
760 dm365_init_video(&vpfe_cfg, &dm365evm_display_cfg);
761
600 /* maybe setup mmc1/etc ... _after_ mmc0 */ 762 /* maybe setup mmc1/etc ... _after_ mmc0 */
601 evm_init_cpld(); 763 evm_init_cpld();
602 764
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 71735e7797cc..745280d4144c 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -622,7 +622,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = {
622 { 622 {
623 .name = "ntsc", 623 .name = "ntsc",
624 .timings_type = VPBE_ENC_STD, 624 .timings_type = VPBE_ENC_STD,
625 .std_id = V4L2_STD_525_60, 625 .std_id = V4L2_STD_NTSC,
626 .interlaced = 1, 626 .interlaced = 1,
627 .xres = 720, 627 .xres = 720,
628 .yres = 480, 628 .yres = 480,
@@ -634,7 +634,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = {
634 { 634 {
635 .name = "pal", 635 .name = "pal",
636 .timings_type = VPBE_ENC_STD, 636 .timings_type = VPBE_ENC_STD,
637 .std_id = V4L2_STD_625_50, 637 .std_id = V4L2_STD_PAL,
638 .interlaced = 1, 638 .interlaced = 1,
639 .xres = 720, 639 .xres = 720,
640 .yres = 576, 640 .yres = 576,
@@ -649,7 +649,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = {
649static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = { 649static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = {
650 { 650 {
651 .name = "480p59_94", 651 .name = "480p59_94",
652 .timings_type = VPBE_ENC_CUSTOM_TIMINGS, 652 .timings_type = VPBE_ENC_DV_TIMINGS,
653 .dv_timings = V4L2_DV_BT_CEA_720X480P59_94, 653 .dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
654 .interlaced = 0, 654 .interlaced = 0,
655 .xres = 720, 655 .xres = 720,
@@ -661,7 +661,7 @@ static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = {
661 }, 661 },
662 { 662 {
663 .name = "576p50", 663 .name = "576p50",
664 .timings_type = VPBE_ENC_CUSTOM_TIMINGS, 664 .timings_type = VPBE_ENC_DV_TIMINGS,
665 .dv_timings = V4L2_DV_BT_CEA_720X576P50, 665 .dv_timings = V4L2_DV_BT_CEA_720X576P50,
666 .interlaced = 0, 666 .interlaced = 0,
667 .xres = 720, 667 .xres = 720,
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index de7adff324dc..fc4871ac1c2c 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -514,7 +514,7 @@ static const struct vpif_output dm6467_ch0_outputs[] = {
514 .index = 1, 514 .index = 1,
515 .name = "Component", 515 .name = "Component",
516 .type = V4L2_OUTPUT_TYPE_ANALOG, 516 .type = V4L2_OUTPUT_TYPE_ANALOG,
517 .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS, 517 .capabilities = V4L2_OUT_CAP_DV_TIMINGS,
518 }, 518 },
519 .subdev_name = "adv7343", 519 .subdev_name = "adv7343",
520 .output_route = ADV7343_COMPONENT_ID, 520 .output_route = ADV7343_COMPONENT_ID,
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index 12d544befcfa..1ab3df423dac 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -36,12 +36,19 @@
36#include <media/davinci/vpbe_osd.h> 36#include <media/davinci/vpbe_osd.h>
37 37
38#define DAVINCI_SYSTEM_MODULE_BASE 0x01c40000 38#define DAVINCI_SYSTEM_MODULE_BASE 0x01c40000
39#define SYSMOD_VDAC_CONFIG 0x2c
39#define SYSMOD_VIDCLKCTL 0x38 40#define SYSMOD_VIDCLKCTL 0x38
40#define SYSMOD_VPSS_CLKCTL 0x44 41#define SYSMOD_VPSS_CLKCTL 0x44
41#define SYSMOD_VDD3P3VPWDN 0x48 42#define SYSMOD_VDD3P3VPWDN 0x48
42#define SYSMOD_VSCLKDIS 0x6c 43#define SYSMOD_VSCLKDIS 0x6c
43#define SYSMOD_PUPDCTL1 0x7c 44#define SYSMOD_PUPDCTL1 0x7c
44 45
46/* VPSS CLKCTL bit definitions */
47#define VPSS_MUXSEL_EXTCLK_ENABLE BIT(1)
48#define VPSS_VENCCLKEN_ENABLE BIT(3)
49#define VPSS_DACCLKEN_ENABLE BIT(4)
50#define VPSS_PLLC2SYSCLK5_ENABLE BIT(5)
51
45extern void __iomem *davinci_sysmod_base; 52extern void __iomem *davinci_sysmod_base;
46#define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x)) 53#define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x))
47void davinci_map_sysmod(void); 54void davinci_map_sysmod(void);
@@ -74,7 +81,7 @@ void __init dm355_init(void);
74void dm355_init_spi0(unsigned chipselect_mask, 81void dm355_init_spi0(unsigned chipselect_mask,
75 const struct spi_board_info *info, unsigned len); 82 const struct spi_board_info *info, unsigned len);
76void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata); 83void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata);
77void dm355_set_vpfe_config(struct vpfe_config *cfg); 84int dm355_init_video(struct vpfe_config *, struct vpbe_config *);
78 85
79/* DM365 function declarations */ 86/* DM365 function declarations */
80void __init dm365_init(void); 87void __init dm365_init(void);
@@ -84,7 +91,7 @@ void __init dm365_init_ks(struct davinci_ks_platform_data *pdata);
84void __init dm365_init_rtc(void); 91void __init dm365_init_rtc(void);
85void dm365_init_spi0(unsigned chipselect_mask, 92void dm365_init_spi0(unsigned chipselect_mask,
86 const struct spi_board_info *info, unsigned len); 93 const struct spi_board_info *info, unsigned len);
87void dm365_set_vpfe_config(struct vpfe_config *cfg); 94int dm365_init_video(struct vpfe_config *, struct vpbe_config *);
88 95
89/* DM644x function declarations */ 96/* DM644x function declarations */
90void __init dm644x_init(void); 97void __init dm644x_init(void);
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index b49c3b77d55e..bf9a9d4ad9f5 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -35,6 +35,8 @@
35#include "asp.h" 35#include "asp.h"
36 36
37#define DM355_UART2_BASE (IO_PHYS + 0x206000) 37#define DM355_UART2_BASE (IO_PHYS + 0x206000)
38#define DM355_OSD_BASE (IO_PHYS + 0x70200)
39#define DM355_VENC_BASE (IO_PHYS + 0x70400)
38 40
39/* 41/*
40 * Device specific clocks 42 * Device specific clocks
@@ -345,8 +347,8 @@ static struct clk_lookup dm355_clks[] = {
345 CLK(NULL, "pll1_aux", &pll1_aux_clk), 347 CLK(NULL, "pll1_aux", &pll1_aux_clk),
346 CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp), 348 CLK(NULL, "pll1_sysclkbp", &pll1_sysclkbp),
347 CLK(NULL, "vpss_dac", &vpss_dac_clk), 349 CLK(NULL, "vpss_dac", &vpss_dac_clk),
348 CLK(NULL, "vpss_master", &vpss_master_clk), 350 CLK("vpss", "master", &vpss_master_clk),
349 CLK(NULL, "vpss_slave", &vpss_slave_clk), 351 CLK("vpss", "slave", &vpss_slave_clk),
350 CLK(NULL, "clkout1", &clkout1_clk), 352 CLK(NULL, "clkout1", &clkout1_clk),
351 CLK(NULL, "clkout2", &clkout2_clk), 353 CLK(NULL, "clkout2", &clkout2_clk),
352 CLK(NULL, "pll2", &pll2_clk), 354 CLK(NULL, "pll2", &pll2_clk),
@@ -744,11 +746,146 @@ static struct platform_device vpfe_capture_dev = {
744 }, 746 },
745}; 747};
746 748
747void dm355_set_vpfe_config(struct vpfe_config *cfg) 749static struct resource dm355_osd_resources[] = {
750 {
751 .start = DM355_OSD_BASE,
752 .end = DM355_OSD_BASE + 0x17f,
753 .flags = IORESOURCE_MEM,
754 },
755};
756
757static struct platform_device dm355_osd_dev = {
758 .name = DM355_VPBE_OSD_SUBDEV_NAME,
759 .id = -1,
760 .num_resources = ARRAY_SIZE(dm355_osd_resources),
761 .resource = dm355_osd_resources,
762 .dev = {
763 .dma_mask = &vpfe_capture_dma_mask,
764 .coherent_dma_mask = DMA_BIT_MASK(32),
765 },
766};
767
768static struct resource dm355_venc_resources[] = {
769 {
770 .start = IRQ_VENCINT,
771 .end = IRQ_VENCINT,
772 .flags = IORESOURCE_IRQ,
773 },
774 /* venc registers io space */
775 {
776 .start = DM355_VENC_BASE,
777 .end = DM355_VENC_BASE + 0x17f,
778 .flags = IORESOURCE_MEM,
779 },
780 /* VDAC config register io space */
781 {
782 .start = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG,
783 .end = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG + 3,
784 .flags = IORESOURCE_MEM,
785 },
786};
787
788static struct resource dm355_v4l2_disp_resources[] = {
789 {
790 .start = IRQ_VENCINT,
791 .end = IRQ_VENCINT,
792 .flags = IORESOURCE_IRQ,
793 },
794 /* venc registers io space */
795 {
796 .start = DM355_VENC_BASE,
797 .end = DM355_VENC_BASE + 0x17f,
798 .flags = IORESOURCE_MEM,
799 },
800};
801
802static int dm355_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type,
803 int field)
804{
805 switch (if_type) {
806 case V4L2_MBUS_FMT_SGRBG8_1X8:
807 davinci_cfg_reg(DM355_VOUT_FIELD_G70);
808 break;
809 case V4L2_MBUS_FMT_YUYV10_1X20:
810 if (field)
811 davinci_cfg_reg(DM355_VOUT_FIELD);
812 else
813 davinci_cfg_reg(DM355_VOUT_FIELD_G70);
814 break;
815 default:
816 return -EINVAL;
817 }
818
819 davinci_cfg_reg(DM355_VOUT_COUTL_EN);
820 davinci_cfg_reg(DM355_VOUT_COUTH_EN);
821
822 return 0;
823}
824
825static int dm355_venc_setup_clock(enum vpbe_enc_timings_type type,
826 unsigned int pclock)
748{ 827{
749 vpfe_capture_dev.dev.platform_data = cfg; 828 void __iomem *vpss_clk_ctrl_reg;
829
830 vpss_clk_ctrl_reg = DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL);
831
832 switch (type) {
833 case VPBE_ENC_STD:
834 writel(VPSS_DACCLKEN_ENABLE | VPSS_VENCCLKEN_ENABLE,
835 vpss_clk_ctrl_reg);
836 break;
837 case VPBE_ENC_DV_TIMINGS:
838 if (pclock > 27000000)
839 /*
840 * For HD, use external clock source since we cannot
841 * support HD mode with internal clocks.
842 */
843 writel(VPSS_MUXSEL_EXTCLK_ENABLE, vpss_clk_ctrl_reg);
844 break;
845 default:
846 return -EINVAL;
847 }
848
849 return 0;
750} 850}
751 851
852static struct platform_device dm355_vpbe_display = {
853 .name = "vpbe-v4l2",
854 .id = -1,
855 .num_resources = ARRAY_SIZE(dm355_v4l2_disp_resources),
856 .resource = dm355_v4l2_disp_resources,
857 .dev = {
858 .dma_mask = &vpfe_capture_dma_mask,
859 .coherent_dma_mask = DMA_BIT_MASK(32),
860 },
861};
862
863struct venc_platform_data dm355_venc_pdata = {
864 .setup_pinmux = dm355_vpbe_setup_pinmux,
865 .setup_clock = dm355_venc_setup_clock,
866};
867
868static struct platform_device dm355_venc_dev = {
869 .name = DM355_VPBE_VENC_SUBDEV_NAME,
870 .id = -1,
871 .num_resources = ARRAY_SIZE(dm355_venc_resources),
872 .resource = dm355_venc_resources,
873 .dev = {
874 .dma_mask = &vpfe_capture_dma_mask,
875 .coherent_dma_mask = DMA_BIT_MASK(32),
876 .platform_data = (void *)&dm355_venc_pdata,
877 },
878};
879
880static struct platform_device dm355_vpbe_dev = {
881 .name = "vpbe_controller",
882 .id = -1,
883 .dev = {
884 .dma_mask = &vpfe_capture_dma_mask,
885 .coherent_dma_mask = DMA_BIT_MASK(32),
886 },
887};
888
752/*----------------------------------------------------------------------*/ 889/*----------------------------------------------------------------------*/
753 890
754static struct map_desc dm355_io_desc[] = { 891static struct map_desc dm355_io_desc[] = {
@@ -868,19 +1005,36 @@ void __init dm355_init(void)
868 davinci_map_sysmod(); 1005 davinci_map_sysmod();
869} 1006}
870 1007
1008int __init dm355_init_video(struct vpfe_config *vpfe_cfg,
1009 struct vpbe_config *vpbe_cfg)
1010{
1011 if (vpfe_cfg || vpbe_cfg)
1012 platform_device_register(&dm355_vpss_device);
1013
1014 if (vpfe_cfg) {
1015 vpfe_capture_dev.dev.platform_data = vpfe_cfg;
1016 platform_device_register(&dm355_ccdc_dev);
1017 platform_device_register(&vpfe_capture_dev);
1018 }
1019
1020 if (vpbe_cfg) {
1021 dm355_vpbe_dev.dev.platform_data = vpbe_cfg;
1022 platform_device_register(&dm355_osd_dev);
1023 platform_device_register(&dm355_venc_dev);
1024 platform_device_register(&dm355_vpbe_dev);
1025 platform_device_register(&dm355_vpbe_display);
1026 }
1027
1028 return 0;
1029}
1030
871static int __init dm355_init_devices(void) 1031static int __init dm355_init_devices(void)
872{ 1032{
873 if (!cpu_is_davinci_dm355()) 1033 if (!cpu_is_davinci_dm355())
874 return 0; 1034 return 0;
875 1035
876 /* Add ccdc clock aliases */
877 clk_add_alias("master", dm355_ccdc_dev.name, "vpss_master", NULL);
878 clk_add_alias("slave", dm355_ccdc_dev.name, "vpss_master", NULL);
879 davinci_cfg_reg(DM355_INT_EDMA_CC); 1036 davinci_cfg_reg(DM355_INT_EDMA_CC);
880 platform_device_register(&dm355_edma_device); 1037 platform_device_register(&dm355_edma_device);
881 platform_device_register(&dm355_vpss_device);
882 platform_device_register(&dm355_ccdc_dev);
883 platform_device_register(&vpfe_capture_dev);
884 1038
885 return 0; 1039 return 0;
886} 1040}
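
dm355_set_vpfe_config() only stashed a platform_data pointer; dm355_init_video() now owns the whole registration, adding the VPSS bridge whenever either subsystem is configured and the OSD, VENC, controller and display devices only when a vpbe_config is supplied. A usage sketch for a hypothetical capture-only board (not taken from the patch; vpfe_cfg stands for a board-provided struct vpfe_config):

	/* capture only: registers dm355_vpss_device, dm355_ccdc_dev and
	 * vpfe_capture_dev; the VPBE display stack stays unregistered */
	dm355_init_video(&vpfe_cfg, NULL);

	/* display only would be the mirror image */
	dm355_init_video(NULL, &board_display_cfg);
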
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6c3980540be0..ff771ceac3f1 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -39,16 +39,13 @@
39#include "asp.h" 39#include "asp.h"
40 40
41#define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */ 41#define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */
42
43/* Base of key scan register bank */
44#define DM365_KEYSCAN_BASE 0x01c69400
45
46#define DM365_RTC_BASE 0x01c69000 42#define DM365_RTC_BASE 0x01c69000
47 43#define DM365_KEYSCAN_BASE 0x01c69400
44#define DM365_OSD_BASE 0x01c71c00
45#define DM365_VENC_BASE 0x01c71e00
48#define DAVINCI_DM365_VC_BASE 0x01d0c000 46#define DAVINCI_DM365_VC_BASE 0x01d0c000
49#define DAVINCI_DMA_VC_TX 2 47#define DAVINCI_DMA_VC_TX 2
50#define DAVINCI_DMA_VC_RX 3 48#define DAVINCI_DMA_VC_RX 3
51
52#define DM365_EMAC_BASE 0x01d07000 49#define DM365_EMAC_BASE 0x01d07000
53#define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000) 50#define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000)
54#define DM365_EMAC_CNTRL_OFFSET 0x0000 51#define DM365_EMAC_CNTRL_OFFSET 0x0000
@@ -257,6 +254,12 @@ static struct clk vpss_master_clk = {
257 .flags = CLK_PSC, 254 .flags = CLK_PSC,
258}; 255};
259 256
257static struct clk vpss_slave_clk = {
258 .name = "vpss_slave",
259 .parent = &pll1_sysclk5,
260 .lpsc = DAVINCI_LPSC_VPSSSLV,
261};
262
260static struct clk arm_clk = { 263static struct clk arm_clk = {
261 .name = "arm_clk", 264 .name = "arm_clk",
262 .parent = &pll2_sysclk2, 265 .parent = &pll2_sysclk2,
@@ -449,7 +452,8 @@ static struct clk_lookup dm365_clks[] = {
449 CLK(NULL, "pll2_sysclk8", &pll2_sysclk8), 452 CLK(NULL, "pll2_sysclk8", &pll2_sysclk8),
450 CLK(NULL, "pll2_sysclk9", &pll2_sysclk9), 453 CLK(NULL, "pll2_sysclk9", &pll2_sysclk9),
451 CLK(NULL, "vpss_dac", &vpss_dac_clk), 454 CLK(NULL, "vpss_dac", &vpss_dac_clk),
452 CLK(NULL, "vpss_master", &vpss_master_clk), 455 CLK("vpss", "master", &vpss_master_clk),
456 CLK("vpss", "slave", &vpss_slave_clk),
453 CLK(NULL, "arm", &arm_clk), 457 CLK(NULL, "arm", &arm_clk),
454 CLK(NULL, "uart0", &uart0_clk), 458 CLK(NULL, "uart0", &uart0_clk),
455 CLK(NULL, "uart1", &uart1_clk), 459 CLK(NULL, "uart1", &uart1_clk),
@@ -1226,6 +1230,173 @@ static struct platform_device dm365_isif_dev = {
1226 }, 1230 },
1227}; 1231};
1228 1232
1233static struct resource dm365_osd_resources[] = {
1234 {
1235 .start = DM365_OSD_BASE,
1236 .end = DM365_OSD_BASE + 0xff,
1237 .flags = IORESOURCE_MEM,
1238 },
1239};
1240
1241static u64 dm365_video_dma_mask = DMA_BIT_MASK(32);
1242
1243static struct platform_device dm365_osd_dev = {
1244 .name = DM365_VPBE_OSD_SUBDEV_NAME,
1245 .id = -1,
1246 .num_resources = ARRAY_SIZE(dm365_osd_resources),
1247 .resource = dm365_osd_resources,
1248 .dev = {
1249 .dma_mask = &dm365_video_dma_mask,
1250 .coherent_dma_mask = DMA_BIT_MASK(32),
1251 },
1252};
1253
1254static struct resource dm365_venc_resources[] = {
1255 {
1256 .start = IRQ_VENCINT,
1257 .end = IRQ_VENCINT,
1258 .flags = IORESOURCE_IRQ,
1259 },
1260 /* venc registers io space */
1261 {
1262 .start = DM365_VENC_BASE,
1263 .end = DM365_VENC_BASE + 0x177,
1264 .flags = IORESOURCE_MEM,
1265 },
1266 /* vdaccfg registers io space */
1267 {
1268 .start = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG,
1269 .end = DAVINCI_SYSTEM_MODULE_BASE + SYSMOD_VDAC_CONFIG + 3,
1270 .flags = IORESOURCE_MEM,
1271 },
1272};
1273
1274static struct resource dm365_v4l2_disp_resources[] = {
1275 {
1276 .start = IRQ_VENCINT,
1277 .end = IRQ_VENCINT,
1278 .flags = IORESOURCE_IRQ,
1279 },
1280 /* venc registers io space */
1281 {
1282 .start = DM365_VENC_BASE,
1283 .end = DM365_VENC_BASE + 0x177,
1284 .flags = IORESOURCE_MEM,
1285 },
1286};
1287
1288static int dm365_vpbe_setup_pinmux(enum v4l2_mbus_pixelcode if_type,
1289 int field)
1290{
1291 switch (if_type) {
1292 case V4L2_MBUS_FMT_SGRBG8_1X8:
1293 davinci_cfg_reg(DM365_VOUT_FIELD_G81);
1294 davinci_cfg_reg(DM365_VOUT_COUTL_EN);
1295 davinci_cfg_reg(DM365_VOUT_COUTH_EN);
1296 break;
1297 case V4L2_MBUS_FMT_YUYV10_1X20:
1298 if (field)
1299 davinci_cfg_reg(DM365_VOUT_FIELD);
1300 else
1301 davinci_cfg_reg(DM365_VOUT_FIELD_G81);
1302 davinci_cfg_reg(DM365_VOUT_COUTL_EN);
1303 davinci_cfg_reg(DM365_VOUT_COUTH_EN);
1304 break;
1305 default:
1306 return -EINVAL;
1307 }
1308
1309 return 0;
1310}
1311
1312static int dm365_venc_setup_clock(enum vpbe_enc_timings_type type,
1313 unsigned int pclock)
1314{
1315 void __iomem *vpss_clkctl_reg;
1316 u32 val;
1317
1318 vpss_clkctl_reg = DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL);
1319
1320 switch (type) {
1321 case VPBE_ENC_STD:
1322 val = VPSS_VENCCLKEN_ENABLE | VPSS_DACCLKEN_ENABLE;
1323 break;
1324 case VPBE_ENC_DV_TIMINGS:
1325 if (pclock <= 27000000) {
1326 val = VPSS_VENCCLKEN_ENABLE | VPSS_DACCLKEN_ENABLE;
1327 } else {
1328 /* set sysclk4 to output 74.25 MHz from pll1 */
1329 val = VPSS_PLLC2SYSCLK5_ENABLE | VPSS_DACCLKEN_ENABLE |
1330 VPSS_VENCCLKEN_ENABLE;
1331 }
1332 break;
1333 default:
1334 return -EINVAL;
1335 }
1336 writel(val, vpss_clkctl_reg);
1337
1338 return 0;
1339}
1340
1341static struct platform_device dm365_vpbe_display = {
1342 .name = "vpbe-v4l2",
1343 .id = -1,
1344 .num_resources = ARRAY_SIZE(dm365_v4l2_disp_resources),
1345 .resource = dm365_v4l2_disp_resources,
1346 .dev = {
1347 .dma_mask = &dm365_video_dma_mask,
1348 .coherent_dma_mask = DMA_BIT_MASK(32),
1349 },
1350};
1351
1352struct venc_platform_data dm365_venc_pdata = {
1353 .setup_pinmux = dm365_vpbe_setup_pinmux,
1354 .setup_clock = dm365_venc_setup_clock,
1355};
1356
1357static struct platform_device dm365_venc_dev = {
1358 .name = DM365_VPBE_VENC_SUBDEV_NAME,
1359 .id = -1,
1360 .num_resources = ARRAY_SIZE(dm365_venc_resources),
1361 .resource = dm365_venc_resources,
1362 .dev = {
1363 .dma_mask = &dm365_video_dma_mask,
1364 .coherent_dma_mask = DMA_BIT_MASK(32),
1365 .platform_data = (void *)&dm365_venc_pdata,
1366 },
1367};
1368
1369static struct platform_device dm365_vpbe_dev = {
1370 .name = "vpbe_controller",
1371 .id = -1,
1372 .dev = {
1373 .dma_mask = &dm365_video_dma_mask,
1374 .coherent_dma_mask = DMA_BIT_MASK(32),
1375 },
1376};
1377
1378int __init dm365_init_video(struct vpfe_config *vpfe_cfg,
1379 struct vpbe_config *vpbe_cfg)
1380{
1381 if (vpfe_cfg || vpbe_cfg)
1382 platform_device_register(&dm365_vpss_device);
1383
1384 if (vpfe_cfg) {
1385 vpfe_capture_dev.dev.platform_data = vpfe_cfg;
1386 platform_device_register(&dm365_isif_dev);
1387 platform_device_register(&vpfe_capture_dev);
1388 }
1389 if (vpbe_cfg) {
1390 dm365_vpbe_dev.dev.platform_data = vpbe_cfg;
1391 platform_device_register(&dm365_osd_dev);
1392 platform_device_register(&dm365_venc_dev);
1393 platform_device_register(&dm365_vpbe_dev);
1394 platform_device_register(&dm365_vpbe_display);
1395 }
1396
1397 return 0;
1398}
1399
1229static int __init dm365_init_devices(void) 1400static int __init dm365_init_devices(void)
1230{ 1401{
1231 if (!cpu_is_davinci_dm365()) 1402 if (!cpu_is_davinci_dm365())
@@ -1239,16 +1410,6 @@ static int __init dm365_init_devices(void)
1239 clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev), 1410 clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev),
1240 NULL, &dm365_emac_device.dev); 1411 NULL, &dm365_emac_device.dev);
1241 1412
1242 /* Add isif clock alias */
1243 clk_add_alias("master", dm365_isif_dev.name, "vpss_master", NULL);
1244 platform_device_register(&dm365_vpss_device);
1245 platform_device_register(&dm365_isif_dev);
1246 platform_device_register(&vpfe_capture_dev);
1247 return 0; 1413 return 0;
1248} 1414}
1249postcore_initcall(dm365_init_devices); 1415postcore_initcall(dm365_init_devices);
1250
1251void dm365_set_vpfe_config(struct vpfe_config *cfg)
1252{
1253 vpfe_capture_dev.dev.platform_data = cfg;
1254}
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index db1dd92e00af..c2a9273330bf 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -300,8 +300,8 @@ static struct clk_lookup dm644x_clks[] = {
300 CLK(NULL, "dsp", &dsp_clk), 300 CLK(NULL, "dsp", &dsp_clk),
301 CLK(NULL, "arm", &arm_clk), 301 CLK(NULL, "arm", &arm_clk),
302 CLK(NULL, "vicp", &vicp_clk), 302 CLK(NULL, "vicp", &vicp_clk),
303 CLK(NULL, "vpss_master", &vpss_master_clk), 303 CLK("vpss", "master", &vpss_master_clk),
304 CLK(NULL, "vpss_slave", &vpss_slave_clk), 304 CLK("vpss", "slave", &vpss_slave_clk),
305 CLK(NULL, "arm", &arm_clk), 305 CLK(NULL, "arm", &arm_clk),
306 CLK(NULL, "uart0", &uart0_clk), 306 CLK(NULL, "uart0", &uart0_clk),
307 CLK(NULL, "uart1", &uart1_clk), 307 CLK(NULL, "uart1", &uart1_clk),
@@ -706,7 +706,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
706 v |= DM644X_VPSS_DACCLKEN; 706 v |= DM644X_VPSS_DACCLKEN;
707 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); 707 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
708 break; 708 break;
709 case VPBE_ENC_CUSTOM_TIMINGS: 709 case VPBE_ENC_DV_TIMINGS:
710 if (pclock <= 27000000) { 710 if (pclock <= 27000000) {
711 v |= DM644X_VPSS_DACCLKEN; 711 v |= DM644X_VPSS_DACCLKEN;
712 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); 712 writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
@@ -901,11 +901,6 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
901 dm644x_vpfe_dev.dev.platform_data = vpfe_cfg; 901 dm644x_vpfe_dev.dev.platform_data = vpfe_cfg;
902 platform_device_register(&dm644x_ccdc_dev); 902 platform_device_register(&dm644x_ccdc_dev);
903 platform_device_register(&dm644x_vpfe_dev); 903 platform_device_register(&dm644x_vpfe_dev);
904 /* Add ccdc clock aliases */
905 clk_add_alias("master", dm644x_ccdc_dev.name,
906 "vpss_master", NULL);
907 clk_add_alias("slave", dm644x_ccdc_dev.name,
908 "vpss_slave", NULL);
909 } 904 }
910 905
911 if (vpbe_cfg) { 906 if (vpbe_cfg) {
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index a685e9706b7b..45b7c71d9cc1 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
743 */ 743 */
744int edma_alloc_slot(unsigned ctlr, int slot) 744int edma_alloc_slot(unsigned ctlr, int slot)
745{ 745{
746 if (!edma_cc[ctlr])
747 return -EINVAL;
748
746 if (slot >= 0) 749 if (slot >= 0)
747 slot = EDMA_CHAN_SLOT(slot); 750 slot = EDMA_CHAN_SLOT(slot);
748 751
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
index c90250e3bef8..6b98413cebd6 100644
--- a/arch/arm/mach-davinci/pm_domain.c
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -53,7 +53,7 @@ static struct dev_pm_domain davinci_pm_domain = {
53 53
54static struct pm_clk_notifier_block platform_bus_notifier = { 54static struct pm_clk_notifier_block platform_bus_notifier = {
55 .pm_domain = &davinci_pm_domain, 55 .pm_domain = &davinci_pm_domain,
56 .con_ids = { "fck", NULL, }, 56 .con_ids = { "fck", "master", "slave", NULL },
57}; 57};
58 58
59static int __init davinci_pm_runtime_init(void) 59static int __init davinci_pm_runtime_init(void)
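
Registering the lookups as CLK("vpss", "master", ...) and CLK("vpss", "slave", ...) ties the VPSS clocks to a device named "vpss" rather than to global con_ids, and the added "master"/"slave" entries in platform_bus_notifier let the davinci pm_clk domain pick them up automatically. A sketch of how such a device would resolve its clocks by connection id (an illustration under the usual clk API; the function is hypothetical, not from the patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* a platform device named "vpss" matches the dev_id of the lookups above */
	static int vpss_enable_clocks(struct platform_device *pdev)
	{
		struct clk *master = clk_get(&pdev->dev, "master");
		struct clk *slave = clk_get(&pdev->dev, "slave");

		if (IS_ERR(master) || IS_ERR(slave))
			return -ENODEV;

		clk_prepare_enable(master);
		clk_prepare_enable(slave);
		return 0;
	}
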
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h
index d2afb4dd82ab..b5cc77d2380b 100644
--- a/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
47 47
48static inline void putc(int c) 48static inline void putc(int c)
49{ 49{
50 /* Transmit fifo not full? */ 50 int i;
51 while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) 51
52 ; 52 for (i = 0; i < 10000; i++) {
53 /* Transmit fifo not full? */
54 if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
55 break;
56 }
53 57
54 __raw_writeb(c, PHYS_UART_DATA); 58 __raw_writeb(c, PHYS_UART_DATA);
55} 59}
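
Bounding the FIFO-full poll keeps the decompressor's putc() from wedging the boot when the debug UART is absent or unclocked; at worst a character is dropped, which is harmless for early output. The same idiom with the magic number named, as an editorial variant of what the patch adds:

	#define EP93XX_PUTC_TIMEOUT	10000	/* arbitrary bound, same value as the patch */

	static inline void putc(int c)
	{
		int i;

		/* wait for TX FIFO space, but never spin forever */
		for (i = 0; i < EP93XX_PUTC_TIMEOUT; i++)
			if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
				break;

		__raw_writeb(c, PHYS_UART_DATA);
	}
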
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 497fcb793dc1..d28c7fbaba2d 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -97,6 +97,19 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
97static struct regulator_consumer_supply max8952_consumer = 97static struct regulator_consumer_supply max8952_consumer =
98 REGULATOR_SUPPLY("vdd_arm", NULL); 98 REGULATOR_SUPPLY("vdd_arm", NULL);
99 99
100static struct regulator_init_data universal_max8952_reg_data = {
101 .constraints = {
102 .name = "VARM_1.2V",
103 .min_uV = 770000,
104 .max_uV = 1400000,
105 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
106 .always_on = 1,
107 .boot_on = 1,
108 },
109 .num_consumer_supplies = 1,
110 .consumer_supplies = &max8952_consumer,
111};
112
100static struct max8952_platform_data universal_max8952_pdata __initdata = { 113static struct max8952_platform_data universal_max8952_pdata __initdata = {
101 .gpio_vid0 = EXYNOS4_GPX0(3), 114 .gpio_vid0 = EXYNOS4_GPX0(3),
102 .gpio_vid1 = EXYNOS4_GPX0(4), 115 .gpio_vid1 = EXYNOS4_GPX0(4),
@@ -105,19 +118,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
105 .dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */ 118 .dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */
106 .sync_freq = 0, /* default: fastest */ 119 .sync_freq = 0, /* default: fastest */
107 .ramp_speed = 0, /* default: fastest */ 120 .ramp_speed = 0, /* default: fastest */
108 121 .reg_data = &universal_max8952_reg_data,
109 .reg_data = {
110 .constraints = {
111 .name = "VARM_1.2V",
112 .min_uV = 770000,
113 .max_uV = 1400000,
114 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
115 .always_on = 1,
116 .boot_on = 1,
117 },
118 .num_consumer_supplies = 1,
119 .consumer_supplies = &max8952_consumer,
120 },
121}; 122};
122 123
123static struct regulator_consumer_supply lp3974_buck1_consumer = 124static struct regulator_consumer_supply lp3974_buck1_consumer =
diff --git a/arch/arm/mach-exynos/setup-usb-phy.c b/arch/arm/mach-exynos/setup-usb-phy.c
index b81cc569a8dd..6af40662a449 100644
--- a/arch/arm/mach-exynos/setup-usb-phy.c
+++ b/arch/arm/mach-exynos/setup-usb-phy.c
@@ -204,9 +204,9 @@ static int exynos4210_usb_phy1_exit(struct platform_device *pdev)
204 204
205int s5p_usb_phy_init(struct platform_device *pdev, int type) 205int s5p_usb_phy_init(struct platform_device *pdev, int type)
206{ 206{
207 if (type == S5P_USB_PHY_DEVICE) 207 if (type == USB_PHY_TYPE_DEVICE)
208 return exynos4210_usb_phy0_init(pdev); 208 return exynos4210_usb_phy0_init(pdev);
209 else if (type == S5P_USB_PHY_HOST) 209 else if (type == USB_PHY_TYPE_HOST)
210 return exynos4210_usb_phy1_init(pdev); 210 return exynos4210_usb_phy1_init(pdev);
211 211
212 return -EINVAL; 212 return -EINVAL;
@@ -214,9 +214,9 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
214 214
215int s5p_usb_phy_exit(struct platform_device *pdev, int type) 215int s5p_usb_phy_exit(struct platform_device *pdev, int type)
216{ 216{
217 if (type == S5P_USB_PHY_DEVICE) 217 if (type == USB_PHY_TYPE_DEVICE)
218 return exynos4210_usb_phy0_exit(pdev); 218 return exynos4210_usb_phy0_exit(pdev);
219 else if (type == S5P_USB_PHY_HOST) 219 else if (type == USB_PHY_TYPE_HOST)
220 return exynos4210_usb_phy1_exit(pdev); 220 return exynos4210_usb_phy1_exit(pdev);
221 221
222 return -EINVAL; 222 return -EINVAL;
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index abda5a18a664..0f2111a11315 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -67,6 +67,7 @@ config ARCH_NETWINDER
67 select ISA 67 select ISA
68 select ISA_DMA 68 select ISA_DMA
69 select PCI 69 select PCI
70 select VIRT_TO_BUS
70 help 71 help
71 Say Y here if you intend to run this kernel on the Rebel.COM 72 Say Y here if you intend to run this kernel on the Rebel.COM
72 NetWinder. Information about this machine can be found at: 73 NetWinder. Information about this machine can be found at:
diff --git a/arch/arm/mach-gemini/idle.c b/arch/arm/mach-gemini/idle.c
index 92bbd6bb600a..87dff4f5059e 100644
--- a/arch/arm/mach-gemini/idle.c
+++ b/arch/arm/mach-gemini/idle.c
@@ -13,9 +13,11 @@ static void gemini_idle(void)
 13 * will never wake up... Actually it is not very good to enable 13 * will never wake up... Actually it is not very good to enable
14 * interrupts first since scheduler can miss a tick, but there is 14 * interrupts first since scheduler can miss a tick, but there is
 15 * no other way around this. Platforms that need it for power saving 15 * no other way around this. Platforms that need it for power saving
16 * should call enable_hlt() in init code, since by default it is 16 * should enable it in init code, since by default it is
17 * disabled. 17 * disabled.
18 */ 18 */
19
20 /* FIXME: Enabling interrupts here is racy! */
19 local_irq_enable(); 21 local_irq_enable();
20 cpu_do_idle(); 22 cpu_do_idle();
21} 23}
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 020852d3bdd8..6d8f6d1669ff 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -15,6 +15,8 @@
15#include <linux/stddef.h> 15#include <linux/stddef.h>
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/cpu.h>
19
18#include <asm/irq.h> 20#include <asm/irq.h>
19#include <asm/mach/irq.h> 21#include <asm/mach/irq.h>
20#include <asm/system_misc.h> 22#include <asm/system_misc.h>
@@ -77,7 +79,7 @@ void __init gemini_init_irq(void)
77 * Disable the idle handler by default since it is buggy 79 * Disable the idle handler by default since it is buggy
78 * For more info see arch/arm/mach-gemini/idle.c 80 * For more info see arch/arm/mach-gemini/idle.c
79 */ 81 */
80 disable_hlt(); 82 cpu_idle_poll_ctrl(true);
81 83
82 request_resource(&iomem_resource, &irq_resource); 84 request_resource(&iomem_resource, &irq_resource);
83 85
diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c
index f30c52843396..890cae23c12a 100644
--- a/arch/arm/mach-highbank/hotplug.c
+++ b/arch/arm/mach-highbank/hotplug.c
@@ -28,13 +28,11 @@ extern void secondary_startup(void);
28 */ 28 */
29void __ref highbank_cpu_die(unsigned int cpu) 29void __ref highbank_cpu_die(unsigned int cpu)
30{ 30{
31 flush_cache_all();
32
33 highbank_set_cpu_jump(cpu, phys_to_virt(0)); 31 highbank_set_cpu_jump(cpu, phys_to_virt(0));
34 highbank_set_core_pwr();
35 32
36 cpu_do_idle(); 33 flush_cache_louis();
34 highbank_set_core_pwr();
37 35
38 /* We should never return from idle */ 36 while (1)
39 panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); 37 cpu_do_idle();
40} 38}
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
index 1ab91b5209e6..85b728cc27ab 100644
--- a/arch/arm/mach-imx/clk-busy.c
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -169,7 +169,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
169 169
170 busy->mux.reg = reg; 170 busy->mux.reg = reg;
171 busy->mux.shift = shift; 171 busy->mux.shift = shift;
172 busy->mux.width = width; 172 busy->mux.mask = BIT(width) - 1;
173 busy->mux.lock = &imx_ccm_lock; 173 busy->mux.lock = &imx_ccm_lock;
174 busy->mux_ops = &clk_mux_ops; 174 busy->mux_ops = &clk_mux_ops;
175 175
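
struct clk_mux selects the parent field with an unshifted bit mask, so assigning the raw width left .mask at zero and the busy mux could never report or change its parent; BIT(width) - 1 builds the mask that the common mux ops apply after shifting by .shift. For example, with illustrative values that are not from the patch:

	/* a 3-bit parent selector occupying bits [28:26] of the register */
	busy->mux.reg   = reg;
	busy->mux.shift = 26;
	busy->mux.mask  = BIT(3) - 1;	/* 0x7, used as (readl(reg) >> shift) & mask */
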
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 74e3a34d78b8..2193c834f55c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); 257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); 258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
260 clk_register_clkdev(clk[admux_gate], "audmux", NULL);
260 261
261 clk_prepare_enable(clk[spba_gate]); 262 clk_prepare_enable(clk[spba_gate]);
262 clk_prepare_enable(clk[gpio1_gate]); 263 clk_prepare_enable(clk[gpio1_gate]);
@@ -264,6 +265,8 @@ int __init mx35_clocks_init(void)
264 clk_prepare_enable(clk[gpio3_gate]); 265 clk_prepare_enable(clk[gpio3_gate]);
265 clk_prepare_enable(clk[iim_gate]); 266 clk_prepare_enable(clk[iim_gate]);
266 clk_prepare_enable(clk[emi_gate]); 267 clk_prepare_enable(clk[emi_gate]);
268 clk_prepare_enable(clk[max_gate]);
269 clk_prepare_enable(clk[iomuxc_gate]);
267 270
268 /* 271 /*
269 * SCC is needed to boot via mmc after a watchdog reset. The clock code 272 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 7b025ee528a5..d38e54f5b6d7 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
115static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; 115static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
116static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; 116static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
117static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; 117static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
118static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; 118static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
119static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; 119static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
120static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 120static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
121static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; 121static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -172,7 +172,7 @@ static struct clk *clk[clk_max];
172static struct clk_onecell_data clk_data; 172static struct clk_onecell_data clk_data;
173 173
174static enum mx6q_clks const clks_init_on[] __initconst = { 174static enum mx6q_clks const clks_init_on[] __initconst = {
175 mmdc_ch0_axi, rom, 175 mmdc_ch0_axi, rom, pll1_sys,
176}; 176};
177 177
178static struct clk_div_table clk_enet_ref_table[] = { 178static struct clk_div_table clk_enet_ref_table[] = {
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
443 443
444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); 444 clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); 445 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
446 clk_register_clkdev(clk[twd], NULL, "smp_twd");
447 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); 446 clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
448 clk_register_clkdev(clk[ahb], "ahb", NULL); 447 clk_register_clkdev(clk[ahb], "ahb", NULL);
449 clk_register_clkdev(clk[cko1], "cko1", NULL); 448 clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 5a800bfcec5b..5bf4a97ab241 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
110 110
111extern void imx_enable_cpu(int cpu, bool enable); 111extern void imx_enable_cpu(int cpu, bool enable);
112extern void imx_set_cpu_jump(int cpu, void *jump_addr); 112extern void imx_set_cpu_jump(int cpu, void *jump_addr);
113extern u32 imx_get_cpu_arg(int cpu);
114extern void imx_set_cpu_arg(int cpu, u32 arg);
113extern void v7_cpu_resume(void); 115extern void v7_cpu_resume(void);
114extern u32 *pl310_get_save_ptr(void); 116extern u32 *pl310_get_save_ptr(void);
115#ifdef CONFIG_SMP 117#ifdef CONFIG_SMP
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index 921fc1555854..a58c8b0527cc 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -26,16 +26,16 @@ ENDPROC(v7_secondary_startup)
26 26
27#ifdef CONFIG_PM 27#ifdef CONFIG_PM
28/* 28/*
29 * The following code is located into the .data section. This is to 29 * The following code must assume it is running from physical address
30 * allow phys_l2x0_saved_regs to be accessed with a relative load 30 * where absolute virtual addresses to the data section have to be
31 * as we are running on physical address here. 31 * turned into relative ones.
32 */ 32 */
33 .data
34 .align
35 33
36#ifdef CONFIG_CACHE_L2X0 34#ifdef CONFIG_CACHE_L2X0
37 .macro pl310_resume 35 .macro pl310_resume
38 ldr r2, phys_l2x0_saved_regs 36 adr r0, l2x0_saved_regs_offset
37 ldr r2, [r0]
38 add r2, r2, r0
39 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 39 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
40 ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value 40 ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
41 str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl 41 str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
@@ -43,9 +43,9 @@ ENDPROC(v7_secondary_startup)
43 str r1, [r0, #L2X0_CTRL] @ re-enable L2 43 str r1, [r0, #L2X0_CTRL] @ re-enable L2
44 .endm 44 .endm
45 45
46 .globl phys_l2x0_saved_regs 46l2x0_saved_regs_offset:
47phys_l2x0_saved_regs: 47 .word l2x0_saved_regs - .
48 .long 0 48
49#else 49#else
50 .macro pl310_resume 50 .macro pl310_resume
51 .endm 51 .endm
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 7bc5fe15dda2..361a253e2b63 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
46void imx_cpu_die(unsigned int cpu) 46void imx_cpu_die(unsigned int cpu)
47{ 47{
48 cpu_enter_lowpower(); 48 cpu_enter_lowpower();
49 /*
 50 * We use the cpu jumping argument register to sync with
 51 * imx_cpu_kill(), which runs on cpu0 and waits for this
 52 * register to be set before it cuts the cpu's power.
53 */
54 imx_set_cpu_arg(cpu, ~0);
49 cpu_do_idle(); 55 cpu_do_idle();
50} 56}
51 57
52int imx_cpu_kill(unsigned int cpu) 58int imx_cpu_kill(unsigned int cpu)
53{ 59{
60 unsigned long timeout = jiffies + msecs_to_jiffies(50);
61
62 while (imx_get_cpu_arg(cpu) == 0)
63 if (time_after(jiffies, timeout))
64 return 0;
54 imx_enable_cpu(cpu, false); 65 imx_enable_cpu(cpu, false);
66 imx_set_cpu_arg(cpu, 0);
55 return 1; 67 return 1;
56} 68}
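
Together with the imx_get_cpu_arg()/imx_set_cpu_arg() helpers added in src.c further below, this turns CPU teardown into a small handshake over the SRC_GPR argument slot: the dying CPU flags that it has parked itself, and imx_cpu_kill() on CPU0 waits up to 50 ms for that flag before removing power, then clears it for the next hotplug cycle. Both sides of the handshake condensed into one sketch (ordering only, matching the patch above):

	/* dying CPU, after cpu_enter_lowpower() */
	imx_set_cpu_arg(cpu, ~0);		/* "safe to cut my power now" */
	cpu_do_idle();				/* park here until power goes away */

	/* CPU0, in imx_cpu_kill() */
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (imx_get_cpu_arg(cpu) == 0)	/* wait for the flag ... */
		if (time_after(jiffies, timeout))
			return 0;		/* ... but not forever */
	imx_enable_cpu(cpu, false);		/* power the core down */
	imx_set_cpu_arg(cpu, 0);		/* re-arm for the next unplug */
	return 1;
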
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
index 03b65e5ea541..82348391582a 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
27 NULL 27 NULL
28}; 28};
29 29
30static void __init imx25_timer_init(void)
31{
32 mx25_clocks_init_dt();
33}
34
30DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") 35DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
31 .map_io = mx25_map_io, 36 .map_io = mx25_map_io,
32 .init_early = imx25_init_early, 37 .init_early = imx25_init_early,
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index ee42d20cba19..5faba7a3c95f 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -22,8 +22,6 @@
22#include "common.h" 22#include "common.h"
23#include "hardware.h" 23#include "hardware.h"
24 24
25extern unsigned long phys_l2x0_saved_regs;
26
27static int imx6q_suspend_finish(unsigned long val) 25static int imx6q_suspend_finish(unsigned long val)
28{ 26{
29 cpu_do_idle(); 27 cpu_do_idle();
@@ -57,18 +55,5 @@ static const struct platform_suspend_ops imx6q_pm_ops = {
57 55
58void __init imx6q_pm_init(void) 56void __init imx6q_pm_init(void)
59{ 57{
60 /*
61 * The l2x0 core code provides an infrastucture to save and restore
62 * l2x0 registers across suspend/resume cycle. But because imx6q
63 * retains L2 content during suspend and needs to resume L2 before
64 * MMU is enabled, it can only utilize register saving support and
65 * have to take care of restoring on its own. So we save physical
66 * address of the data structure used by l2x0 core to save registers,
67 * and later restore the necessary ones in imx6q resume entry.
68 */
69#ifdef CONFIG_CACHE_L2X0
70 phys_l2x0_saved_regs = __pa(&l2x0_saved_regs);
71#endif
72
73 suspend_set_ops(&imx6q_pm_ops); 58 suspend_set_ops(&imx6q_pm_ops);
74} 59}
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index e15f1555c59b..09a742f8c7ab 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
43 src_base + SRC_GPR1 + cpu * 8); 43 src_base + SRC_GPR1 + cpu * 8);
44} 44}
45 45
46u32 imx_get_cpu_arg(int cpu)
47{
48 cpu = cpu_logical_map(cpu);
49 return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
50}
51
52void imx_set_cpu_arg(int cpu, u32 arg)
53{
54 cpu = cpu_logical_map(cpu);
55 writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
56}
57
46void imx_src_prepare_restart(void) 58void imx_src_prepare_restart(void)
47{ 59{
48 u32 val; 60 u32 val;
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 1dbeb7c99d58..6600cff6bd92 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -29,6 +29,7 @@
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/cpu.h>
32 33
33#include <mach/udc.h> 34#include <mach/udc.h>
34#include <mach/hardware.h> 35#include <mach/hardware.h>
@@ -239,7 +240,7 @@ void __init ixp4xx_init_irq(void)
239 * ixp4xx does not implement the XScale PWRMODE register 240 * ixp4xx does not implement the XScale PWRMODE register
240 * so it must not call cpu_do_idle(). 241 * so it must not call cpu_do_idle().
241 */ 242 */
242 disable_hlt(); 243 cpu_idle_poll_ctrl(true);
243 244
244 /* Route all sources to IRQ instead of FIQ */ 245 /* Route all sources to IRQ instead of FIQ */
245 *IXP4XX_ICLR = 0x0; 246 *IXP4XX_ICLR = 0x0;
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index d42730a1d4ab..d599e354ca57 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -163,6 +163,7 @@ static struct platform_device vulcan_max6369 = {
163 163
164static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = { 164static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = {
165 .pin = 14, 165 .pin = 14,
166 .ext_pullup_enable_pin = -EINVAL,
166}; 167};
167 168
168static struct platform_device vulcan_w1_gpio = { 169static struct platform_device vulcan_w1_gpio = {
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 2e73e9d53f70..d367aa6b47bb 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -41,16 +41,12 @@ static void __init kirkwood_legacy_clk_init(void)
41 41
42 struct device_node *np = of_find_compatible_node( 42 struct device_node *np = of_find_compatible_node(
43 NULL, NULL, "marvell,kirkwood-gating-clock"); 43 NULL, NULL, "marvell,kirkwood-gating-clock");
44
45 struct of_phandle_args clkspec; 44 struct of_phandle_args clkspec;
45 struct clk *clk;
46 46
47 clkspec.np = np; 47 clkspec.np = np;
48 clkspec.args_count = 1; 48 clkspec.args_count = 1;
49 49
50 clkspec.args[0] = CGC_BIT_GE0;
51 orion_clkdev_add(NULL, "mv643xx_eth_port.0",
52 of_clk_get_from_provider(&clkspec));
53
54 clkspec.args[0] = CGC_BIT_PEX0; 50 clkspec.args[0] = CGC_BIT_PEX0;
55 orion_clkdev_add("0", "pcie", 51 orion_clkdev_add("0", "pcie",
56 of_clk_get_from_provider(&clkspec)); 52 of_clk_get_from_provider(&clkspec));
@@ -59,9 +55,24 @@ static void __init kirkwood_legacy_clk_init(void)
59 orion_clkdev_add("1", "pcie", 55 orion_clkdev_add("1", "pcie",
60 of_clk_get_from_provider(&clkspec)); 56 of_clk_get_from_provider(&clkspec));
61 57
62 clkspec.args[0] = CGC_BIT_GE1; 58 clkspec.args[0] = CGC_BIT_SDIO;
63 orion_clkdev_add(NULL, "mv643xx_eth_port.1", 59 orion_clkdev_add(NULL, "mvsdio",
64 of_clk_get_from_provider(&clkspec)); 60 of_clk_get_from_provider(&clkspec));
61
62 /*
63 * The ethernet interfaces forget the MAC address assigned by
64 * u-boot if the clocks are turned off. Until proper DT support
65 * is available we always enable them for now.
66 */
67 clkspec.args[0] = CGC_BIT_GE0;
68 clk = of_clk_get_from_provider(&clkspec);
69 orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk);
70 clk_prepare_enable(clk);
71
72 clkspec.args[0] = CGC_BIT_GE1;
73 clk = of_clk_get_from_provider(&clkspec);
74 orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk);
75 clk_prepare_enable(clk);
65} 76}
66 77
67static void __init kirkwood_of_clk_init(void) 78static void __init kirkwood_of_clk_init(void)
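
The comment in the hunk above explains why the two ethernet gate clocks are now registered for the legacy mv643xx_eth devices and force-enabled: gating them would lose the MAC address programmed by the boot loader. Reduced to a generic helper, the pattern looks roughly like the sketch below; the helper name is illustrative, and orion_clkdev_add() is the plat-orion registration call already used in the hunk.

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>
#include <plat/common.h>	/* orion_clkdev_add() */

static void __init example_keep_gate_enabled(struct device_node *np,
					     unsigned int gate_bit,
					     const char *dev_id)
{
	struct of_phandle_args clkspec;
	struct clk *clk;

	clkspec.np = np;
	clkspec.args_count = 1;
	clkspec.args[0] = gate_bit;

	clk = of_clk_get_from_provider(&clkspec);
	if (IS_ERR(clk))
		return;

	/* make the gate visible to the legacy, non-DT platform device ... */
	orion_clkdev_add(NULL, dev_id, clk);
	/* ... and keep it running so the interface never loses its state */
	clk_prepare_enable(clk);
}
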
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index f655b2637b0e..e5f70415905a 100644
--- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
20 .duplex = DUPLEX_FULL, 20 .duplex = DUPLEX_FULL,
21}; 21};
22 22
23static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
24 .phy_addr = MV643XX_ETH_PHY_ADDR(11),
25};
26
23void __init iomega_ix2_200_init(void) 27void __init iomega_ix2_200_init(void)
24{ 28{
25 /* 29 /*
26 * Basic setup. Needs to be called early. 30 * Basic setup. Needs to be called early.
27 */ 31 */
28 kirkwood_ge01_init(&iomega_ix2_200_ge00_data); 32 kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
33 kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
29} 34}
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c
index 1c6e736cbbf8..08dd739aa709 100644
--- a/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
53 53
54static struct mvsdio_platform_data guruplug_mvsdio_data = { 54static struct mvsdio_platform_data guruplug_mvsdio_data = {
55 /* unfortunately the CD signal has not been connected */ 55 /* unfortunately the CD signal has not been connected */
56 .gpio_card_detect = -1,
57 .gpio_write_protect = -1,
56}; 58};
57 59
58static struct gpio_led guruplug_led_pins[] = { 60static struct gpio_led guruplug_led_pins[] = {
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index 8ddd69fdc937..6a6eb548307d 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
55 55
56static struct mvsdio_platform_data openrd_mvsdio_data = { 56static struct mvsdio_platform_data openrd_mvsdio_data = {
57 .gpio_card_detect = 29, /* MPP29 used as SD card detect */ 57 .gpio_card_detect = 29, /* MPP29 used as SD card detect */
58 .gpio_write_protect = -1,
58}; 59};
59 60
60static unsigned int openrd_mpp_config[] __initdata = { 61static unsigned int openrd_mpp_config[] __initdata = {
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index c7d93b48926b..d24223166e06 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
69 69
70static struct mvsdio_platform_data rd88f6281_mvsdio_data = { 70static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
71 .gpio_card_detect = 28, 71 .gpio_card_detect = 28,
72 .gpio_write_protect = -1,
72}; 73};
73 74
74static unsigned int rd88f6281_mpp_config[] __initdata = { 75static unsigned int rd88f6281_mpp_config[] __initdata = {
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 9f64d5632e07..76901f4ce611 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -223,13 +223,7 @@ static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = {
223}; 223};
224 224
225#if defined(CONFIG_USB_EHCI_MV) 225#if defined(CONFIG_USB_EHCI_MV)
226static char *pxa168_sph_clock_name[] = {
227 [0] = "PXA168-USBCLK",
228};
229
230static struct mv_usb_platform_data pxa168_sph_pdata = { 226static struct mv_usb_platform_data pxa168_sph_pdata = {
231 .clknum = 1,
232 .clkname = pxa168_sph_clock_name,
233 .mode = MV_USB_MODE_HOST, 227 .mode = MV_USB_MODE_HOST,
234 .phy_init = pxa_usb_phy_init, 228 .phy_init = pxa_usb_phy_init,
235 .phy_deinit = pxa_usb_phy_deinit, 229 .phy_deinit = pxa_usb_phy_deinit,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d1e2d595e79c..f62b68d926f4 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/platform_device.h>
12#include <linux/gpio.h> 13#include <linux/gpio.h>
13 14
14#include <asm/mach/arch.h> 15#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 22a9058f9f4d..6528a5fa6a26 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -162,13 +162,7 @@ static struct i2c_board_info ttc_dkb_i2c_info[] = {
162#ifdef CONFIG_USB_SUPPORT 162#ifdef CONFIG_USB_SUPPORT
163#if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O) 163#if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O)
164 164
165static char *pxa910_usb_clock_name[] = {
166 [0] = "U2OCLK",
167};
168
169static struct mv_usb_platform_data ttc_usb_pdata = { 165static struct mv_usb_platform_data ttc_usb_pdata = {
170 .clknum = 1,
171 .clkname = pxa910_usb_clock_name,
172 .vbus = NULL, 166 .vbus = NULL,
173 .mode = MV_USB_MODE_OTG, 167 .mode = MV_USB_MODE_OTG,
174 .otg_force_a_bus_req = 1, 168 .otg_force_a_bus_req = 1,
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 2969027f02fa..f9fd77e8f1f5 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
62{ 62{
63 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); 63 u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
64 64
65 writel_relaxed(0, event_base + TIMER_CLEAR); 65 ctrl &= ~TIMER_ENABLE_EN;
66 writel_relaxed(ctrl, event_base + TIMER_ENABLE);
67
68 writel_relaxed(ctrl, event_base + TIMER_CLEAR);
66 writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); 69 writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
67 writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); 70 writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
68 return 0; 71 return 0;
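
The reworked sequence above stops the timer before the counter is cleared and the new match value is written, so a still-enabled timer cannot match against stale values while the registers are being reprogrammed. Written out as a standalone helper it reads as below; the register offsets are placeholders standing in for the driver's real TIMER_* definitions.

#include <linux/io.h>
#include <linux/types.h>

#define TIMER_MATCH_VAL		0x00	/* placeholder offsets; see the */
#define TIMER_ENABLE		0x08	/* real definitions in timer.c  */
#define TIMER_CLEAR		0x0c
#define TIMER_ENABLE_EN		(1 << 0)

static int example_timer_set_next_event(void __iomem *event_base,
					unsigned long cycles)
{
	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);

	/* stop the timer so the old match value cannot fire mid-update */
	ctrl &= ~TIMER_ENABLE_EN;
	writel_relaxed(ctrl, event_base + TIMER_ENABLE);

	writel_relaxed(ctrl, event_base + TIMER_CLEAR);
	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);

	/* restart only once the new match value is in place */
	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
	return 0;
}
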
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 274ff58271de..830139a3e2ba 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -44,6 +44,8 @@
44 44
45#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) 45#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
46 46
47#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
48
47#define ACTIVE_DOORBELLS (8) 49#define ACTIVE_DOORBELLS (8)
48 50
49static DEFINE_RAW_SPINLOCK(irq_controller_lock); 51static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -55,40 +57,30 @@ static struct irq_domain *armada_370_xp_mpic_domain;
55/* 57/*
56 * In SMP mode: 58 * In SMP mode:
57 * For shared global interrupts, mask/unmask global enable bit 59 * For shared global interrupts, mask/unmask global enable bit
58 * For CPU interrtups, mask/unmask the calling CPU's bit 60 * For CPU interrupts, mask/unmask the calling CPU's bit
59 */ 61 */
60static void armada_370_xp_irq_mask(struct irq_data *d) 62static void armada_370_xp_irq_mask(struct irq_data *d)
61{ 63{
62#ifdef CONFIG_SMP
63 irq_hw_number_t hwirq = irqd_to_hwirq(d); 64 irq_hw_number_t hwirq = irqd_to_hwirq(d);
64 65
65 if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) 66 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
66 writel(hwirq, main_int_base + 67 writel(hwirq, main_int_base +
67 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); 68 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
68 else 69 else
69 writel(hwirq, per_cpu_int_base + 70 writel(hwirq, per_cpu_int_base +
70 ARMADA_370_XP_INT_SET_MASK_OFFS); 71 ARMADA_370_XP_INT_SET_MASK_OFFS);
71#else
72 writel(irqd_to_hwirq(d),
73 per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
74#endif
75} 72}
76 73
77static void armada_370_xp_irq_unmask(struct irq_data *d) 74static void armada_370_xp_irq_unmask(struct irq_data *d)
78{ 75{
79#ifdef CONFIG_SMP
80 irq_hw_number_t hwirq = irqd_to_hwirq(d); 76 irq_hw_number_t hwirq = irqd_to_hwirq(d);
81 77
82 if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) 78 if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
83 writel(hwirq, main_int_base + 79 writel(hwirq, main_int_base +
84 ARMADA_370_XP_INT_SET_ENABLE_OFFS); 80 ARMADA_370_XP_INT_SET_ENABLE_OFFS);
85 else 81 else
86 writel(hwirq, per_cpu_int_base + 82 writel(hwirq, per_cpu_int_base +
87 ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 83 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
88#else
89 writel(irqd_to_hwirq(d),
90 per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
91#endif
92} 84}
93 85
94#ifdef CONFIG_SMP 86#ifdef CONFIG_SMP
@@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
144 unsigned int virq, irq_hw_number_t hw) 136 unsigned int virq, irq_hw_number_t hw)
145{ 137{
146 armada_370_xp_irq_mask(irq_get_irq_data(virq)); 138 armada_370_xp_irq_mask(irq_get_irq_data(virq));
147 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); 139 if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
140 writel(hw, per_cpu_int_base +
141 ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
142 else
143 writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
148 irq_set_status_flags(virq, IRQ_LEVEL); 144 irq_set_status_flags(virq, IRQ_LEVEL);
149 145
150 if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { 146 if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
151 irq_set_percpu_devid(virq); 147 irq_set_percpu_devid(virq);
152 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, 148 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
153 handle_percpu_devid_irq); 149 handle_percpu_devid_irq);
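
With the hunk above, the Armada 370/XP Timer0 interrupt is mapped as a per-CPU IRQ (irq_set_percpu_devid() plus handle_percpu_devid_irq), which changes how a consumer requests it: it is requested once with a per-CPU cookie and then enabled on each CPU individually. A hedged sketch of that consumer side, with illustrative names:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct example_percpu_state {
	unsigned long count;
};

static struct example_percpu_state __percpu *example_state;

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	/* for per-CPU IRQs, dev_id is already this CPU's instance */
	struct example_percpu_state *state = dev_id;

	state->count++;
	return IRQ_HANDLED;
}

static int __init example_percpu_irq_setup(unsigned int virq)
{
	int err;

	example_state = alloc_percpu(struct example_percpu_state);
	if (!example_state)
		return -ENOMEM;

	err = request_percpu_irq(virq, example_percpu_handler,
				 "example-local-timer", example_state);
	if (err)
		return err;

	/* each CPU that wants the interrupt enables its own copy */
	enable_percpu_irq(virq, 0);
	return 0;
}
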
diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c
index 8fb23af154b3..e26eeba46598 100644
--- a/arch/arm/mach-mxs/icoll.c
+++ b/arch/arm/mach-mxs/icoll.c
@@ -100,7 +100,7 @@ static struct irq_domain_ops icoll_irq_domain_ops = {
100 .xlate = irq_domain_xlate_onecell, 100 .xlate = irq_domain_xlate_onecell,
101}; 101};
102 102
103void __init icoll_of_init(struct device_node *np, 103static void __init icoll_of_init(struct device_node *np,
104 struct device_node *interrupt_parent) 104 struct device_node *interrupt_parent)
105{ 105{
106 /* 106 /*
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 052186713347..e7b781d3788f 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
41 .lower_margin = 4, 41 .lower_margin = 4,
42 .hsync_len = 1, 42 .hsync_len = 1,
43 .vsync_len = 1, 43 .vsync_len = 1,
44 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
45 FB_SYNC_DOTCLK_FAILING_ACT,
46 }, 44 },
47}; 45};
48 46
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
59 .lower_margin = 10, 57 .lower_margin = 10,
60 .hsync_len = 10, 58 .hsync_len = 10,
61 .vsync_len = 10, 59 .vsync_len = 10,
62 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
63 FB_SYNC_DOTCLK_FAILING_ACT,
64 }, 60 },
65}; 61};
66 62
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
77 .lower_margin = 45, 73 .lower_margin = 45,
78 .hsync_len = 1, 74 .hsync_len = 1,
79 .vsync_len = 1, 75 .vsync_len = 1,
80 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
81 }, 76 },
82}; 77};
83 78
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
94 .lower_margin = 13, 89 .lower_margin = 13,
95 .hsync_len = 48, 90 .hsync_len = 48,
96 .vsync_len = 3, 91 .vsync_len = 3,
97 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | 92 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
98 FB_SYNC_DATA_ENABLE_HIGH_ACT |
99 FB_SYNC_DOTCLK_FAILING_ACT,
100 }, 93 },
101}; 94};
102 95
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
113 .lower_margin = 0x15, 106 .lower_margin = 0x15,
114 .hsync_len = 64, 107 .hsync_len = 64,
115 .vsync_len = 4, 108 .vsync_len = 4,
116 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | 109 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
117 FB_SYNC_DATA_ENABLE_HIGH_ACT |
118 FB_SYNC_DOTCLK_FAILING_ACT,
119 }, 110 },
120}; 111};
121 112
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
132 .lower_margin = 2, 123 .lower_margin = 2,
133 .hsync_len = 15, 124 .hsync_len = 15,
134 .vsync_len = 15, 125 .vsync_len = 15,
135 .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
136 }, 126 },
137}; 127};
138 128
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
259 mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); 249 mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
260 mxsfb_pdata.default_bpp = 32; 250 mxsfb_pdata.default_bpp = 32;
261 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 251 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
252 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
253 MXSFB_SYNC_DOTCLK_FAILING_ACT;
262} 254}
263 255
264static inline void enable_clk_enet_out(void) 256static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
278 mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); 270 mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
279 mxsfb_pdata.default_bpp = 32; 271 mxsfb_pdata.default_bpp = 32;
280 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 272 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
273 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
274 MXSFB_SYNC_DOTCLK_FAILING_ACT;
281 275
282 mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); 276 mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
283} 277}
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
297 mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); 291 mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
298 mxsfb_pdata.default_bpp = 16; 292 mxsfb_pdata.default_bpp = 16;
299 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 293 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
294 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
300} 295}
301 296
302static void __init sc_sps1_init(void) 297static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
322 mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); 317 mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
323 mxsfb_pdata.default_bpp = 32; 318 mxsfb_pdata.default_bpp = 32;
324 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; 319 mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
320 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
321 MXSFB_SYNC_DOTCLK_FAILING_ACT;
325} 322}
326 323
327#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) 324#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@@ -402,17 +399,18 @@ static void __init cfa10049_init(void)
402{ 399{
403 enable_clk_enet_out(); 400 enable_clk_enet_out();
404 update_fec_mac_prop(OUI_CRYSTALFONTZ); 401 update_fec_mac_prop(OUI_CRYSTALFONTZ);
402
403 mxsfb_pdata.mode_list = cfa10049_video_modes;
404 mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
405 mxsfb_pdata.default_bpp = 32;
406 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
407 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
405} 408}
406 409
407static void __init cfa10037_init(void) 410static void __init cfa10037_init(void)
408{ 411{
409 enable_clk_enet_out(); 412 enable_clk_enet_out();
410 update_fec_mac_prop(OUI_CRYSTALFONTZ); 413 update_fec_mac_prop(OUI_CRYSTALFONTZ);
411
412 mxsfb_pdata.mode_list = cfa10049_video_modes;
413 mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
414 mxsfb_pdata.default_bpp = 32;
415 mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
416} 414}
417 415
418static void __init apf28_init(void) 416static void __init apf28_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
423 mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); 421 mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
424 mxsfb_pdata.default_bpp = 16; 422 mxsfb_pdata.default_bpp = 16;
425 mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; 423 mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
424 mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
425 MXSFB_SYNC_DOTCLK_FAILING_ACT;
426} 426}
427 427
428static void __init mxs_machine_init(void) 428static void __init mxs_machine_init(void)
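
The mach-mxs changes above split the sync flags in two: fb_videomode.sync keeps only the standard FB_SYNC_* polarity bits, while the controller-specific data-enable and dot-clock polarities move into the mxsfb platform data as MXSFB_SYNC_* flags set per board. A condensed sketch of the resulting board setup, with made-up mode values and only the fields relevant to the change; the platform data header path is assumed to be <linux/mxsfb.h>.

#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mxsfb.h>

static struct fb_videomode example_video_modes[] = {
	{
		.name		= "example-wvga",
		.refresh	= 60,
		.xres		= 800,
		.yres		= 480,
		/* only standard polarity flags remain here */
		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
};

static struct mxsfb_platform_data example_mxsfb_pdata;

static void __init example_board_init(void)
{
	example_mxsfb_pdata.mode_list	= example_video_modes;
	example_mxsfb_pdata.mode_count	= ARRAY_SIZE(example_video_modes);
	example_mxsfb_pdata.default_bpp	= 32;
	example_mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
	/* controller-specific polarities now live in the platform data */
	example_mxsfb_pdata.sync	= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
					  MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
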
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index a4294aa9f301..e63b7d87acbd 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -18,6 +18,7 @@
18 18
19#include <mach/mx23.h> 19#include <mach/mx23.h>
20#include <mach/mx28.h> 20#include <mach/mx28.h>
21#include <mach/common.h>
21 22
22/* 23/*
23 * Define the MX23 memory map. 24 * Define the MX23 memory map.
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
index 54add60f94c9..1dff46703753 100644
--- a/arch/arm/mach-mxs/ocotp.c
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -19,6 +19,7 @@
19#include <asm/processor.h> /* for cpu_relax() */ 19#include <asm/processor.h> /* for cpu_relax() */
20 20
21#include <mach/mxs.h> 21#include <mach/mxs.h>
22#include <mach/common.h>
22 23
23#define OCOTP_WORD_OFFSET 0x20 24#define OCOTP_WORD_OFFSET 0x20
24#define OCOTP_WORD_COUNT 0x20 25#define OCOTP_WORD_COUNT 0x20
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index cb7c6ae2e3fc..6c4f766365a2 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -543,15 +543,6 @@ static struct clk usb_dc_ck = {
543 /* Direct from ULPD, no parent */ 543 /* Direct from ULPD, no parent */
544 .rate = 48000000, 544 .rate = 48000000,
545 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), 545 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
546 .enable_bit = USB_REQ_EN_SHIFT,
547};
548
549static struct clk usb_dc_ck7xx = {
550 .name = "usb_dc_ck",
551 .ops = &clkops_generic,
552 /* Direct from ULPD, no parent */
553 .rate = 48000000,
554 .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
555 .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, 546 .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT,
556}; 547};
557 548
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
727 CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), 718 CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
728 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), 719 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
729 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), 720 CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
730 CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), 721 CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
731 CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
732 CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), 722 CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
733 CLK(NULL, "mclk", &mclk_16xx, CK_16XX), 723 CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
734 CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), 724 CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index fb18831e88aa..14f7e9920479 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -31,6 +31,8 @@
31 31
32#include <plat/i2c.h> 32#include <plat/i2c.h>
33 33
34#include <mach/irqs.h>
35
34#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) 36#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
35void omap7xx_map_io(void); 37void omap7xx_map_io(void);
36#else 38#else
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 7a7690ab6cb8..db37f49da5ac 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -43,6 +43,7 @@
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/io.h> 44#include <linux/io.h>
45#include <linux/atomic.h> 45#include <linux/atomic.h>
46#include <linux/cpu.h>
46 47
47#include <asm/fncpy.h> 48#include <asm/fncpy.h>
48#include <asm/system_misc.h> 49#include <asm/system_misc.h>
@@ -584,8 +585,7 @@ static void omap_pm_init_proc(void)
584static int omap_pm_prepare(void) 585static int omap_pm_prepare(void)
585{ 586{
586 /* We cannot sleep in idle until we have resumed */ 587 /* We cannot sleep in idle until we have resumed */
587 disable_hlt(); 588 cpu_idle_poll_ctrl(true);
588
589 return 0; 589 return 0;
590} 590}
591 591
@@ -621,7 +621,7 @@ static int omap_pm_enter(suspend_state_t state)
621 621
622static void omap_pm_finish(void) 622static void omap_pm_finish(void)
623{ 623{
624 enable_hlt(); 624 cpu_idle_poll_ctrl(false);
625} 625}
626 626
627 627
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 49ac3dfebef9..8111cd9ff3e5 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -311,9 +311,6 @@ config MACH_OMAP_ZOOM2
311 default y 311 default y
312 select OMAP_PACKAGE_CBB 312 select OMAP_PACKAGE_CBB
313 select REGULATOR_FIXED_VOLTAGE if REGULATOR 313 select REGULATOR_FIXED_VOLTAGE if REGULATOR
314 select SERIAL_8250
315 select SERIAL_8250_CONSOLE
316 select SERIAL_CORE_CONSOLE
317 314
318config MACH_OMAP_ZOOM3 315config MACH_OMAP_ZOOM3
319 bool "OMAP3630 Zoom3 board" 316 bool "OMAP3630 Zoom3 board"
@@ -321,9 +318,6 @@ config MACH_OMAP_ZOOM3
321 default y 318 default y
322 select OMAP_PACKAGE_CBP 319 select OMAP_PACKAGE_CBP
323 select REGULATOR_FIXED_VOLTAGE if REGULATOR 320 select REGULATOR_FIXED_VOLTAGE if REGULATOR
324 select SERIAL_8250
325 select SERIAL_8250_CONSOLE
326 select SERIAL_CORE_CONSOLE
327 321
328config MACH_CM_T35 322config MACH_CM_T35
329 bool "CompuLab CM-T35/CM-T3730 modules" 323 bool "CompuLab CM-T35/CM-T3730 modules"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 0274ff7a2a2b..e54a48060198 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -102,6 +102,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
102 .init_irq = omap_intc_of_init, 102 .init_irq = omap_intc_of_init,
103 .handle_irq = omap3_intc_handle_irq, 103 .handle_irq = omap3_intc_handle_irq,
104 .init_machine = omap_generic_init, 104 .init_machine = omap_generic_init,
105 .init_late = omap3_init_late,
105 .init_time = omap3_sync32k_timer_init, 106 .init_time = omap3_sync32k_timer_init,
106 .dt_compat = omap3_boards_compat, 107 .dt_compat = omap3_boards_compat,
107 .restart = omap3xxx_restart, 108 .restart = omap3xxx_restart,
@@ -119,6 +120,7 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
119 .init_irq = omap_intc_of_init, 120 .init_irq = omap_intc_of_init,
120 .handle_irq = omap3_intc_handle_irq, 121 .handle_irq = omap3_intc_handle_irq,
121 .init_machine = omap_generic_init, 122 .init_machine = omap_generic_init,
123 .init_late = omap3_init_late,
122 .init_time = omap3_secure_sync32k_timer_init, 124 .init_time = omap3_secure_sync32k_timer_init,
123 .dt_compat = omap3_gp_boards_compat, 125 .dt_compat = omap3_gp_boards_compat,
124 .restart = omap3xxx_restart, 126 .restart = omap3xxx_restart,
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index f7c4616cbb60..d2ea68ea678a 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/leds.h> 19#include <linux/leds.h>
20#include <linux/usb/phy.h>
20#include <linux/usb/musb.h> 21#include <linux/usb/musb.h>
21#include <linux/platform_data/spi-omap2-mcspi.h> 22#include <linux/platform_data/spi-omap2-mcspi.h>
22 23
@@ -98,6 +99,7 @@ static void __init rx51_init(void)
98 sdrc_params = nokia_get_sdram_timings(); 99 sdrc_params = nokia_get_sdram_timings();
99 omap_sdrc_init(sdrc_params, sdrc_params); 100 omap_sdrc_init(sdrc_params, sdrc_params);
100 101
102 usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
101 usb_musb_init(&musb_board_data); 103 usb_musb_init(&musb_board_data);
102 rx51_peripherals_init(); 104 rx51_peripherals_init();
103 105
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 3d58f335f173..0c6834ae1fc4 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -52,6 +52,13 @@
52 */ 52 */
53#define OMAP4_DPLL_ABE_DEFFREQ 98304000 53#define OMAP4_DPLL_ABE_DEFFREQ 98304000
54 54
55/*
56 * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
57 * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
58 * locked frequency for the USB DPLL is 960MHz.
59 */
60#define OMAP4_DPLL_USB_DEFFREQ 960000000
61
55/* Root clocks */ 62/* Root clocks */
56 63
57DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); 64DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
1011 OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, 1018 OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
1012 hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); 1019 hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
1013 1020
1021DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
1022 OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
1023 OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
1024
1014DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, 1025DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
1015 OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, 1026 OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
1016 OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); 1027 OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
1538 CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), 1549 CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X),
1539 CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), 1550 CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X),
1540 CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), 1551 CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X),
1552 CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
1541 CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), 1553 CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
1542 CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), 1554 CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
1543 CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), 1555 CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
1705 if (rc) 1717 if (rc)
1706 pr_err("%s: failed to configure ABE DPLL!\n", __func__); 1718 pr_err("%s: failed to configure ABE DPLL!\n", __func__);
1707 1719
1720 /*
1721 * Lock USB DPLL on OMAP4 devices so that the L3INIT power
1722 * domain can transition to retention state when not in use.
1723 */
1724 rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
1725 if (rc)
1726 pr_err("%s: failed to configure USB DPLL!\n", __func__);
1727
1708 return 0; 1728 return 0;
1709} 1729}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 0a6b9c7a63da..d6ba13e1c540 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -108,7 +108,6 @@ void omap35xx_init_late(void);
108void omap3630_init_late(void); 108void omap3630_init_late(void);
109void am35xx_init_late(void); 109void am35xx_init_late(void);
110void ti81xx_init_late(void); 110void ti81xx_init_late(void);
111void omap4430_init_late(void);
112int omap2_common_pm_late_init(void); 111int omap2_common_pm_late_init(void);
113 112
114#if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) 113#if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430)
@@ -294,5 +293,8 @@ extern void omap_reserve(void);
294struct omap_hwmod; 293struct omap_hwmod;
295extern int omap_dss_reset(struct omap_hwmod *); 294extern int omap_dss_reset(struct omap_hwmod *);
296 295
296/* SoC specific clock initializer */
297extern int (*omap_clk_init)(void);
298
297#endif /* __ASSEMBLER__ */ 299#endif /* __ASSEMBLER__ */
298#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ 300#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index e4b16c8efe8b..410e1bac7815 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1122,9 +1122,6 @@ int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
1122 /* TODO: remove, see function definition */ 1122 /* TODO: remove, see function definition */
1123 gpmc_convert_ps_to_ns(gpmc_t); 1123 gpmc_convert_ps_to_ns(gpmc_t);
1124 1124
1125 /* Now the GPMC is initialised, unreserve the chip-selects */
1126 gpmc_cs_map = 0;
1127
1128 return 0; 1125 return 0;
1129} 1126}
1130 1127
@@ -1383,6 +1380,9 @@ static int gpmc_probe(struct platform_device *pdev)
1383 if (IS_ERR_VALUE(gpmc_setup_irq())) 1380 if (IS_ERR_VALUE(gpmc_setup_irq()))
1384 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); 1381 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
1385 1382
1383 /* Now the GPMC is initialised, unreserve the chip-selects */
1384 gpmc_cs_map = 0;
1385
1386 rc = gpmc_probe_dt(pdev); 1386 rc = gpmc_probe_dt(pdev);
1387 if (rc < 0) { 1387 if (rc < 0) {
1388 clk_disable_unprepare(gpmc_l3_clk); 1388 clk_disable_unprepare(gpmc_l3_clk);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 2c3fdd65387b..5c445ca1e271 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -55,6 +55,12 @@
55#include "prm44xx.h" 55#include "prm44xx.h"
56 56
57/* 57/*
58 * omap_clk_init: points to a function that does the SoC-specific
59 * clock initializations
60 */
61int (*omap_clk_init)(void);
62
63/*
58 * The machine specific code may provide the extra mapping besides the 64 * The machine specific code may provide the extra mapping besides the
59 * default mapping provided here. 65 * default mapping provided here.
60 */ 66 */
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
397 omap242x_clockdomains_init(); 403 omap242x_clockdomains_init();
398 omap2420_hwmod_init(); 404 omap2420_hwmod_init();
399 omap_hwmod_init_postsetup(); 405 omap_hwmod_init_postsetup();
400 omap2420_clk_init(); 406 omap_clk_init = omap2420_clk_init;
401} 407}
402 408
403void __init omap2420_init_late(void) 409void __init omap2420_init_late(void)
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
427 omap243x_clockdomains_init(); 433 omap243x_clockdomains_init();
428 omap2430_hwmod_init(); 434 omap2430_hwmod_init();
429 omap_hwmod_init_postsetup(); 435 omap_hwmod_init_postsetup();
430 omap2430_clk_init(); 436 omap_clk_init = omap2430_clk_init;
431} 437}
432 438
433void __init omap2430_init_late(void) 439void __init omap2430_init_late(void)
@@ -462,7 +468,7 @@ void __init omap3_init_early(void)
462 omap3xxx_clockdomains_init(); 468 omap3xxx_clockdomains_init();
463 omap3xxx_hwmod_init(); 469 omap3xxx_hwmod_init();
464 omap_hwmod_init_postsetup(); 470 omap_hwmod_init_postsetup();
465 omap3xxx_clk_init(); 471 omap_clk_init = omap3xxx_clk_init;
466} 472}
467 473
468void __init omap3430_init_early(void) 474void __init omap3430_init_early(void)
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
500 omap3xxx_clockdomains_init(); 506 omap3xxx_clockdomains_init();
501 omap3xxx_hwmod_init(); 507 omap3xxx_hwmod_init();
502 omap_hwmod_init_postsetup(); 508 omap_hwmod_init_postsetup();
503 omap3xxx_clk_init(); 509 omap_clk_init = omap3xxx_clk_init;
504} 510}
505 511
506void __init omap3_init_late(void) 512void __init omap3_init_late(void)
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
568 am33xx_clockdomains_init(); 574 am33xx_clockdomains_init();
569 am33xx_hwmod_init(); 575 am33xx_hwmod_init();
570 omap_hwmod_init_postsetup(); 576 omap_hwmod_init_postsetup();
571 am33xx_clk_init(); 577 omap_clk_init = am33xx_clk_init;
572} 578}
573#endif 579#endif
574 580
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
593 omap44xx_clockdomains_init(); 599 omap44xx_clockdomains_init();
594 omap44xx_hwmod_init(); 600 omap44xx_hwmod_init();
595 omap_hwmod_init_postsetup(); 601 omap_hwmod_init_postsetup();
596 omap4xxx_clk_init(); 602 omap_clk_init = omap4xxx_clk_init;
597} 603}
598 604
599void __init omap4430_init_late(void) 605void __init omap4430_init_late(void)
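
The io.c changes above stop calling the SoC clock initializer from the *_init_early() functions and instead record it in the new omap_clk_init pointer, which the timer init macros (further down in arch/arm/mach-omap2/timer.c) invoke later in boot. The skeleton of that deferred-initialization pattern is sketched below, with illustrative names apart from the idea of omap_clk_init itself.

#include <linux/init.h>

/* set during early SoC init, consumed by the timer init code */
int (*example_clk_init)(void);

static int example_soc_clk_init(void)
{
	/* register this SoC's clock tree with the clock framework */
	return 0;
}

static void __init example_soc_init_early(void)
{
	/* only remember which initializer applies; do not run it yet */
	example_clk_init = example_soc_clk_init;
}

static void __init example_timer_init(void)
{
	if (example_clk_init)
		example_clk_init();
	/* clockevent/clocksource setup follows, now with clocks in place */
}
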
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 6a217c98db54..f82cf878d6af 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -211,8 +211,6 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
211 return -EINVAL; 211 return -EINVAL;
212 } 212 }
213 213
214 pr_err("%s: Could not find signal %s\n", __func__, muxname);
215
216 return -ENODEV; 214 return -ENODEV;
217} 215}
218 216
@@ -234,6 +232,8 @@ int __init omap_mux_get_by_name(const char *muxname,
234 return mux_mode; 232 return mux_mode;
235 } 233 }
236 234
235 pr_err("%s: Could not find signal %s\n", __func__, muxname);
236
237 return -ENODEV; 237 return -ENODEV;
238} 238}
239 239
@@ -739,8 +739,9 @@ static void __init omap_mux_dbg_create_entry(
739 list_for_each_entry(e, &partition->muxmodes, node) { 739 list_for_each_entry(e, &partition->muxmodes, node) {
740 struct omap_mux *m = &e->mux; 740 struct omap_mux *m = &e->mux;
741 741
742 (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, 742 (void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO,
743 m, &omap_mux_dbg_signal_fops); 743 mux_dbg_dir, m,
744 &omap_mux_dbg_signal_fops);
744 } 745 }
745} 746}
746 747
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index c2c798c08c2b..e512253601c8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -138,6 +138,7 @@
138#include <linux/spinlock.h> 138#include <linux/spinlock.h>
139#include <linux/slab.h> 139#include <linux/slab.h>
140#include <linux/bootmem.h> 140#include <linux/bootmem.h>
141#include <linux/cpu.h>
141 142
142#include <asm/system_misc.h> 143#include <asm/system_misc.h>
143 144
@@ -1368,7 +1369,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
1368 } 1369 }
1369 1370
1370 if (sf & SYSC_HAS_MIDLEMODE) { 1371 if (sf & SYSC_HAS_MIDLEMODE) {
1371 if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1372 if (oh->flags & HWMOD_FORCE_MSTANDBY) {
1373 idlemode = HWMOD_IDLEMODE_FORCE;
1374 } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
1372 idlemode = HWMOD_IDLEMODE_NO; 1375 idlemode = HWMOD_IDLEMODE_NO;
1373 } else { 1376 } else {
1374 if (sf & SYSC_HAS_ENAWAKEUP) 1377 if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1440,7 +1443,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
1440 } 1443 }
1441 1444
1442 if (sf & SYSC_HAS_MIDLEMODE) { 1445 if (sf & SYSC_HAS_MIDLEMODE) {
1443 if (oh->flags & HWMOD_SWSUP_MSTANDBY) { 1446 if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
1447 (oh->flags & HWMOD_FORCE_MSTANDBY)) {
1444 idlemode = HWMOD_IDLEMODE_FORCE; 1448 idlemode = HWMOD_IDLEMODE_FORCE;
1445 } else { 1449 } else {
1446 if (sf & SYSC_HAS_ENAWAKEUP) 1450 if (sf & SYSC_HAS_ENAWAKEUP)
@@ -2154,7 +2158,7 @@ static int _enable(struct omap_hwmod *oh)
2154 if (soc_ops.enable_module) 2158 if (soc_ops.enable_module)
2155 soc_ops.enable_module(oh); 2159 soc_ops.enable_module(oh);
2156 if (oh->flags & HWMOD_BLOCK_WFI) 2160 if (oh->flags & HWMOD_BLOCK_WFI)
2157 disable_hlt(); 2161 cpu_idle_poll_ctrl(true);
2158 2162
2159 if (soc_ops.update_context_lost) 2163 if (soc_ops.update_context_lost)
2160 soc_ops.update_context_lost(oh); 2164 soc_ops.update_context_lost(oh);
@@ -2218,7 +2222,7 @@ static int _idle(struct omap_hwmod *oh)
2218 _del_initiator_dep(oh, mpu_oh); 2222 _del_initiator_dep(oh, mpu_oh);
2219 2223
2220 if (oh->flags & HWMOD_BLOCK_WFI) 2224 if (oh->flags & HWMOD_BLOCK_WFI)
2221 enable_hlt(); 2225 cpu_idle_poll_ctrl(false);
2222 if (soc_ops.disable_module) 2226 if (soc_ops.disable_module)
2223 soc_ops.disable_module(oh); 2227 soc_ops.disable_module(oh);
2224 2228
@@ -2328,7 +2332,7 @@ static int _shutdown(struct omap_hwmod *oh)
2328 _del_initiator_dep(oh, mpu_oh); 2332 _del_initiator_dep(oh, mpu_oh);
2329 /* XXX what about the other system initiators here? dma, dsp */ 2333 /* XXX what about the other system initiators here? dma, dsp */
2330 if (oh->flags & HWMOD_BLOCK_WFI) 2334 if (oh->flags & HWMOD_BLOCK_WFI)
2331 enable_hlt(); 2335 cpu_idle_poll_ctrl(false);
2332 if (soc_ops.disable_module) 2336 if (soc_ops.disable_module)
2333 soc_ops.disable_module(oh); 2337 soc_ops.disable_module(oh);
2334 _disable_clocks(oh); 2338 _disable_clocks(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index d43d9b608eda..d5dc935f6060 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
427 * 427 *
428 * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out 428 * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
429 * of idle, rather than relying on module smart-idle 429 * of idle, rather than relying on module smart-idle
430 * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out 430 * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
431 * of standby, rather than relying on module smart-standby 431 * out of standby, rather than relying on module smart-standby
432 * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for 432 * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
433 * SDRAM controller, etc. XXX probably belongs outside the main hwmod file 433 * SDRAM controller, etc. XXX probably belongs outside the main hwmod file
434 * XXX Should be HWMOD_SETUP_NO_RESET 434 * XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
459 * correctly, or this is being abused to deal with some PM latency 459 * correctly, or this is being abused to deal with some PM latency
460 * issues -- but we're currently suffering from a shortage of 460 * issues -- but we're currently suffering from a shortage of
461 * folks who are able to track these issues down properly. 461 * folks who are able to track these issues down properly.
462 * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device
463 * is kept in force-standby mode. Failing to do so causes PM problems
464 * with musb on OMAP3630 at least. Note that musb has a dedicated register
465 * to control MSTANDBY signal when MIDLEMODE is set to force-standby.
462 */ 466 */
463#define HWMOD_SWSUP_SIDLE (1 << 0) 467#define HWMOD_SWSUP_SIDLE (1 << 0)
464#define HWMOD_SWSUP_MSTANDBY (1 << 1) 468#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
471#define HWMOD_16BIT_REG (1 << 8) 475#define HWMOD_16BIT_REG (1 << 8)
472#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) 476#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
473#define HWMOD_BLOCK_WFI (1 << 10) 477#define HWMOD_BLOCK_WFI (1 << 10)
478#define HWMOD_FORCE_MSTANDBY (1 << 11)
474 479
475/* 480/*
476 * omap_hwmod._int_flags definitions 481 * omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ac7e03ec952f..5112d04e7b79 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
1707 * Erratum ID: i479 idle_req / idle_ack mechanism potentially 1707 * Erratum ID: i479 idle_req / idle_ack mechanism potentially
1708 * broken when autoidle is enabled 1708 * broken when autoidle is enabled
1709 * workaround is to disable the autoidle bit at module level. 1709 * workaround is to disable the autoidle bit at module level.
1710 *
1711 * Enabling the device in any other MIDLEMODE setting but force-idle
1712 * causes core_pwrdm to not enter idle states at least on OMAP3630.
1713 * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY
1714 * signal when MIDLEMODE is set to force-idle.
1710 */ 1715 */
1711 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE 1716 .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
1712 | HWMOD_SWSUP_MSTANDBY, 1717 | HWMOD_FORCE_MSTANDBY,
1713}; 1718};
1714 1719
1715/* usb_otg_hs */ 1720/* usb_otg_hs */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 0e47d2e1687c..eaba9dc91a0d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
2719 .name = "ocp2scp_usb_phy", 2719 .name = "ocp2scp_usb_phy",
2720 .class = &omap44xx_ocp2scp_hwmod_class, 2720 .class = &omap44xx_ocp2scp_hwmod_class,
2721 .clkdm_name = "l3_init_clkdm", 2721 .clkdm_name = "l3_init_clkdm",
2722 .main_clk = "func_48m_fclk", 2722 /*
2723 * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP
2724 * block as an "optional clock," and normally should never be
2725 * specified as the main_clk for an OMAP IP block. However it
2726 * turns out that this clock is actually the main clock for
2727 * the ocp2scp_usb_phy IP block:
2728 * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html
2729 * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems
2730 * to be the best workaround.
2731 */
2732 .main_clk = "ocp2scp_usb_phy_phy_48m",
2723 .prcm = { 2733 .prcm = {
2724 .omap4 = { 2734 .omap4 = {
2725 .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, 2735 .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 673a4c1d1d76..dec553349ae2 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -218,7 +218,7 @@ static int omap_pm_enter(suspend_state_t suspend_state)
218 218
219static int omap_pm_begin(suspend_state_t state) 219static int omap_pm_begin(suspend_state_t state)
220{ 220{
221 disable_hlt(); 221 cpu_idle_poll_ctrl(true);
222 if (cpu_is_omap34xx()) 222 if (cpu_is_omap34xx())
223 omap_prcm_irq_prepare(); 223 omap_prcm_irq_prepare();
224 return 0; 224 return 0;
@@ -226,8 +226,7 @@ static int omap_pm_begin(suspend_state_t state)
226 226
227static void omap_pm_end(void) 227static void omap_pm_end(void)
228{ 228{
229 enable_hlt(); 229 cpu_idle_poll_ctrl(false);
230 return;
231} 230}
232 231
233static void omap_pm_finish(void) 232static void omap_pm_finish(void)
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index ea62e75ef21d..152a10cf4f1d 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -126,8 +126,8 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
126 * omap_default_idle - OMAP4 default idle routine. 126 *
127 * 127 *
128 * Implements OMAP4 memory, IO ordering requirements which can't be addressed 128 * Implements OMAP4 memory, IO ordering requirements which can't be addressed
129 * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and 129 * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPU_IDLE and
130 * by secondary CPU with CONFIG_CPUIDLE. 130 * by secondary CPU with CONFIG_CPU_IDLE.
131 */ 131 */
132static void omap_default_idle(void) 132static void omap_default_idle(void)
133{ 133{
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 2bdd4cf17a8f..f62b509ed08d 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
547 clksrc_nr, clksrc_src) \ 547 clksrc_nr, clksrc_src) \
548void __init omap##name##_gptimer_timer_init(void) \ 548void __init omap##name##_gptimer_timer_init(void) \
549{ \ 549{ \
550 if (omap_clk_init) \
551 omap_clk_init(); \
550 omap_dmtimer_init(); \ 552 omap_dmtimer_init(); \
551 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ 553 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
552 omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ 554 omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \
556 clksrc_nr, clksrc_src) \ 558 clksrc_nr, clksrc_src) \
557void __init omap##name##_sync32k_timer_init(void) \ 559void __init omap##name##_sync32k_timer_init(void) \
558{ \ 560{ \
561 if (omap_clk_init) \
562 omap_clk_init(); \
559 omap_dmtimer_init(); \ 563 omap_dmtimer_init(); \
560 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ 564 omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
561 /* Enable the use of clocksource="gp_timer" kernel parameter */ \ 565 /* Enable the use of clocksource="gp_timer" kernel parameter */ \
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 35a8014529ca..94fbb815680c 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/cpu.h>
17#include <asm/system_misc.h> 18#include <asm/system_misc.h>
18#include <asm/mach/arch.h> 19#include <asm/mach/arch.h>
19#include <mach/orion5x.h> 20#include <mach/orion5x.h>
@@ -52,7 +53,7 @@ static void __init orion5x_dt_init(void)
52 */ 53 */
53 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { 54 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
54 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); 55 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
55 disable_hlt(); 56 cpu_idle_poll_ctrl(true);
56 } 57 }
57 58
58 if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2")) 59 if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2"))
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index d068f1431c40..ad71c8a03ffd 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -293,7 +293,7 @@ void __init orion5x_init(void)
293 */ 293 */
294 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { 294 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
295 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); 295 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
296 disable_hlt(); 296 cpu_idle_poll_ctrl(true);
297 } 297 }
298 298
299 /* 299 /*
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index af41888acbd6..969b0ba7fa70 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -505,6 +505,7 @@ static struct w1_gpio_platform_data w1_gpio_platform_data = {
505 .pin = GPIO_ONE_WIRE, 505 .pin = GPIO_ONE_WIRE,
506 .is_open_drain = 0, 506 .is_open_drain = 0,
507 .enable_external_pullup = w1_enable_external_pullup, 507 .enable_external_pullup = w1_enable_external_pullup,
508 .ext_pullup_enable_pin = -EINVAL,
508}; 509};
509 510
510struct platform_device raumfeld_w1_gpio_device = { 511struct platform_device raumfeld_w1_gpio_device = {
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 04b87ec92537..1069b5680826 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -123,6 +123,11 @@ static struct clk s3c2440_clk_ac97 = {
123 .ctrlbit = S3C2440_CLKCON_AC97, 123 .ctrlbit = S3C2440_CLKCON_AC97,
124}; 124};
125 125
126#define S3C24XX_VA_UART0 (S3C_VA_UART)
127#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 )
128#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 )
129#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 )
130
126static unsigned long s3c2440_fclk_n_getrate(struct clk *clk) 131static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
127{ 132{
128 unsigned long ucon0, ucon1, ucon2, divisor; 133 unsigned long ucon0, ucon1, ucon2, divisor;
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index 6bcf87f65f9e..92e609440c57 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -239,6 +239,11 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
239 239
240/* Serial port registrations */ 240/* Serial port registrations */
241 241
242#define S3C2410_PA_UART0 (S3C24XX_PA_UART)
243#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 )
244#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 )
245#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 )
246
242static struct resource s3c2410_uart0_resource[] = { 247static struct resource s3c2410_uart0_resource[] = {
243 [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K), 248 [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
244 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \ 249 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index b7a9f4d469e8..1e73f5fa8659 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,10 +188,8 @@
188 188
189#if defined(CONFIG_CPU_S3C2416) 189#if defined(CONFIG_CPU_S3C2416)
190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1) 190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
191#elif defined(CONFIG_CPU_S3C2443)
192#define NR_IRQS (IRQ_S3C2443_AC97+1)
193#else 191#else
194#define NR_IRQS (IRQ_S3C2440_AC97+1) 192#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
195#endif 193#endif
196 194
197/* compatibility define. */ 195/* compatibility define. */
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c
index cb9f5e011e73..d8ba9bee4c7e 100644
--- a/arch/arm/mach-s3c24xx/irq.c
+++ b/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
500 base = (void *)0xfd000000; 500 base = (void *)0xfd000000;
501 501
502 intc->reg_mask = base + 0xa4; 502 intc->reg_mask = base + 0xa4;
503 intc->reg_pending = base + 0x08; 503 intc->reg_pending = base + 0xa8;
504 irq_num = 20; 504 irq_num = 20;
505 irq_start = S3C2410_IRQ(32); 505 irq_start = S3C2410_IRQ(32);
506 irq_offset = 4; 506 irq_offset = 4;
diff --git a/arch/arm/mach-s3c64xx/setup-usb-phy.c b/arch/arm/mach-s3c64xx/setup-usb-phy.c
index c8174d95339b..ca960bda02fd 100644
--- a/arch/arm/mach-s3c64xx/setup-usb-phy.c
+++ b/arch/arm/mach-s3c64xx/setup-usb-phy.c
@@ -76,7 +76,7 @@ static int s3c_usb_otgphy_exit(struct platform_device *pdev)
76 76
77int s5p_usb_phy_init(struct platform_device *pdev, int type) 77int s5p_usb_phy_init(struct platform_device *pdev, int type)
78{ 78{
79 if (type == S5P_USB_PHY_DEVICE) 79 if (type == USB_PHY_TYPE_DEVICE)
80 return s3c_usb_otgphy_init(pdev); 80 return s3c_usb_otgphy_init(pdev);
81 81
82 return -EINVAL; 82 return -EINVAL;
@@ -84,7 +84,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
84 84
85int s5p_usb_phy_exit(struct platform_device *pdev, int type) 85int s5p_usb_phy_exit(struct platform_device *pdev, int type)
86{ 86{
87 if (type == S5P_USB_PHY_DEVICE) 87 if (type == USB_PHY_TYPE_DEVICE)
88 return s3c_usb_otgphy_exit(pdev); 88 return s3c_usb_otgphy_exit(pdev);
89 89
90 return -EINVAL; 90 return -EINVAL;
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index fcdf52dbcc49..f051f53e35b7 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
214 .name = "pcmcdclk", 214 .name = "pcmcdclk",
215}; 215};
216 216
217static struct clk dummy_apb_pclk = {
218 .name = "apb_pclk",
219 .id = -1,
220};
221
222static struct clk *clkset_vpllsrc_list[] = { 217static struct clk *clkset_vpllsrc_list[] = {
223 [0] = &clk_fin_vpll, 218 [0] = &clk_fin_vpll,
224 [1] = &clk_sclk_hdmi27m, 219 [1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = {
305 300
306static struct clk init_clocks_off[] = { 301static struct clk init_clocks_off[] = {
307 { 302 {
308 .name = "dma",
309 .devname = "dma-pl330.0",
310 .parent = &clk_hclk_psys.clk,
311 .enable = s5pv210_clk_ip0_ctrl,
312 .ctrlbit = (1 << 3),
313 }, {
314 .name = "dma",
315 .devname = "dma-pl330.1",
316 .parent = &clk_hclk_psys.clk,
317 .enable = s5pv210_clk_ip0_ctrl,
318 .ctrlbit = (1 << 4),
319 }, {
320 .name = "rot", 303 .name = "rot",
321 .parent = &clk_hclk_dsys.clk, 304 .parent = &clk_hclk_dsys.clk,
322 .enable = s5pv210_clk_ip0_ctrl, 305 .enable = s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = {
573 .ctrlbit = (1<<19), 556 .ctrlbit = (1<<19),
574}; 557};
575 558
559static struct clk clk_pdma0 = {
560 .name = "pdma0",
561 .parent = &clk_hclk_psys.clk,
562 .enable = s5pv210_clk_ip0_ctrl,
563 .ctrlbit = (1 << 3),
564};
565
566static struct clk clk_pdma1 = {
567 .name = "pdma1",
568 .parent = &clk_hclk_psys.clk,
569 .enable = s5pv210_clk_ip0_ctrl,
570 .ctrlbit = (1 << 4),
571};
572
576static struct clk *clkset_uart_list[] = { 573static struct clk *clkset_uart_list[] = {
577 [6] = &clk_mout_mpll.clk, 574 [6] = &clk_mout_mpll.clk,
578 [7] = &clk_mout_epll.clk, 575 [7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
1075 &clk_hsmmc1, 1072 &clk_hsmmc1,
1076 &clk_hsmmc2, 1073 &clk_hsmmc2,
1077 &clk_hsmmc3, 1074 &clk_hsmmc3,
1075 &clk_pdma0,
1076 &clk_pdma1,
1078}; 1077};
1079 1078
1080/* Clock initialisation code */ 1079/* Clock initialisation code */
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
1333 CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), 1332 CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
1334 CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), 1333 CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
1335 CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), 1334 CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
1335 CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
1336 CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
1336}; 1337};
1337 1338
1338void __init s5pv210_register_clocks(void) 1339void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
1361 for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) 1362 for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
1362 s3c_disable_clocks(clk_cdev[ptr], 1); 1363 s3c_disable_clocks(clk_cdev[ptr], 1);
1363 1364
1364 s3c24xx_register_clock(&dummy_apb_pclk);
1365 s3c_pwmclk_init(); 1365 s3c_pwmclk_init();
1366} 1366}
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 3a38f7b34b94..e373de44a8b6 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
845 .mux_id = 0, 845 .mux_id = 0,
846 .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | 846 .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
847 V4L2_MBUS_VSYNC_ACTIVE_LOW, 847 V4L2_MBUS_VSYNC_ACTIVE_LOW,
848 .bus_type = FIMC_BUS_TYPE_ITU_601, 848 .fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
849 .board_info = &noon010pc30_board_info, 849 .board_info = &noon010pc30_board_info,
850 .i2c_bus_num = 0, 850 .i2c_bus_num = 0,
851 .clk_frequency = 16000000UL, 851 .clk_frequency = 16000000UL,
diff --git a/arch/arm/mach-s5pv210/setup-usb-phy.c b/arch/arm/mach-s5pv210/setup-usb-phy.c
index 356a0900af03..b2ee5333f89c 100644
--- a/arch/arm/mach-s5pv210/setup-usb-phy.c
+++ b/arch/arm/mach-s5pv210/setup-usb-phy.c
@@ -80,7 +80,7 @@ static int s5pv210_usb_otgphy_exit(struct platform_device *pdev)
80 80
81int s5p_usb_phy_init(struct platform_device *pdev, int type) 81int s5p_usb_phy_init(struct platform_device *pdev, int type)
82{ 82{
83 if (type == S5P_USB_PHY_DEVICE) 83 if (type == USB_PHY_TYPE_DEVICE)
84 return s5pv210_usb_otgphy_init(pdev); 84 return s5pv210_usb_otgphy_init(pdev);
85 85
86 return -EINVAL; 86 return -EINVAL;
@@ -88,7 +88,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
88 88
89int s5p_usb_phy_exit(struct platform_device *pdev, int type) 89int s5p_usb_phy_exit(struct platform_device *pdev, int type)
90{ 90{
91 if (type == S5P_USB_PHY_DEVICE) 91 if (type == USB_PHY_TYPE_DEVICE)
92 return s5pv210_usb_otgphy_exit(pdev); 92 return s5pv210_usb_otgphy_exit(pdev);
93 93
94 return -EINVAL; 94 return -EINVAL;
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index b63dec848195..153555724988 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -10,6 +10,7 @@
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/serial_8250.h> 11#include <linux/serial_8250.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/cpu.h>
13 14
14#include <asm/setup.h> 15#include <asm/setup.h>
15#include <asm/mach-types.h> 16#include <asm/mach-types.h>
@@ -130,7 +131,7 @@ static void __init shark_timer_init(void)
130 131
131static void shark_init_early(void) 132static void shark_init_early(void)
132{ 133{
133 disable_hlt(); 134 cpu_idle_poll_ctrl(true);
134} 135}
135 136
136MACHINE_START(SHARK, "Shark") 137MACHINE_START(SHARK, "Shark")
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index f2ec0777cfbe..ff8b7ba9b93c 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -169,7 +169,7 @@ static int usbhsf_get_id(struct platform_device *pdev)
169 return USBHS_GADGET; 169 return USBHS_GADGET;
170} 170}
171 171
172static void usbhsf_power_ctrl(struct platform_device *pdev, 172static int usbhsf_power_ctrl(struct platform_device *pdev,
173 void __iomem *base, int enable) 173 void __iomem *base, int enable)
174{ 174{
175 struct usbhsf_private *priv = usbhsf_get_priv(pdev); 175 struct usbhsf_private *priv = usbhsf_get_priv(pdev);
@@ -223,6 +223,8 @@ static void usbhsf_power_ctrl(struct platform_device *pdev,
223 clk_disable(priv->pci); /* usb work around */ 223 clk_disable(priv->pci); /* usb work around */
224 clk_disable(priv->usb24); /* usb work around */ 224 clk_disable(priv->usb24); /* usb work around */
225 } 225 }
226
227 return 0;
226} 228}
227 229
228static int usbhsf_get_vbus(struct platform_device *pdev) 230static int usbhsf_get_vbus(struct platform_device *pdev)
@@ -239,7 +241,7 @@ static irqreturn_t usbhsf_interrupt(int irq, void *data)
239 return IRQ_HANDLED; 241 return IRQ_HANDLED;
240} 242}
241 243
242static void usbhsf_hardware_exit(struct platform_device *pdev) 244static int usbhsf_hardware_exit(struct platform_device *pdev)
243{ 245{
244 struct usbhsf_private *priv = usbhsf_get_priv(pdev); 246 struct usbhsf_private *priv = usbhsf_get_priv(pdev);
245 247
@@ -264,6 +266,8 @@ static void usbhsf_hardware_exit(struct platform_device *pdev)
264 priv->usbh_base = NULL; 266 priv->usbh_base = NULL;
265 267
266 free_irq(IRQ7, pdev); 268 free_irq(IRQ7, pdev);
269
270 return 0;
267} 271}
268 272
269static int usbhsf_hardware_init(struct platform_device *pdev) 273static int usbhsf_hardware_init(struct platform_device *pdev)
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 7f3a6b7e7b7c..a385f570bbfc 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -155,12 +155,14 @@ static int usbhs_get_vbus(struct platform_device *pdev)
155 return !((1 << 7) & __raw_readw(priv->cr2)); 155 return !((1 << 7) & __raw_readw(priv->cr2));
156} 156}
157 157
158static void usbhs_phy_reset(struct platform_device *pdev) 158static int usbhs_phy_reset(struct platform_device *pdev)
159{ 159{
160 struct usbhs_private *priv = usbhs_get_priv(pdev); 160 struct usbhs_private *priv = usbhs_get_priv(pdev);
161 161
162 /* init phy */ 162 /* init phy */
163 __raw_writew(0x8a0a, priv->cr2); 163 __raw_writew(0x8a0a, priv->cr2);
164
165 return 0;
164} 166}
165 167
166static int usbhs_get_id(struct platform_device *pdev) 168static int usbhs_get_id(struct platform_device *pdev)
@@ -202,7 +204,7 @@ static int usbhs_hardware_init(struct platform_device *pdev)
202 return 0; 204 return 0;
203} 205}
204 206
205static void usbhs_hardware_exit(struct platform_device *pdev) 207static int usbhs_hardware_exit(struct platform_device *pdev)
206{ 208{
207 struct usbhs_private *priv = usbhs_get_priv(pdev); 209 struct usbhs_private *priv = usbhs_get_priv(pdev);
208 210
@@ -210,6 +212,8 @@ static void usbhs_hardware_exit(struct platform_device *pdev)
210 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); 212 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
211 213
212 free_irq(IRQ15, pdev); 214 free_irq(IRQ15, pdev);
215
216 return 0;
213} 217}
214 218
215static u32 usbhs_pipe_cfg[] = { 219static u32 usbhs_pipe_cfg[] = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index db968a585ff0..979237c18dad 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -596,12 +596,14 @@ static int usbhs_get_vbus(struct platform_device *pdev)
596 return usbhs_is_connected(usbhs_get_priv(pdev)); 596 return usbhs_is_connected(usbhs_get_priv(pdev));
597} 597}
598 598
599static void usbhs_phy_reset(struct platform_device *pdev) 599static int usbhs_phy_reset(struct platform_device *pdev)
600{ 600{
601 struct usbhs_private *priv = usbhs_get_priv(pdev); 601 struct usbhs_private *priv = usbhs_get_priv(pdev);
602 602
603 /* init phy */ 603 /* init phy */
604 __raw_writew(0x8a0a, priv->usbcrcaddr); 604 __raw_writew(0x8a0a, priv->usbcrcaddr);
605
606 return 0;
605} 607}
606 608
607static int usbhs0_get_id(struct platform_device *pdev) 609static int usbhs0_get_id(struct platform_device *pdev)
@@ -628,11 +630,13 @@ static int usbhs0_hardware_init(struct platform_device *pdev)
628 return 0; 630 return 0;
629} 631}
630 632
631static void usbhs0_hardware_exit(struct platform_device *pdev) 633static int usbhs0_hardware_exit(struct platform_device *pdev)
632{ 634{
633 struct usbhs_private *priv = usbhs_get_priv(pdev); 635 struct usbhs_private *priv = usbhs_get_priv(pdev);
634 636
635 cancel_delayed_work_sync(&priv->work); 637 cancel_delayed_work_sync(&priv->work);
638
639 return 0;
636} 640}
637 641
638static struct usbhs_private usbhs0_private = { 642static struct usbhs_private usbhs0_private = {
@@ -735,7 +739,7 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
735 return 0; 739 return 0;
736} 740}
737 741
738static void usbhs1_hardware_exit(struct platform_device *pdev) 742static int usbhs1_hardware_exit(struct platform_device *pdev)
739{ 743{
740 struct usbhs_private *priv = usbhs_get_priv(pdev); 744 struct usbhs_private *priv = usbhs_get_priv(pdev);
741 745
@@ -743,6 +747,8 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
743 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); 747 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
744 748
745 free_irq(IRQ8, pdev); 749 free_irq(IRQ8, pdev);
750
751 return 0;
746} 752}
747 753
748static int usbhs1_get_id(struct platform_device *pdev) 754static int usbhs1_get_id(struct platform_device *pdev)
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cdcb799e802f..fec49ebc359a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -32,6 +32,7 @@
32#include <linux/smsc911x.h> 32#include <linux/smsc911x.h>
33#include <linux/spi/spi.h> 33#include <linux/spi/spi.h>
34#include <linux/spi/sh_hspi.h> 34#include <linux/spi/sh_hspi.h>
35#include <linux/mmc/host.h>
35#include <linux/mmc/sh_mobile_sdhi.h> 36#include <linux/mmc/sh_mobile_sdhi.h>
36#include <linux/mfd/tmio.h> 37#include <linux/mfd/tmio.h>
37#include <linux/usb/otg.h> 38#include <linux/usb/otg.h>
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 47d83f7a70b6..5d92b5dd486b 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -12,6 +12,8 @@
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/cpu.h>
16
15#include <asm/io.h> 17#include <asm/io.h>
16#include <asm/system_misc.h> 18#include <asm/system_misc.h>
17 19
@@ -23,13 +25,13 @@ static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
23 25
24static int shmobile_suspend_begin(suspend_state_t state) 26static int shmobile_suspend_begin(suspend_state_t state)
25{ 27{
26 disable_hlt(); 28 cpu_idle_poll_ctrl(true);
27 return 0; 29 return 0;
28} 30}
29 31
30static void shmobile_suspend_end(void) 32static void shmobile_suspend_end(void)
31{ 33{
32 enable_hlt(); 34 cpu_idle_poll_ctrl(false);
33} 35}
34 36
35struct platform_suspend_ops shmobile_suspend_ops = { 37struct platform_suspend_ops shmobile_suspend_ops = {
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index f9d754f90c59..d2b3937c4014 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -14,7 +14,7 @@
14#define pr_fmt(fmt) "SPEAr3xx: " fmt 14#define pr_fmt(fmt) "SPEAr3xx: " fmt
15 15
16#include <linux/amba/pl022.h> 16#include <linux/amba/pl022.h>
17#include <linux/amba/pl08x.h> 17#include <linux/amba/pl080.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <plat/pl080.h> 19#include <plat/pl080.h>
20#include <mach/generic.h> 20#include <mach/generic.h>
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d1c4893894ce..dbc653ea851c 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -18,8 +18,8 @@ config ARCH_TEGRA_2x_SOC
18 select PL310_ERRATA_727915 if CACHE_L2X0 18 select PL310_ERRATA_727915 if CACHE_L2X0
19 select PL310_ERRATA_769419 if CACHE_L2X0 19 select PL310_ERRATA_769419 if CACHE_L2X0
20 select USB_ARCH_HAS_EHCI if USB_SUPPORT 20 select USB_ARCH_HAS_EHCI if USB_SUPPORT
21 select USB_ULPI if USB 21 select USB_ULPI if USB_PHY
22 select USB_ULPI_VIEWPORT if USB_SUPPORT 22 select USB_ULPI_VIEWPORT if USB_PHY
23 help 23 help
24 Support for NVIDIA Tegra AP20 and T20 processors, based on the 24 Support for NVIDIA Tegra AP20 and T20 processors, based on the
25 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller 25 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
@@ -37,8 +37,8 @@ config ARCH_TEGRA_3x_SOC
37 select PINCTRL_TEGRA30 37 select PINCTRL_TEGRA30
38 select PL310_ERRATA_769419 if CACHE_L2X0 38 select PL310_ERRATA_769419 if CACHE_L2X0
39 select USB_ARCH_HAS_EHCI if USB_SUPPORT 39 select USB_ARCH_HAS_EHCI if USB_SUPPORT
40 select USB_ULPI if USB 40 select USB_ULPI if USB_PHY
41 select USB_ULPI_VIEWPORT if USB_SUPPORT 41 select USB_ULPI_VIEWPORT if USB_PHY
42 help 42 help
43 Support for NVIDIA Tegra T30 processor family, based on the 43 Support for NVIDIA Tegra T30 processor family, based on the
44 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller 44 ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 2a17bc506cff..ff3c9f016591 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * Authors: Sundar Iyer <sundar.iyer@stericsson.com> 6 * Authors: Sundar Iyer <sundar.iyer@stericsson.com>
7 * Bengt Jonsson <bengt.g.jonsson@stericsson.com> 7 * Bengt Jonsson <bengt.g.jonsson@stericsson.com>
8 * Daniel Willerud <daniel.willerud@stericsson.com>
8 * 9 *
9 * MOP500 board specific initialization for regulators 10 * MOP500 board specific initialization for regulators
10 */ 11 */
@@ -12,6 +13,7 @@
12#include <linux/regulator/machine.h> 13#include <linux/regulator/machine.h>
13#include <linux/regulator/ab8500.h> 14#include <linux/regulator/ab8500.h>
14#include "board-mop500-regulators.h" 15#include "board-mop500-regulators.h"
16#include "id.h"
15 17
16static struct regulator_consumer_supply gpio_en_3v3_consumers[] = { 18static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
17 REGULATOR_SUPPLY("vdd33a", "smsc911x.0"), 19 REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
@@ -53,21 +55,37 @@ struct regulator_init_data tps61052_regulator = {
53}; 55};
54 56
55static struct regulator_consumer_supply ab8500_vaux1_consumers[] = { 57static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
56 /* External displays, connector on board 2v5 power supply */ 58 /* Main display, u8500 R3 uib */
57 REGULATOR_SUPPLY("vaux12v5", "mcde.0"), 59 REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
60 /* Main display, u8500 uib and ST uib */
61 REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
62 /* Secondary display, ST uib */
63 REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
58 /* SFH7741 proximity sensor */ 64 /* SFH7741 proximity sensor */
59 REGULATOR_SUPPLY("vcc", "gpio-keys.0"), 65 REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
60 /* BH1780GLS ambient light sensor */ 66 /* BH1780GLS ambient light sensor */
61 REGULATOR_SUPPLY("vcc", "2-0029"), 67 REGULATOR_SUPPLY("vcc", "2-0029"),
62 /* lsm303dlh accelerometer */ 68 /* lsm303dlh accelerometer */
63 REGULATOR_SUPPLY("vdd", "3-0018"), 69 REGULATOR_SUPPLY("vdd", "2-0018"),
70 /* lsm303dlhc accelerometer */
71 REGULATOR_SUPPLY("vdd", "2-0019"),
64 /* lsm303dlh magnetometer */ 72 /* lsm303dlh magnetometer */
65 REGULATOR_SUPPLY("vdd", "3-001e"), 73 REGULATOR_SUPPLY("vdd", "2-001e"),
66 /* Rohm BU21013 Touchscreen devices */ 74 /* Rohm BU21013 Touchscreen devices */
67 REGULATOR_SUPPLY("avdd", "3-005c"), 75 REGULATOR_SUPPLY("avdd", "3-005c"),
68 REGULATOR_SUPPLY("avdd", "3-005d"), 76 REGULATOR_SUPPLY("avdd", "3-005d"),
69 /* Synaptics RMI4 Touchscreen device */ 77 /* Synaptics RMI4 Touchscreen device */
70 REGULATOR_SUPPLY("vdd", "3-004b"), 78 REGULATOR_SUPPLY("vdd", "3-004b"),
79 /* L3G4200D Gyroscope device */
80 REGULATOR_SUPPLY("vdd", "2-0068"),
81 /* Ambient light sensor device */
82 REGULATOR_SUPPLY("vdd", "3-0029"),
83 /* Pressure sensor device */
84 REGULATOR_SUPPLY("vdd", "2-005c"),
85 /* Cypress TrueTouch Touchscreen device */
86 REGULATOR_SUPPLY("vcpin", "spi8.0"),
87 /* Camera device */
88 REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
71}; 89};
72 90
73static struct regulator_consumer_supply ab8500_vaux2_consumers[] = { 91static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
@@ -75,18 +93,50 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
75 REGULATOR_SUPPLY("vmmc", "sdi4"), 93 REGULATOR_SUPPLY("vmmc", "sdi4"),
76 /* AB8500 audio codec */ 94 /* AB8500 audio codec */
77 REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"), 95 REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
96 /* AB8500 accessory detect 1 */
97 REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
98 /* AB8500 Tv-out device */
99 REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
100 /* AV8100 HDMI device */
101 REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
78}; 102};
79 103
80static struct regulator_consumer_supply ab8500_vaux3_consumers[] = { 104static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
105 REGULATOR_SUPPLY("v-SD-STM", "stm"),
81 /* External MMC slot power */ 106 /* External MMC slot power */
82 REGULATOR_SUPPLY("vmmc", "sdi0"), 107 REGULATOR_SUPPLY("vmmc", "sdi0"),
83}; 108};
84 109
110static struct regulator_consumer_supply ab8505_vaux4_consumers[] = {
111};
112
113static struct regulator_consumer_supply ab8505_vaux5_consumers[] = {
114};
115
116static struct regulator_consumer_supply ab8505_vaux6_consumers[] = {
117};
118
119static struct regulator_consumer_supply ab8505_vaux8_consumers[] = {
120 /* AB8500 audio codec device */
121 REGULATOR_SUPPLY("v-aux8", NULL),
122};
123
124static struct regulator_consumer_supply ab8505_vadc_consumers[] = {
125 /* Internal general-purpose ADC */
126 REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
127 /* ADC for charger */
128 REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
129};
130
85static struct regulator_consumer_supply ab8500_vtvout_consumers[] = { 131static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
86 /* TV-out DENC supply */ 132 /* TV-out DENC supply */
87 REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"), 133 REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
88 /* Internal general-purpose ADC */ 134 /* Internal general-purpose ADC */
89 REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"), 135 REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
136 /* ADC for charger */
137 REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
138 /* AB8500 Tv-out device */
139 REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
90}; 140};
91 141
92static struct regulator_consumer_supply ab8500_vaud_consumers[] = { 142static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
@@ -114,77 +164,90 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
114 REGULATOR_SUPPLY("v-intcore", NULL), 164 REGULATOR_SUPPLY("v-intcore", NULL),
115 /* USB Transceiver */ 165 /* USB Transceiver */
116 REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"), 166 REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
167 /* Handled by abx500 clk driver */
168 REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
169};
170
171static struct regulator_consumer_supply ab8505_usb_consumers[] = {
172 /* HS USB OTG physical interface */
173 REGULATOR_SUPPLY("v-ape", NULL),
117}; 174};
118 175
119static struct regulator_consumer_supply ab8500_vana_consumers[] = { 176static struct regulator_consumer_supply ab8500_vana_consumers[] = {
120 /* External displays, connector on board, 1v8 power supply */ 177 /* DB8500 DSI */
121 REGULATOR_SUPPLY("vsmps2", "mcde.0"), 178 REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
179 REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
180 REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
181 REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
182 REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
183 REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
184 /* DB8500 CSI */
185 REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
122}; 186};
123 187
124/* ab8500 regulator register initialization */ 188/* ab8500 regulator register initialization */
125struct ab8500_regulator_reg_init 189static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
126ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
127 /* 190 /*
128 * VanaRequestCtrl = HP/LP depending on VxRequest 191 * VanaRequestCtrl = HP/LP depending on VxRequest
129 * VextSupply1RequestCtrl = HP/LP depending on VxRequest 192 * VextSupply1RequestCtrl = HP/LP depending on VxRequest
130 */ 193 */
131 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00), 194 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
132 /* 195 /*
133 * VextSupply2RequestCtrl = HP/LP depending on VxRequest 196 * VextSupply2RequestCtrl = HP/LP depending on VxRequest
134 * VextSupply3RequestCtrl = HP/LP depending on VxRequest 197 * VextSupply3RequestCtrl = HP/LP depending on VxRequest
135 * Vaux1RequestCtrl = HP/LP depending on VxRequest 198 * Vaux1RequestCtrl = HP/LP depending on VxRequest
136 * Vaux2RequestCtrl = HP/LP depending on VxRequest 199 * Vaux2RequestCtrl = HP/LP depending on VxRequest
137 */ 200 */
138 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00), 201 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
139 /* 202 /*
140 * Vaux3RequestCtrl = HP/LP depending on VxRequest 203 * Vaux3RequestCtrl = HP/LP depending on VxRequest
141 * SwHPReq = Control through SWValid disabled 204 * SwHPReq = Control through SWValid disabled
142 */ 205 */
143 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00), 206 INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
144 /* 207 /*
145 * VanaSysClkReq1HPValid = disabled 208 * VanaSysClkReq1HPValid = disabled
146 * Vaux1SysClkReq1HPValid = disabled 209 * Vaux1SysClkReq1HPValid = disabled
147 * Vaux2SysClkReq1HPValid = disabled 210 * Vaux2SysClkReq1HPValid = disabled
148 * Vaux3SysClkReq1HPValid = disabled 211 * Vaux3SysClkReq1HPValid = disabled
149 */ 212 */
150 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00), 213 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
151 /* 214 /*
152 * VextSupply1SysClkReq1HPValid = disabled 215 * VextSupply1SysClkReq1HPValid = disabled
153 * VextSupply2SysClkReq1HPValid = disabled 216 * VextSupply2SysClkReq1HPValid = disabled
154 * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled 217 * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
155 */ 218 */
156 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40), 219 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
157 /* 220 /*
158 * VanaHwHPReq1Valid = disabled 221 * VanaHwHPReq1Valid = disabled
159 * Vaux1HwHPreq1Valid = disabled 222 * Vaux1HwHPreq1Valid = disabled
160 * Vaux2HwHPReq1Valid = disabled 223 * Vaux2HwHPReq1Valid = disabled
161 * Vaux3HwHPReqValid = disabled 224 * Vaux3HwHPReqValid = disabled
162 */ 225 */
163 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00), 226 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
164 /* 227 /*
165 * VextSupply1HwHPReq1Valid = disabled 228 * VextSupply1HwHPReq1Valid = disabled
166 * VextSupply2HwHPReq1Valid = disabled 229 * VextSupply2HwHPReq1Valid = disabled
167 * VextSupply3HwHPReq1Valid = disabled 230 * VextSupply3HwHPReq1Valid = disabled
168 */ 231 */
169 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00), 232 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
170 /* 233 /*
171 * VanaHwHPReq2Valid = disabled 234 * VanaHwHPReq2Valid = disabled
172 * Vaux1HwHPReq2Valid = disabled 235 * Vaux1HwHPReq2Valid = disabled
173 * Vaux2HwHPReq2Valid = disabled 236 * Vaux2HwHPReq2Valid = disabled
174 * Vaux3HwHPReq2Valid = disabled 237 * Vaux3HwHPReq2Valid = disabled
175 */ 238 */
176 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00), 239 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
177 /* 240 /*
178 * VextSupply1HwHPReq2Valid = disabled 241 * VextSupply1HwHPReq2Valid = disabled
179 * VextSupply2HwHPReq2Valid = disabled 242 * VextSupply2HwHPReq2Valid = disabled
180 * VextSupply3HwHPReq2Valid = HWReq2 controlled 243 * VextSupply3HwHPReq2Valid = HWReq2 controlled
181 */ 244 */
182 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04), 245 INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
183 /* 246 /*
184 * VanaSwHPReqValid = disabled 247 * VanaSwHPReqValid = disabled
185 * Vaux1SwHPReqValid = disabled 248 * Vaux1SwHPReqValid = disabled
186 */ 249 */
187 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00), 250 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
188 /* 251 /*
189 * Vaux2SwHPReqValid = disabled 252 * Vaux2SwHPReqValid = disabled
190 * Vaux3SwHPReqValid = disabled 253 * Vaux3SwHPReqValid = disabled
@@ -192,7 +255,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
192 * VextSupply2SwHPReqValid = disabled 255 * VextSupply2SwHPReqValid = disabled
193 * VextSupply3SwHPReqValid = disabled 256 * VextSupply3SwHPReqValid = disabled
194 */ 257 */
195 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00), 258 INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
196 /* 259 /*
197 * SysClkReq2Valid1 = SysClkReq2 controlled 260 * SysClkReq2Valid1 = SysClkReq2 controlled
198 * SysClkReq3Valid1 = disabled 261 * SysClkReq3Valid1 = disabled
@@ -202,7 +265,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
202 * SysClkReq7Valid1 = disabled 265 * SysClkReq7Valid1 = disabled
203 * SysClkReq8Valid1 = disabled 266 * SysClkReq8Valid1 = disabled
204 */ 267 */
205 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a), 268 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
206 /* 269 /*
207 * SysClkReq2Valid2 = disabled 270 * SysClkReq2Valid2 = disabled
208 * SysClkReq3Valid2 = disabled 271 * SysClkReq3Valid2 = disabled
@@ -212,7 +275,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
212 * SysClkReq7Valid2 = disabled 275 * SysClkReq7Valid2 = disabled
213 * SysClkReq8Valid2 = disabled 276 * SysClkReq8Valid2 = disabled
214 */ 277 */
215 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20), 278 INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
216 /* 279 /*
217 * VTVoutEna = disabled 280 * VTVoutEna = disabled
218 * Vintcore12Ena = disabled 281 * Vintcore12Ena = disabled
@@ -220,66 +283,62 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
220 * Vintcore12LP = inactive (HP) 283 * Vintcore12LP = inactive (HP)
221 * VTVoutLP = inactive (HP) 284 * VTVoutLP = inactive (HP)
222 */ 285 */
223 INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10), 286 INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
224 /* 287 /*
225 * VaudioEna = disabled 288 * VaudioEna = disabled
226 * VdmicEna = disabled 289 * VdmicEna = disabled
227 * Vamic1Ena = disabled 290 * Vamic1Ena = disabled
228 * Vamic2Ena = disabled 291 * Vamic2Ena = disabled
229 */ 292 */
230 INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00), 293 INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
231 /* 294 /*
232 * Vamic1_dzout = high-Z when Vamic1 is disabled 295 * Vamic1_dzout = high-Z when Vamic1 is disabled
233 * Vamic2_dzout = high-Z when Vamic2 is disabled 296 * Vamic2_dzout = high-Z when Vamic2 is disabled
234 */ 297 */
235 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00), 298 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
236 /* 299 /*
237 * VPll = Hw controlled 300 * VPll = Hw controlled (NOTE! PRCMU bits)
238 * VanaRegu = force off 301 * VanaRegu = force off
239 */ 302 */
240 INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02), 303 INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
241 /* 304 /*
242 * VrefDDREna = disabled 305 * VrefDDREna = disabled
243 * VrefDDRSleepMode = inactive (no pulldown) 306 * VrefDDRSleepMode = inactive (no pulldown)
244 */ 307 */
245 INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00), 308 INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
246 /* 309 /*
247 * VextSupply1Regu = HW control 310 * VextSupply1Regu = force LP
248 * VextSupply2Regu = HW control 311 * VextSupply2Regu = force OFF
249 * VextSupply3Regu = HW control 312 * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
250 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0 313 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
251 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0 314 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
252 */ 315 */
253 INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a), 316 INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
254 /* 317 /*
255 * Vaux1Regu = force HP 318 * Vaux1Regu = force HP
256 * Vaux2Regu = force off 319 * Vaux2Regu = force off
257 */ 320 */
258 INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01), 321 INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
259 /* 322 /*
260 * Vaux3regu = force off 323 * Vaux3Regu = force off
261 */ 324 */
262 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00), 325 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
263 /* 326 /*
264 * Vsmps1 = 1.15V 327 * Vaux1Sel = 2.8 V
265 */ 328 */
266 INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24), 329 INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
267 /*
268 * Vaux1Sel = 2.5 V
269 */
270 INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
271 /* 330 /*
272 * Vaux2Sel = 2.9 V 331 * Vaux2Sel = 2.9 V
273 */ 332 */
274 INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d), 333 INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
275 /* 334 /*
276 * Vaux3Sel = 2.91 V 335 * Vaux3Sel = 2.91 V
277 */ 336 */
278 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07), 337 INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
279 /* 338 /*
280 * VextSupply12LP = disabled (no LP) 339 * VextSupply12LP = disabled (no LP)
281 */ 340 */
282 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00), 341 INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
283 /* 342 /*
284 * Vaux1Disch = short discharge time 343 * Vaux1Disch = short discharge time
285 * Vaux2Disch = short discharge time 344 * Vaux2Disch = short discharge time
@@ -288,33 +347,26 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
288 * VTVoutDisch = short discharge time 347 * VTVoutDisch = short discharge time
289 * VaudioDisch = short discharge time 348 * VaudioDisch = short discharge time
290 */ 349 */
291 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00), 350 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
292 /* 351 /*
293 * VanaDisch = short discharge time 352 * VanaDisch = short discharge time
294 * VdmicPullDownEna = pulldown disabled when Vdmic is disabled 353 * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
295 * VdmicDisch = short discharge time 354 * VdmicDisch = short discharge time
296 */ 355 */
297 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00), 356 INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
298}; 357};
299 358
300/* AB8500 regulators */ 359/* AB8500 regulators */
301struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { 360static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
302 /* supplies to the display/camera */ 361 /* supplies to the display/camera */
303 [AB8500_LDO_AUX1] = { 362 [AB8500_LDO_AUX1] = {
304 .constraints = { 363 .constraints = {
305 .name = "V-DISPLAY", 364 .name = "V-DISPLAY",
306 .min_uV = 2500000, 365 .min_uV = 2800000,
307 .max_uV = 2900000, 366 .max_uV = 3300000,
308 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 367 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
309 REGULATOR_CHANGE_STATUS, 368 REGULATOR_CHANGE_STATUS,
310 .boot_on = 1, /* display is on at boot */ 369 .boot_on = 1, /* display is on at boot */
311 /*
312 * This voltage cannot be disabled right now because
313 * it is somehow affecting the external MMC
314 * functionality, though that typically will use
315 * AUX3.
316 */
317 .always_on = 1,
318 }, 370 },
319 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers), 371 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
320 .consumer_supplies = ab8500_vaux1_consumers, 372 .consumer_supplies = ab8500_vaux1_consumers,
@@ -326,7 +378,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
326 .min_uV = 1100000, 378 .min_uV = 1100000,
327 .max_uV = 3300000, 379 .max_uV = 3300000,
328 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 380 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
329 REGULATOR_CHANGE_STATUS, 381 REGULATOR_CHANGE_STATUS |
382 REGULATOR_CHANGE_MODE,
383 .valid_modes_mask = REGULATOR_MODE_NORMAL |
384 REGULATOR_MODE_IDLE,
330 }, 385 },
331 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers), 386 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
332 .consumer_supplies = ab8500_vaux2_consumers, 387 .consumer_supplies = ab8500_vaux2_consumers,
@@ -338,7 +393,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
338 .min_uV = 1100000, 393 .min_uV = 1100000,
339 .max_uV = 3300000, 394 .max_uV = 3300000,
340 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | 395 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
341 REGULATOR_CHANGE_STATUS, 396 REGULATOR_CHANGE_STATUS |
397 REGULATOR_CHANGE_MODE,
398 .valid_modes_mask = REGULATOR_MODE_NORMAL |
399 REGULATOR_MODE_IDLE,
342 }, 400 },
343 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers), 401 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
344 .consumer_supplies = ab8500_vaux3_consumers, 402 .consumer_supplies = ab8500_vaux3_consumers,
@@ -392,18 +450,614 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
392 [AB8500_LDO_INTCORE] = { 450 [AB8500_LDO_INTCORE] = {
393 .constraints = { 451 .constraints = {
394 .name = "V-INTCORE", 452 .name = "V-INTCORE",
395 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 453 .min_uV = 1250000,
454 .max_uV = 1350000,
455 .input_uV = 1800000,
456 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
457 REGULATOR_CHANGE_STATUS |
458 REGULATOR_CHANGE_MODE |
459 REGULATOR_CHANGE_DRMS,
460 .valid_modes_mask = REGULATOR_MODE_NORMAL |
461 REGULATOR_MODE_IDLE,
396 }, 462 },
397 .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers), 463 .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
398 .consumer_supplies = ab8500_vintcore_consumers, 464 .consumer_supplies = ab8500_vintcore_consumers,
399 }, 465 },
400 /* supply for U8500 CSI/DSI, VANA LDO */ 466 /* supply for U8500 CSI-DSI, VANA LDO */
401 [AB8500_LDO_ANA] = { 467 [AB8500_LDO_ANA] = {
402 .constraints = { 468 .constraints = {
403 .name = "V-CSI/DSI", 469 .name = "V-CSI-DSI",
404 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 470 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
405 }, 471 },
406 .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers), 472 .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
407 .consumer_supplies = ab8500_vana_consumers, 473 .consumer_supplies = ab8500_vana_consumers,
408 }, 474 },
409}; 475};
476
477/* supply for VextSupply3 */
478static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
479 /* SIM supply for 3 V SIM cards */
480 REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
481};
482
483/* extended configuration for VextSupply2, only used for HREFP_V20 boards */
484static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = {
485 .hwreq = true,
486};
487
488/*
489 * AB8500 external regulators
490 */
491static struct regulator_init_data ab8500_ext_regulators[] = {
492 /* fixed Vbat supplies VSMPS1_EXT_1V8 */
493 [AB8500_EXT_SUPPLY1] = {
494 .constraints = {
495 .name = "ab8500-ext-supply1",
496 .min_uV = 1800000,
497 .max_uV = 1800000,
498 .initial_mode = REGULATOR_MODE_IDLE,
499 .boot_on = 1,
500 .always_on = 1,
501 },
502 },
503 /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
504 [AB8500_EXT_SUPPLY2] = {
505 .constraints = {
506 .name = "ab8500-ext-supply2",
507 .min_uV = 1360000,
508 .max_uV = 1360000,
509 },
510 },
511 /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
512 [AB8500_EXT_SUPPLY3] = {
513 .constraints = {
514 .name = "ab8500-ext-supply3",
515 .min_uV = 3400000,
516 .max_uV = 3400000,
517 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
518 .boot_on = 1,
519 },
520 .num_consumer_supplies =
521 ARRAY_SIZE(ab8500_ext_supply3_consumers),
522 .consumer_supplies = ab8500_ext_supply3_consumers,
523 },
524};
525
526/* ab8505 regulator register initialization */
527static struct ab8500_regulator_reg_init ab8505_reg_init[] = {
528 /*
529 * VarmRequestCtrl
530 * VsmpsCRequestCtrl
531 * VsmpsARequestCtrl
532 * VsmpsBRequestCtrl
533 */
534 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL1, 0x00, 0x00),
535 /*
536 * VsafeRequestCtrl
537 * VpllRequestCtrl
538 * VanaRequestCtrl = HP/LP depending on VxRequest
539 */
540 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL2, 0x30, 0x00),
541 /*
542 * Vaux1RequestCtrl = HP/LP depending on VxRequest
543 * Vaux2RequestCtrl = HP/LP depending on VxRequest
544 */
545 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL3, 0xf0, 0x00),
546 /*
547 * Vaux3RequestCtrl = HP/LP depending on VxRequest
548 * SwHPReq = Control through SWValid disabled
549 */
550 INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL4, 0x07, 0x00),
551 /*
552 * VsmpsASysClkReq1HPValid
553 * VsmpsBSysClkReq1HPValid
554 * VsafeSysClkReq1HPValid
555 * VanaSysClkReq1HPValid = disabled
556 * VpllSysClkReq1HPValid
557 * Vaux1SysClkReq1HPValid = disabled
558 * Vaux2SysClkReq1HPValid = disabled
559 * Vaux3SysClkReq1HPValid = disabled
560 */
561 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
562 /*
563 * VsmpsCSysClkReq1HPValid
564 * VarmSysClkReq1HPValid
565 * VbbSysClkReq1HPValid
566 * VsmpsMSysClkReq1HPValid
567 */
568 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID2, 0x00, 0x00),
569 /*
570 * VsmpsAHwHPReq1Valid
571 * VsmpsBHwHPReq1Valid
572 * VsafeHwHPReq1Valid
573 * VanaHwHPReq1Valid = disabled
574 * VpllHwHPReq1Valid
575 * Vaux1HwHPreq1Valid = disabled
576 * Vaux2HwHPReq1Valid = disabled
577 * Vaux3HwHPReqValid = disabled
578 */
579 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID1, 0xe8, 0x00),
580 /*
581 * VsmpsMHwHPReq1Valid
582 */
583 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID2, 0x00, 0x00),
584 /*
585 * VsmpsAHwHPReq2Valid
586 * VsmpsBHwHPReq2Valid
587 * VsafeHwHPReq2Valid
588 * VanaHwHPReq2Valid = disabled
589 * VpllHwHPReq2Valid
590 * Vaux1HwHPReq2Valid = disabled
591 * Vaux2HwHPReq2Valid = disabled
592 * Vaux3HwHPReq2Valid = disabled
593 */
594 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID1, 0xe8, 0x00),
595 /*
596 * VsmpsMHwHPReq2Valid
597 */
598 INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID2, 0x00, 0x00),
599 /**
600 * VsmpsCSwHPReqValid
601 * VarmSwHPReqValid
602 * VsmpsASwHPReqValid
603 * VsmpsBSwHPReqValid
604 * VsafeSwHPReqValid
605 * VanaSwHPReqValid
606 * VanaSwHPReqValid = disabled
607 * VpllSwHPReqValid
608 * Vaux1SwHPReqValid = disabled
609 */
610 INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID1, 0xa0, 0x00),
611 /*
612 * Vaux2SwHPReqValid = disabled
613 * Vaux3SwHPReqValid = disabled
614 * VsmpsMSwHPReqValid
615 */
616 INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID2, 0x03, 0x00),
617 /*
618 * SysClkReq2Valid1 = SysClkReq2 controlled
619 * SysClkReq3Valid1 = disabled
620 * SysClkReq4Valid1 = SysClkReq4 controlled
621 */
622 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID1, 0x0e, 0x0a),
623 /*
624 * SysClkReq2Valid2 = disabled
625 * SysClkReq3Valid2 = disabled
626 * SysClkReq4Valid2 = disabled
627 */
628 INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID2, 0x0e, 0x00),
629 /*
630 * Vaux4SwHPReqValid
631 * Vaux4HwHPReq2Valid
632 * Vaux4HwHPReq1Valid
633 * Vaux4SysClkReq1HPValid
634 */
635 INIT_REGULATOR_REGISTER(AB8505_REGUVAUX4REQVALID, 0x00, 0x00),
636 /*
637 * VadcEna = disabled
638 * VintCore12Ena = disabled
639 * VintCore12Sel = 1.25 V
640 * VintCore12LP = inactive (HP)
641 * VadcLP = inactive (HP)
642 */
643 INIT_REGULATOR_REGISTER(AB8505_REGUMISC1, 0xfe, 0x10),
644 /*
645 * VaudioEna = disabled
646 * Vaux8Ena = disabled
647 * Vamic1Ena = disabled
648 * Vamic2Ena = disabled
649 */
650 INIT_REGULATOR_REGISTER(AB8505_VAUDIOSUPPLY, 0x1e, 0x00),
651 /*
652 * Vamic1_dzout = high-Z when Vamic1 is disabled
653 * Vamic2_dzout = high-Z when Vamic2 is disabled
654 */
655 INIT_REGULATOR_REGISTER(AB8505_REGUCTRL1VAMIC, 0x03, 0x00),
656 /*
657 * VsmpsARegu
658 * VsmpsASelCtrl
659 * VsmpsAAutoMode
660 * VsmpsAPWMMode
661 */
662 INIT_REGULATOR_REGISTER(AB8505_VSMPSAREGU, 0x00, 0x00),
663 /*
664 * VsmpsBRegu
665 * VsmpsBSelCtrl
666 * VsmpsBAutoMode
667 * VsmpsBPWMMode
668 */
669 INIT_REGULATOR_REGISTER(AB8505_VSMPSBREGU, 0x00, 0x00),
670 /*
671 * VsafeRegu
672 * VsafeSelCtrl
673 * VsafeAutoMode
674 * VsafePWMMode
675 */
676 INIT_REGULATOR_REGISTER(AB8505_VSAFEREGU, 0x00, 0x00),
677 /*
678 * VPll = Hw controlled (NOTE! PRCMU bits)
679 * VanaRegu = force off
680 */
681 INIT_REGULATOR_REGISTER(AB8505_VPLLVANAREGU, 0x0f, 0x02),
682 /*
683 * VextSupply1Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
684 * VextSupply2Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
685 * VextSupply3Regu = force OFF (OTP_ExtSupply3LPnPolarity 0)
686 * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
687 * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
688 */
689 INIT_REGULATOR_REGISTER(AB8505_EXTSUPPLYREGU, 0xff, 0x30),
690 /*
691 * Vaux1Regu = force HP
692 * Vaux2Regu = force off
693 */
694 INIT_REGULATOR_REGISTER(AB8505_VAUX12REGU, 0x0f, 0x01),
695 /*
696 * Vaux3Regu = force off
697 */
698 INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3REGU, 0x03, 0x00),
699 /*
700 * VsmpsASel1
701 */
702 INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL1, 0x00, 0x00),
703 /*
704 * VsmpsASel2
705 */
706 INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL2, 0x00, 0x00),
707 /*
708 * VsmpsASel3
709 */
710 INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL3, 0x00, 0x00),
711 /*
712 * VsmpsBSel1
713 */
714 INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL1, 0x00, 0x00),
715 /*
716 * VsmpsBSel2
717 */
718 INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL2, 0x00, 0x00),
719 /*
720 * VsmpsBSel3
721 */
722 INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL3, 0x00, 0x00),
723 /*
724 * VsafeSel1
725 */
726 INIT_REGULATOR_REGISTER(AB8505_VSAFESEL1, 0x00, 0x00),
727 /*
728 * VsafeSel2
729 */
730 INIT_REGULATOR_REGISTER(AB8505_VSAFESEL2, 0x00, 0x00),
731 /*
732 * VsafeSel3
733 */
734 INIT_REGULATOR_REGISTER(AB8505_VSAFESEL3, 0x00, 0x00),
735 /*
736 * Vaux1Sel = 2.8 V
737 */
738 INIT_REGULATOR_REGISTER(AB8505_VAUX1SEL, 0x0f, 0x0C),
739 /*
740 * Vaux2Sel = 2.9 V
741 */
742 INIT_REGULATOR_REGISTER(AB8505_VAUX2SEL, 0x0f, 0x0d),
743 /*
744 * Vaux3Sel = 2.91 V
745 */
746 INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3SEL, 0x07, 0x07),
747 /*
748 * Vaux4RequestCtrl
749 */
750 INIT_REGULATOR_REGISTER(AB8505_VAUX4REQCTRL, 0x00, 0x00),
751 /*
752 * Vaux4Regu
753 */
754 INIT_REGULATOR_REGISTER(AB8505_VAUX4REGU, 0x00, 0x00),
755 /*
756 * Vaux4Sel
757 */
758 INIT_REGULATOR_REGISTER(AB8505_VAUX4SEL, 0x00, 0x00),
759 /*
760 * Vaux1Disch = short discharge time
761 * Vaux2Disch = short discharge time
762 * Vaux3Disch = short discharge time
763 * Vintcore12Disch = short discharge time
764 * VTVoutDisch = short discharge time
765 * VaudioDisch = short discharge time
766 */
767 INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH, 0xfc, 0x00),
768 /*
769 * VanaDisch = short discharge time
770 * Vaux8PullDownEna = pulldown disabled when Vaux8 is disabled
771 * Vaux8Disch = short discharge time
772 */
773 INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH2, 0x16, 0x00),
774 /*
775 * Vaux4Disch = short discharge time
776 */
777 INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH3, 0x01, 0x00),
778 /*
779 * Vaux5Sel
780 * Vaux5LP
781 * Vaux5Ena
782 * Vaux5Disch
783 * Vaux5DisSfst
784 * Vaux5DisPulld
785 */
786 INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX5, 0x00, 0x00),
787 /*
788 * Vaux6Sel
789 * Vaux6LP
790 * Vaux6Ena
791 * Vaux6DisPulld
792 */
793 INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX6, 0x00, 0x00),
794};
795
796struct regulator_init_data ab8505_regulators[AB8505_NUM_REGULATORS] = {
797 /* supplies to the display/camera */
798 [AB8505_LDO_AUX1] = {
799 .constraints = {
800 .name = "V-DISPLAY",
801 .min_uV = 2800000,
802 .max_uV = 3300000,
803 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
804 REGULATOR_CHANGE_STATUS,
805 .boot_on = 1, /* display is on at boot */
806 },
807 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
808 .consumer_supplies = ab8500_vaux1_consumers,
809 },
810 /* supplies to the on-board eMMC */
811 [AB8505_LDO_AUX2] = {
812 .constraints = {
813 .name = "V-eMMC1",
814 .min_uV = 1100000,
815 .max_uV = 3300000,
816 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
817 REGULATOR_CHANGE_STATUS |
818 REGULATOR_CHANGE_MODE,
819 .valid_modes_mask = REGULATOR_MODE_NORMAL |
820 REGULATOR_MODE_IDLE,
821 },
822 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
823 .consumer_supplies = ab8500_vaux2_consumers,
824 },
825 /* supply for VAUX3, supplies to SDcard slots */
826 [AB8505_LDO_AUX3] = {
827 .constraints = {
828 .name = "V-MMC-SD",
829 .min_uV = 1100000,
830 .max_uV = 3300000,
831 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
832 REGULATOR_CHANGE_STATUS |
833 REGULATOR_CHANGE_MODE,
834 .valid_modes_mask = REGULATOR_MODE_NORMAL |
835 REGULATOR_MODE_IDLE,
836 },
837 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
838 .consumer_supplies = ab8500_vaux3_consumers,
839 },
840 /* supply for VAUX4, supplies to NFC and standalone secure element */
841 [AB8505_LDO_AUX4] = {
842 .constraints = {
843 .name = "V-NFC-SE",
844 .min_uV = 1100000,
845 .max_uV = 3300000,
846 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
847 REGULATOR_CHANGE_STATUS |
848 REGULATOR_CHANGE_MODE,
849 .valid_modes_mask = REGULATOR_MODE_NORMAL |
850 REGULATOR_MODE_IDLE,
851 },
852 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux4_consumers),
853 .consumer_supplies = ab8505_vaux4_consumers,
854 },
855 /* supply for VAUX5, supplies to TBD */
856 [AB8505_LDO_AUX5] = {
857 .constraints = {
858 .name = "V-AUX5",
859 .min_uV = 1050000,
860 .max_uV = 2790000,
861 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
862 REGULATOR_CHANGE_STATUS |
863 REGULATOR_CHANGE_MODE,
864 .valid_modes_mask = REGULATOR_MODE_NORMAL |
865 REGULATOR_MODE_IDLE,
866 },
867 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux5_consumers),
868 .consumer_supplies = ab8505_vaux5_consumers,
869 },
870 /* supply for VAUX6, supplies to TBD */
871 [AB8505_LDO_AUX6] = {
872 .constraints = {
873 .name = "V-AUX6",
874 .min_uV = 1050000,
875 .max_uV = 2790000,
876 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
877 REGULATOR_CHANGE_STATUS |
878 REGULATOR_CHANGE_MODE,
879 .valid_modes_mask = REGULATOR_MODE_NORMAL |
880 REGULATOR_MODE_IDLE,
881 },
882 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux6_consumers),
883 .consumer_supplies = ab8505_vaux6_consumers,
884 },
885 /* supply for gpadc, ADC LDO */
886 [AB8505_LDO_ADC] = {
887 .constraints = {
888 .name = "V-ADC",
889 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
890 },
891 .num_consumer_supplies = ARRAY_SIZE(ab8505_vadc_consumers),
892 .consumer_supplies = ab8505_vadc_consumers,
893 },
894 /* supply for ab8500-vaudio, VAUDIO LDO */
895 [AB8505_LDO_AUDIO] = {
896 .constraints = {
897 .name = "V-AUD",
898 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
899 },
900 .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
901 .consumer_supplies = ab8500_vaud_consumers,
902 },
903 /* supply for v-anamic1 VAMic1-LDO */
904 [AB8505_LDO_ANAMIC1] = {
905 .constraints = {
906 .name = "V-AMIC1",
907 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
908 REGULATOR_CHANGE_MODE,
909 .valid_modes_mask = REGULATOR_MODE_NORMAL |
910 REGULATOR_MODE_IDLE,
911 },
912 .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
913 .consumer_supplies = ab8500_vamic1_consumers,
914 },
915 /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
916 [AB8505_LDO_ANAMIC2] = {
917 .constraints = {
918 .name = "V-AMIC2",
919 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
920 REGULATOR_CHANGE_MODE,
921 .valid_modes_mask = REGULATOR_MODE_NORMAL |
922 REGULATOR_MODE_IDLE,
923 },
924 .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
925 .consumer_supplies = ab8500_vamic2_consumers,
926 },
927 /* supply for v-aux8, VAUX8 LDO */
928 [AB8505_LDO_AUX8] = {
929 .constraints = {
930 .name = "V-AUX8",
931 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
932 },
933 .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux8_consumers),
934 .consumer_supplies = ab8505_vaux8_consumers,
935 },
936 /* supply for v-intcore12, VINTCORE12 LDO */
937 [AB8505_LDO_INTCORE] = {
938 .constraints = {
939 .name = "V-INTCORE",
940 .min_uV = 1250000,
941 .max_uV = 1350000,
942 .input_uV = 1800000,
943 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
944 REGULATOR_CHANGE_STATUS |
945 REGULATOR_CHANGE_MODE |
946 REGULATOR_CHANGE_DRMS,
947 .valid_modes_mask = REGULATOR_MODE_NORMAL |
948 REGULATOR_MODE_IDLE,
949 },
950 .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
951 .consumer_supplies = ab8500_vintcore_consumers,
952 },
953 /* supply for LDO USB */
954 [AB8505_LDO_USB] = {
955 .constraints = {
956 .name = "V-USB",
957 .valid_ops_mask = REGULATOR_CHANGE_STATUS |
958 REGULATOR_CHANGE_MODE,
959 .valid_modes_mask = REGULATOR_MODE_NORMAL |
960 REGULATOR_MODE_IDLE,
961 },
962 .num_consumer_supplies = ARRAY_SIZE(ab8505_usb_consumers),
963 .consumer_supplies = ab8505_usb_consumers,
964 },
965 /* supply for U8500 CSI-DSI, VANA LDO */
966 [AB8505_LDO_ANA] = {
967 .constraints = {
968 .name = "V-CSI-DSI",
969 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
970 },
971 .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
972 .consumer_supplies = ab8500_vana_consumers,
973 },
974};
975
976struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
977 .reg_init = ab8500_reg_init,
978 .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
979 .regulator = ab8500_regulators,
980 .num_regulator = ARRAY_SIZE(ab8500_regulators),
981 .ext_regulator = ab8500_ext_regulators,
982 .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
983};
984
985/* Use the AB8500 init settings for AB8505 as they are the same right now */
986struct ab8500_regulator_platform_data ab8505_regulator_plat_data = {
987 .reg_init = ab8505_reg_init,
988 .num_reg_init = ARRAY_SIZE(ab8505_reg_init),
989 .regulator = ab8505_regulators,
990 .num_regulator = ARRAY_SIZE(ab8505_regulators),
991};
992
993static void ab8500_modify_reg_init(int id, u8 mask, u8 value)
994{
995 int i;
996
997 if (cpu_is_u8520()) {
998 for (i = ARRAY_SIZE(ab8505_reg_init) - 1; i >= 0; i--) {
999 if (ab8505_reg_init[i].id == id) {
1000 u8 initval = ab8505_reg_init[i].value;
1001 initval = (initval & ~mask) | (value & mask);
1002 ab8505_reg_init[i].value = initval;
1003
1004 BUG_ON(mask & ~ab8505_reg_init[i].mask);
1005 return;
1006 }
1007 }
1008 } else {
1009 for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) {
1010 if (ab8500_reg_init[i].id == id) {
1011 u8 initval = ab8500_reg_init[i].value;
1012 initval = (initval & ~mask) | (value & mask);
1013 ab8500_reg_init[i].value = initval;
1014
1015 BUG_ON(mask & ~ab8500_reg_init[i].mask);
1016 return;
1017 }
1018 }
1019 }
1020
1021 BUG_ON(1);
1022}
1023
1024void mop500_regulator_init(void)
1025{
1026 struct regulator_init_data *regulator;
1027
1028 /*
1029 * Temporarily turn on Vaux2 on 8520 machine
1030 */
1031 if (cpu_is_u8520()) {
1032 /* Vaux2 initialized to be on */
1033 ab8500_modify_reg_init(AB8505_VAUX12REGU, 0x0f, 0x05);
1034 }
1035
1036 /*
1037 * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for
1038 * all HREFP_V20 boards)
1039 */
1040 if (cpu_is_u8500v20()) {
1041 /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */
1042 ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01);
1043
1044 /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */
1045 ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2,
1046 0x20, 0x20);
1047
1048 /* VextSupply2 = force HP at initialization */
1049 ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04);
1050
1051 /* enable VextSupply2 during platform active */
1052 regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
1053 regulator->constraints.always_on = 1;
1054
1055 /* disable VextSupply2 in suspend */
1056 regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
1057 regulator->constraints.state_mem.disabled = 1;
1058 regulator->constraints.state_standby.disabled = 1;
1059
1060 /* enable VextSupply2 HW control (used in suspend) */
1061 regulator->driver_data = (void *)&ab8500_ext_supply2;
1062 }
1063}
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index 78a0642a2206..9bece38fe933 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -14,10 +14,11 @@
14#include <linux/regulator/machine.h> 14#include <linux/regulator/machine.h>
15#include <linux/regulator/ab8500.h> 15#include <linux/regulator/ab8500.h>
16 16
17extern struct ab8500_regulator_reg_init 17extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
18ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS]; 18extern struct ab8500_regulator_platform_data ab8505_regulator_plat_data;
19extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
20extern struct regulator_init_data tps61052_regulator; 19extern struct regulator_init_data tps61052_regulator;
21extern struct regulator_init_data gpio_en_3v3_regulator; 20extern struct regulator_init_data gpio_en_3v3_regulator;
22 21
22void mop500_regulator_init(void);
23
23#endif 24#endif
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 051b62c27102..7f2cb6c5e2c1 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
81#endif 81#endif
82 82
83struct mmci_platform_data mop500_sdi0_data = { 83struct mmci_platform_data mop500_sdi0_data = {
84 .ios_handler = mop500_sdi0_ios_handler,
85 .ocr_mask = MMC_VDD_29_30, 84 .ocr_mask = MMC_VDD_29_30,
86 .f_max = 50000000, 85 .f_max = 50000000,
87 .capabilities = MMC_CAP_4_BIT_DATA | 86 .capabilities = MMC_CAP_4_BIT_DATA |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index b03457881c4b..ce672378a830 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/clk.h>
15#include <linux/io.h> 16#include <linux/io.h>
16#include <linux/i2c.h> 17#include <linux/i2c.h>
17#include <linux/platform_data/i2c-nomadik.h> 18#include <linux/platform_data/i2c-nomadik.h>
@@ -198,10 +199,7 @@ static struct platform_device snowball_sbnet_dev = {
198 199
199struct ab8500_platform_data ab8500_platdata = { 200struct ab8500_platform_data ab8500_platdata = {
200 .irq_base = MOP500_AB8500_IRQ_BASE, 201 .irq_base = MOP500_AB8500_IRQ_BASE,
201 .regulator_reg_init = ab8500_regulator_reg_init, 202 .regulator = &ab8500_regulator_plat_data,
202 .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
203 .regulator = ab8500_regulators,
204 .num_regulator = ARRAY_SIZE(ab8500_regulators),
205 .gpio = &ab8500_gpio_pdata, 203 .gpio = &ab8500_gpio_pdata,
206 .codec = &ab8500_codec_pdata, 204 .codec = &ab8500_codec_pdata,
207}; 205};
@@ -439,6 +437,15 @@ static void mop500_prox_deactivate(struct device *dev)
439 regulator_put(prox_regulator); 437 regulator_put(prox_regulator);
440} 438}
441 439
440void mop500_snowball_ethernet_clock_enable(void)
441{
442 struct clk *clk;
443
444 clk = clk_get_sys("fsmc", NULL);
445 if (!IS_ERR(clk))
446 clk_prepare_enable(clk);
447}
448
442static struct cryp_platform_data u8500_cryp1_platform_data = { 449static struct cryp_platform_data u8500_cryp1_platform_data = {
443 .mem_to_engine = { 450 .mem_to_engine = {
444 .dir = STEDMA40_MEM_TO_PERIPH, 451 .dir = STEDMA40_MEM_TO_PERIPH,
@@ -683,6 +690,8 @@ static void __init snowball_init_machine(void)
683 mop500_audio_init(parent); 690 mop500_audio_init(parent);
684 mop500_uart_init(parent); 691 mop500_uart_init(parent);
685 692
693 mop500_snowball_ethernet_clock_enable();
694
686 /* This board has full regulator constraints */ 695 /* This board has full regulator constraints */
687 regulator_has_full_constraints(); 696 regulator_has_full_constraints();
688} 697}
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index eaa605f5d90d..d38951be70df 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
104void __init snowball_pinmaps_init(void); 104void __init snowball_pinmaps_init(void);
105void __init hrefv60_pinmaps_init(void); 105void __init hrefv60_pinmaps_init(void);
106void mop500_audio_init(struct device *parent); 106void mop500_audio_init(struct device *parent);
107void mop500_snowball_ethernet_clock_enable(void);
107 108
108int __init mop500_uib_init(void); 109int __init mop500_uib_init(void);
109void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, 110void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 19235cf7bbe3..f1a581844372 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
312 /* Pinmaps must be in place before devices register */ 312 /* Pinmaps must be in place before devices register */
313 if (of_machine_is_compatible("st-ericsson,mop500")) 313 if (of_machine_is_compatible("st-ericsson,mop500"))
314 mop500_pinmaps_init(); 314 mop500_pinmaps_init();
315 else if (of_machine_is_compatible("calaosystems,snowball-a9500")) 315 else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
316 snowball_pinmaps_init(); 316 snowball_pinmaps_init();
317 else if (of_machine_is_compatible("st-ericsson,hrefv60+")) 317 mop500_snowball_ethernet_clock_enable();
318 } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
318 hrefv60_pinmaps_init(); 319 hrefv60_pinmaps_init();
319 else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} 320 else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
320 /* TODO: Add pinmaps for ccu9540 board. */ 321 /* TODO: Add pinmaps for ccu9540 board. */
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 915683cb67d6..c5e20b52e3b7 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -21,6 +21,8 @@
21#include <linux/regulator/fixed.h> 21#include <linux/regulator/fixed.h>
22#include <linux/regulator/machine.h> 22#include <linux/regulator/machine.h>
23#include <linux/vexpress.h> 23#include <linux/vexpress.h>
24#include <linux/clk-provider.h>
25#include <linux/clkdev.h>
24 26
25#include <asm/arch_timer.h> 27#include <asm/arch_timer.h>
26#include <asm/mach-types.h> 28#include <asm/mach-types.h>
@@ -433,7 +435,7 @@ static void __init v2m_dt_timer_init(void)
433{ 435{
434 struct device_node *node = NULL; 436 struct device_node *node = NULL;
435 437
436 vexpress_clk_of_init(); 438 of_clk_init(NULL);
437 439
438 do { 440 do {
439 node = of_find_compatible_node(node, NULL, "arm,sp804"); 441 node = of_find_compatible_node(node, NULL, "arm,sp804");
@@ -441,6 +443,10 @@ static void __init v2m_dt_timer_init(void)
441 if (node) { 443 if (node) {
442 pr_info("Using SP804 '%s' as a clock & events source\n", 444 pr_info("Using SP804 '%s' as a clock & events source\n",
443 node->full_name); 445 node->full_name);
446 WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
447 "timclken1"), "v2m-timer0", "sp804"));
448 WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
449 "timclken2"), "v2m-timer1", "sp804"));
444 v2m_sp804_init(of_iomap(node, 0), 450 v2m_sp804_init(of_iomap(node, 0),
445 irq_of_parse_and_map(node, 0)); 451 irq_of_parse_and_map(node, 0));
446 } 452 }
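
The two clk_register_clkdev() calls above publish the DT clocks under the con_ids "v2m-timer0"/"v2m-timer1" for a device named "sp804", which is how the SP804 timer code looks up its clock once of_clk_init() has registered the providers. Roughly the consumer side, sketched under that assumption (the real sp804 code differs in detail):

#include <linux/clk.h>
#include <linux/err.h>

/* Illustrative only: fetch and enable a clock registered as above. */
static struct clk *example_sp804_clock(const char *name)
{
	struct clk *clk = clk_get_sys("sp804", name);	/* e.g. "v2m-timer0" */

	if (IS_ERR(clk))
		return NULL;

	if (clk_prepare_enable(clk)) {
		clk_put(clk);
		return NULL;
	}
	return clk;
}
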
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index 7abdb9645c5b..e65a80a1ac75 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/cpu.h>
22 23
23#include <linux/mtd/physmap.h> 24#include <linux/mtd/physmap.h>
24#include <linux/mtd/mtd.h> 25#include <linux/mtd/mtd.h>
@@ -531,7 +532,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = {
531 532
532void __init nuc900_board_init(struct platform_device **device, int size) 533void __init nuc900_board_init(struct platform_device **device, int size)
533{ 534{
534 disable_hlt(); 535 cpu_idle_poll_ctrl(true);
535 platform_add_devices(device, size); 536 platform_add_devices(device, size);
536 platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev)); 537 platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev));
537 spi_register_board_info(nuc900_spi_board_info, 538 spi_register_board_info(nuc900_spi_board_info,
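
disable_hlt() is gone; cpu_idle_poll_ctrl(true), declared in the newly included <linux/cpu.h>, is the replacement and is a counted switch, so every 'true' call should eventually be balanced with 'false'. This board never re-enables the normal idle path, but a hypothetical driver that only needs polling idle temporarily would pair the calls:

#include <linux/cpu.h>

static void example_enter_no_halt(void)
{
	cpu_idle_poll_ctrl(true);	/* force polling idle, like the old disable_hlt() */
}

static void example_leave_no_halt(void)
{
	cpu_idle_poll_ctrl(false);	/* allow WFI-based idle again */
}
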
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 025d17328730..4045c4931a30 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
43 depends on !MMU 43 depends on !MMU
44 select CPU_32v4T 44 select CPU_32v4T
45 select CPU_ABRT_LV4T 45 select CPU_ABRT_LV4T
46 select CPU_CACHE_V3 # although the core is v4t 46 select CPU_CACHE_V4
47 select CPU_CP15_MPU 47 select CPU_CP15_MPU
48 select CPU_PABRT_LEGACY 48 select CPU_PABRT_LEGACY
49 help 49 help
@@ -469,9 +469,6 @@ config CPU_PABRT_V7
469 bool 469 bool
470 470
471# The cache model 471# The cache model
472config CPU_CACHE_V3
473 bool
474
475config CPU_CACHE_V4 472config CPU_CACHE_V4
476 bool 473 bool
477 474
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 4e333fa2756f..9e51be96f635 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
33obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o 33obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
34obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o 34obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
35 35
36obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
37obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o 36obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
38obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o 37obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
39obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o 38obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc3..48bc3c0a87ce 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
343 outer_cache.inv_range = feroceon_l2_inv_range; 343 outer_cache.inv_range = feroceon_l2_inv_range;
344 outer_cache.clean_range = feroceon_l2_clean_range; 344 outer_cache.clean_range = feroceon_l2_clean_range;
345 outer_cache.flush_range = feroceon_l2_flush_range; 345 outer_cache.flush_range = feroceon_l2_flush_range;
346 outer_cache.inv_all = l2_inv_all;
346 347
347 enable_l2(); 348 enable_l2();
348 349
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
299 int lockregs; 299 int lockregs;
300 int i; 300 int i;
301 301
302 switch (cache_id) { 302 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
303 case L2X0_CACHE_ID_PART_L310: 303 case L2X0_CACHE_ID_PART_L310:
304 lockregs = 8; 304 lockregs = 8;
305 break; 305 break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
333 if (cache_id_part_number_from_dt) 333 if (cache_id_part_number_from_dt)
334 cache_id = cache_id_part_number_from_dt; 334 cache_id = cache_id_part_number_from_dt;
335 else 335 else
336 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) 336 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
337 & L2X0_CACHE_ID_PART_MASK;
338 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 337 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
339 338
340 aux &= aux_mask; 339 aux &= aux_mask;
341 aux |= aux_val; 340 aux |= aux_val;
342 341
343 /* Determine the number of ways */ 342 /* Determine the number of ways */
344 switch (cache_id) { 343 switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
345 case L2X0_CACHE_ID_PART_L310: 344 case L2X0_CACHE_ID_PART_L310:
346 if (aux & (1 << 16)) 345 if (aux & (1 << 16))
347 ways = 16; 346 ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
725 .flush_all = l2x0_flush_all, 724 .flush_all = l2x0_flush_all,
726 .inv_all = l2x0_inv_all, 725 .inv_all = l2x0_inv_all,
727 .disable = l2x0_disable, 726 .disable = l2x0_disable,
728 .set_debug = pl310_set_debug,
729 }, 727 },
730}; 728};
731 729
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
814 data->save(); 812 data->save();
815 813
816 of_init = true; 814 of_init = true;
817 l2x0_init(l2x0_base, aux_val, aux_mask);
818
819 memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); 815 memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
816 l2x0_init(l2x0_base, aux_val, aux_mask);
820 817
821 return 0; 818 return 0;
822} 819}
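
The L2X0_CACHE_ID register packs implementer, part number and RTL revision into one word, so comparing the raw value against L2X0_CACHE_ID_PART_L310 only matches when the revision bits happen to be zero; the hunks above mask before comparing. A stand-alone sketch of the difference, with a made-up register value and the field constants as assumed from the cache-l2x0 header:

#include <stdio.h>

#define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
#define L2X0_CACHE_ID_PART_L310		(3 << 6)

int main(void)
{
	unsigned int cache_id = 0x410000c8;	/* hypothetical: L310 part, non-zero RTL bits */

	/* Unmasked compare never matches once revision bits are set... */
	printf("raw match:    %d\n", cache_id == L2X0_CACHE_ID_PART_L310);
	/* ...masking first isolates the part number, as the fix does. */
	printf("masked match: %d\n",
	       (cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310);
	return 0;
}
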
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d3..000000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * linux/arch/arm/mm/cache-v3.S
3 *
4 * Copyright (C) 1997-2002 Russell king
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/linkage.h>
11#include <linux/init.h>
12#include <asm/page.h>
13#include "proc-macros.S"
14
15/*
16 * flush_icache_all()
17 *
18 * Unconditionally clean and invalidate the entire icache.
19 */
20ENTRY(v3_flush_icache_all)
21 mov pc, lr
22ENDPROC(v3_flush_icache_all)
23
24/*
25 * flush_user_cache_all()
26 *
27 * Invalidate all cache entries in a particular address
28 * space.
29 *
30 * - mm - mm_struct describing address space
31 */
32ENTRY(v3_flush_user_cache_all)
33 /* FALLTHROUGH */
34/*
35 * flush_kern_cache_all()
36 *
37 * Clean and invalidate the entire cache.
38 */
39ENTRY(v3_flush_kern_cache_all)
40 /* FALLTHROUGH */
41
42/*
43 * flush_user_cache_range(start, end, flags)
44 *
45 * Invalidate a range of cache entries in the specified
46 * address space.
47 *
48 * - start - start address (may not be aligned)
49 * - end - end address (exclusive, may not be aligned)
50 * - flags - vma_area_struct flags describing address space
51 */
52ENTRY(v3_flush_user_cache_range)
53 mov ip, #0
54 mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
55 mov pc, lr
56
57/*
58 * coherent_kern_range(start, end)
59 *
60 * Ensure coherency between the Icache and the Dcache in the
61 * region described by start. If you have non-snooping
62 * Harvard caches, you need to implement this function.
63 *
64 * - start - virtual start address
65 * - end - virtual end address
66 */
67ENTRY(v3_coherent_kern_range)
68 /* FALLTHROUGH */
69
70/*
71 * coherent_user_range(start, end)
72 *
73 * Ensure coherency between the Icache and the Dcache in the
74 * region described by start. If you have non-snooping
75 * Harvard caches, you need to implement this function.
76 *
77 * - start - virtual start address
78 * - end - virtual end address
79 */
80ENTRY(v3_coherent_user_range)
81 mov r0, #0
82 mov pc, lr
83
84/*
85 * flush_kern_dcache_area(void *page, size_t size)
86 *
87 * Ensure no D cache aliasing occurs, either with itself or
88 * the I cache
89 *
90 * - addr - kernel address
91 * - size - region size
92 */
93ENTRY(v3_flush_kern_dcache_area)
94 /* FALLTHROUGH */
95
96/*
97 * dma_flush_range(start, end)
98 *
99 * Clean and invalidate the specified virtual address range.
100 *
101 * - start - virtual start address
102 * - end - virtual end address
103 */
104ENTRY(v3_dma_flush_range)
105 mov r0, #0
106 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
107 mov pc, lr
108
109/*
110 * dma_unmap_area(start, size, dir)
111 * - start - kernel virtual start address
112 * - size - size of region
113 * - dir - DMA direction
114 */
115ENTRY(v3_dma_unmap_area)
116 teq r2, #DMA_TO_DEVICE
117 bne v3_dma_flush_range
118 /* FALLTHROUGH */
119
120/*
121 * dma_map_area(start, size, dir)
122 * - start - kernel virtual start address
123 * - size - size of region
124 * - dir - DMA direction
125 */
126ENTRY(v3_dma_map_area)
127 mov pc, lr
128ENDPROC(v3_dma_unmap_area)
129ENDPROC(v3_dma_map_area)
130
131 .globl v3_flush_kern_cache_louis
132 .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
133
134 __INITDATA
135
136 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
137 define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be677..a7ba68f59f0c 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
58ENTRY(v4_flush_user_cache_range) 58ENTRY(v4_flush_user_cache_range)
59#ifdef CONFIG_CPU_CP15 59#ifdef CONFIG_CPU_CP15
60 mov ip, #0 60 mov ip, #0
61 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache 61 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
62 mov pc, lr 62 mov pc, lr
63#else 63#else
64 /* FALLTHROUGH */ 64 /* FALLTHROUGH */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a5a4b2bc42ba..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
48static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); 48static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
49static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); 49static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
50 50
51static DEFINE_PER_CPU(atomic64_t, active_asids); 51DEFINE_PER_CPU(atomic64_t, active_asids);
52static DEFINE_PER_CPU(u64, reserved_asids); 52static DEFINE_PER_CPU(u64, reserved_asids);
53static cpumask_t tlb_flush_pending; 53static cpumask_t tlb_flush_pending;
54 54
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { 215 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
216 local_flush_bp_all(); 216 local_flush_bp_all();
217 local_flush_tlb_all(); 217 local_flush_tlb_all();
218 dummy_flush_tlb_a15_erratum();
218 } 219 }
219 220
220 atomic64_set(&per_cpu(active_asids, cpu), asid); 221 atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d3..e9db6b4bf65a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
342{ 342{
343 struct dma_pool *pool = &atomic_pool; 343 struct dma_pool *pool = &atomic_pool;
344 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); 344 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
345 gfp_t gfp = GFP_KERNEL | GFP_DMA;
345 unsigned long nr_pages = pool->size >> PAGE_SHIFT; 346 unsigned long nr_pages = pool->size >> PAGE_SHIFT;
346 unsigned long *bitmap; 347 unsigned long *bitmap;
347 struct page *page; 348 struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
361 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, 362 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
362 atomic_pool_init); 363 atomic_pool_init);
363 else 364 else
364 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, 365 ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
365 &page, atomic_pool_init); 366 atomic_pool_init);
366 if (ptr) { 367 if (ptr) {
367 int i; 368 int i;
368 369
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad722f1208a5..9a5cdc01fcdf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -99,6 +99,9 @@ void show_mem(unsigned int filter)
99 printk("Mem-info:\n"); 99 printk("Mem-info:\n");
100 show_free_areas(filter); 100 show_free_areas(filter);
101 101
102 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
103 return;
104
102 for_each_bank (i, mi) { 105 for_each_bank (i, mi) {
103 struct membank *bank = &mi->bank[i]; 106 struct membank *bank = &mi->bank[i];
104 unsigned int pfn1, pfn2; 107 unsigned int pfn1, pfn2;
@@ -424,24 +427,6 @@ void __init bootmem_init(void)
424 max_pfn = max_high - PHYS_PFN_OFFSET; 427 max_pfn = max_high - PHYS_PFN_OFFSET;
425} 428}
426 429
427static inline int free_area(unsigned long pfn, unsigned long end, char *s)
428{
429 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
430
431 for (; pfn < end; pfn++) {
432 struct page *page = pfn_to_page(pfn);
433 ClearPageReserved(page);
434 init_page_count(page);
435 __free_page(page);
436 pages++;
437 }
438
439 if (size && s)
440 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
441
442 return pages;
443}
444
445/* 430/*
446 * Poison init memory with an undefined instruction (ARM) or a branch to an 431 * Poison init memory with an undefined instruction (ARM) or a branch to an
447 * undefined instruction (Thumb). 432 * undefined instruction (Thumb).
@@ -534,6 +519,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
534#endif 519#endif
535} 520}
536 521
522#ifdef CONFIG_HIGHMEM
523static inline void free_area_high(unsigned long pfn, unsigned long end)
524{
525 for (; pfn < end; pfn++)
526 free_highmem_page(pfn_to_page(pfn));
527}
528#endif
529
537static void __init free_highpages(void) 530static void __init free_highpages(void)
538{ 531{
539#ifdef CONFIG_HIGHMEM 532#ifdef CONFIG_HIGHMEM
@@ -569,8 +562,7 @@ static void __init free_highpages(void)
569 if (res_end > end) 562 if (res_end > end)
570 res_end = end; 563 res_end = end;
571 if (res_start != start) 564 if (res_start != start)
572 totalhigh_pages += free_area(start, res_start, 565 free_area_high(start, res_start);
573 NULL);
574 start = res_end; 566 start = res_end;
575 if (start == end) 567 if (start == end)
576 break; 568 break;
@@ -578,9 +570,8 @@ static void __init free_highpages(void)
578 570
579 /* And now free anything which remains */ 571 /* And now free anything which remains */
580 if (start < end) 572 if (start < end)
581 totalhigh_pages += free_area(start, end, NULL); 573 free_area_high(start, end);
582 } 574 }
583 totalram_pages += totalhigh_pages;
584#endif 575#endif
585} 576}
586 577
@@ -609,8 +600,7 @@ void __init mem_init(void)
609 600
610#ifdef CONFIG_SA1111 601#ifdef CONFIG_SA1111
611 /* now that our DMA memory is actually so designated, we can free it */ 602 /* now that our DMA memory is actually so designated, we can free it */
612 totalram_pages += free_area(PHYS_PFN_OFFSET, 603 free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
613 __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
614#endif 604#endif
615 605
616 free_highpages(); 606 free_highpages();
@@ -738,16 +728,12 @@ void free_initmem(void)
738 extern char __tcm_start, __tcm_end; 728 extern char __tcm_start, __tcm_end;
739 729
740 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); 730 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
741 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), 731 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
742 __phys_to_pfn(__pa(&__tcm_end)),
743 "TCM link");
744#endif 732#endif
745 733
746 poison_init_mem(__init_begin, __init_end - __init_begin); 734 poison_init_mem(__init_begin, __init_end - __init_begin);
747 if (!machine_is_integrator() && !machine_is_cintegrator()) 735 if (!machine_is_integrator() && !machine_is_cintegrator())
748 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 736 free_initmem_default(0);
749 __phys_to_pfn(__pa(__init_end)),
750 "init");
751} 737}
752 738
753#ifdef CONFIG_BLK_DEV_INITRD 739#ifdef CONFIG_BLK_DEV_INITRD
@@ -758,9 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
758{ 744{
759 if (!keep_initrd) { 745 if (!keep_initrd) {
760 poison_init_mem((void *)start, PAGE_ALIGN(end) - start); 746 poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
761 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 747 free_reserved_area(start, end, 0, "initrd");
762 __phys_to_pfn(__pa(end)),
763 "initrd");
764 } 748 }
765} 749}
766 750
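
The removed free_area() helper open-coded per-page freeing plus a size report in KiB via (end - pfn) << (PAGE_SHIFT - 10); the hunks above switch to the generic free_highmem_page(), free_reserved_area() and free_initmem_default() helpers, which keep the same accounting in common mm code. The shift is just a pages-to-KiB conversion, as this small stand-alone check shows:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages, as on these ARM configurations */

int main(void)
{
	unsigned long start_pfn = 0x200, end_pfn = 0x300;	/* hypothetical range */
	unsigned long pages = end_pfn - start_pfn;

	/* one page = 2^PAGE_SHIFT bytes = 2^(PAGE_SHIFT - 10) KiB */
	unsigned long kib = pages << (PAGE_SHIFT - 10);

	printf("Freeing %lu pages = %luK\n", pages, kib);	/* 256 pages = 1024K */
	return 0;
}
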
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78f..a84ff763ac39 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
34#include <asm/mach/pci.h> 34#include <asm/mach/pci.h>
35 35
36#include "mm.h" 36#include "mm.h"
37#include "tcm.h"
37 38
38/* 39/*
39 * empty_zero_page is a special page that is used for 40 * empty_zero_page is a special page that is used for
@@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
598 } while (pte++, addr += PAGE_SIZE, addr != end); 599 } while (pte++, addr += PAGE_SIZE, addr != end);
599} 600}
600 601
601static void __init alloc_init_section(pud_t *pud, unsigned long addr, 602static void __init map_init_section(pmd_t *pmd, unsigned long addr,
602 unsigned long end, phys_addr_t phys, 603 unsigned long end, phys_addr_t phys,
603 const struct mem_type *type) 604 const struct mem_type *type)
604{ 605{
605 pmd_t *pmd = pmd_offset(pud, addr); 606#ifndef CONFIG_ARM_LPAE
606
607 /* 607 /*
608 * Try a section mapping - end, addr and phys must all be aligned 608 * In classic MMU format, puds and pmds are folded in to
609 * to a section boundary. Note that PMDs refer to the individual 609 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
610 * L1 entries, whereas PGDs refer to a group of L1 entries making 610 * group of L1 entries making up one logical pointer to
611 * up one logical pointer to an L2 table. 611 * an L2 table (2MB), where as PMDs refer to the individual
612 * L1 entries (1MB). Hence increment to get the correct
613 * offset for odd 1MB sections.
614 * (See arch/arm/include/asm/pgtable-2level.h)
612 */ 615 */
613 if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { 616 if (addr & SECTION_SIZE)
614 pmd_t *p = pmd; 617 pmd++;
615
616#ifndef CONFIG_ARM_LPAE
617 if (addr & SECTION_SIZE)
618 pmd++;
619#endif 618#endif
619 do {
620 *pmd = __pmd(phys | type->prot_sect);
621 phys += SECTION_SIZE;
622 } while (pmd++, addr += SECTION_SIZE, addr != end);
620 623
621 do { 624 flush_pmd_entry(pmd);
622 *pmd = __pmd(phys | type->prot_sect); 625}
623 phys += SECTION_SIZE;
624 } while (pmd++, addr += SECTION_SIZE, addr != end);
625 626
626 flush_pmd_entry(p); 627static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
627 } else { 628 unsigned long end, phys_addr_t phys,
629 const struct mem_type *type)
630{
631 pmd_t *pmd = pmd_offset(pud, addr);
632 unsigned long next;
633
634 do {
628 /* 635 /*
629 * No need to loop; pte's aren't interested in the 636 * With LPAE, we must loop over to map
630 * individual L1 entries. 637 * all the pmds for the given range.
631 */ 638 */
632 alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); 639 next = pmd_addr_end(addr, end);
633 } 640
641 /*
642 * Try a section mapping - addr, next and phys must all be
643 * aligned to a section boundary.
644 */
645 if (type->prot_sect &&
646 ((addr | next | phys) & ~SECTION_MASK) == 0) {
647 map_init_section(pmd, addr, next, phys, type);
648 } else {
649 alloc_init_pte(pmd, addr, next,
650 __phys_to_pfn(phys), type);
651 }
652
653 phys += next - addr;
654
655 } while (pmd++, addr = next, addr != end);
634} 656}
635 657
636static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, 658static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
641 663
642 do { 664 do {
643 next = pud_addr_end(addr, end); 665 next = pud_addr_end(addr, end);
644 alloc_init_section(pud, addr, next, phys, type); 666 alloc_init_pmd(pud, addr, next, phys, type);
645 phys += next - addr; 667 phys += next - addr;
646 } while (pud++, addr = next, addr != end); 668 } while (pud++, addr = next, addr != end);
647} 669}
@@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
1256 dma_contiguous_remap(); 1278 dma_contiguous_remap();
1257 devicemaps_init(mdesc); 1279 devicemaps_init(mdesc);
1258 kmap_init(); 1280 kmap_init();
1281 tcm_init();
1259 1282
1260 top_pmd = pmd_off_k(0xffff0000); 1283 top_pmd = pmd_off_k(0xffff0000);
1261 1284
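
The alloc_init_pmd()/map_init_section() split above reflects the classic (non-LPAE) layout described in the new comment: one Linux pmd entry spans 2 MB backed by two 1 MB hardware sections, which is why the code bumps the pmd pointer for the odd megabyte. A toy model of map_init_section() under that assumption, using invented addresses:

#include <stdio.h>

#define SECTION_SIZE	0x00100000UL		/* 1 MB hardware section */
#define SECTION_MASK	(~(SECTION_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xc0100000UL;	/* hypothetical, starts in the odd MB */
	unsigned long end  = 0xc0400000UL;
	unsigned long phys = 0x80100000UL;
	int idx = (addr & SECTION_SIZE) ? 1 : 0;	/* the 'pmd++' in the patch */

	do {
		printf("L1[%d] = 0x%08lx | prot_sect\n", idx++, phys & SECTION_MASK);
		phys += SECTION_SIZE;
	} while ((addr += SECTION_SIZE) != end);
	return 0;
}
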
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f20..fde2d2a794cf 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
77 mcr p15, 0, r0, c6, c0 @ set area 0, default 77 mcr p15, 0, r0, c6, c0 @ set area 0, default
78 78
79 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM 79 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
80 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) 80 ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
81 mov r2, #10 @ 11 is the minimum (4KB) 81 mov r4, #10 @ 11 is the minimum (4KB)
821: add r2, r2, #1 @ area size *= 2 821: add r4, r4, #1 @ area size *= 2
83 mov r1, r1, lsr #1 83 movs r3, r3, lsr #1
84 bne 1b @ count not zero r-shift 84 bne 1b @ count not zero r-shift
85 orr r0, r0, r2, lsl #1 @ the area register value 85 orr r0, r0, r4, lsl #1 @ the area register value
86 orr r0, r0, #1 @ set enable bit 86 orr r0, r0, #1 @ set enable bit
87 mcr p15, 0, r0, c6, c1 @ set area 1, RAM 87 mcr p15, 0, r0, c6, c1 @ set area 1, RAM
88 88
89 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH 89 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
90 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) 90 ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
91 mov r2, #10 @ 11 is the minimum (4KB) 91 cmp r3, #0
921: add r2, r2, #1 @ area size *= 2 92 moveq r0, #0
93 mov r1, r1, lsr #1 93 beq 2f
94 mov r4, #10 @ 11 is the minimum (4KB)
951: add r4, r4, #1 @ area size *= 2
96 movs r3, r3, lsr #1
94 bne 1b @ count not zero r-shift 97 bne 1b @ count not zero r-shift
95 orr r0, r0, r2, lsl #1 @ the area register value 98 orr r0, r0, r4, lsl #1 @ the area register value
96 orr r0, r0, #1 @ set enable bit 99 orr r0, r0, #1 @ set enable bit
97 mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH 1002: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
98 101
99 mov r0, #0x06 102 mov r0, #0x06
100 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable 103 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
137 .long 0x41807400 140 .long 0x41807400
138 .long 0xfffffff0 141 .long 0xfffffff0
139 .long 0 142 .long 0
143 .long 0
140 b __arm740_setup 144 b __arm740_setup
141 .long cpu_arch_name 145 .long cpu_arch_name
142 .long cpu_elf_name 146 .long cpu_elf_name
143 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT 147 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
144 .long cpu_arm740_name 148 .long cpu_arm740_name
145 .long arm740_processor_functions 149 .long arm740_processor_functions
146 .long 0 150 .long 0
147 .long 0 151 .long 0
148 .long v3_cache_fns @ cache model 152 .long v4_cache_fns @ cache model
149 .size __arm740_proc_info, . - __arm740_proc_info 153 .size __arm740_proc_info, . - __arm740_proc_info
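
The __arm740_setup changes move the scratch computation off r1/r2 onto r3/r4, use movs so the loop exit condition really tests the shifted value, and skip programming the flash region entirely when CONFIG_FLASH_SIZE is zero. The loop itself derives the MPU area-size field; assuming the usual encoding where field value N selects a 2^(N+1)-byte region (so 4 KB gives 11), a C rendering looks like this:

#include <stdio.h>

/* C model of the size-encoding loop; the patch adds an explicit size == 0
 * check before it, so zero never reaches this point. */
static unsigned int mpu_size_field(unsigned long size_bytes)
{
	unsigned long pages = size_bytes >> 12;	/* size in 4 KB units */
	unsigned int enc = 10;			/* one below the 4 KB encoding */

	while (pages) {
		enc++;
		pages >>= 1;
	}
	return enc;
}

int main(void)
{
	printf("4 KB  -> %u\n", mpu_size_field(4UL << 10));	/* 11 */
	printf("64 MB -> %u\n", mpu_size_field(64UL << 20));	/* 25 */
	return 0;
}
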
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
387/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 387/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
388.globl cpu_arm920_suspend_size 388.globl cpu_arm920_suspend_size
389.equ cpu_arm920_suspend_size, 4 * 3 389.equ cpu_arm920_suspend_size, 4 * 3
390#ifdef CONFIG_PM_SLEEP 390#ifdef CONFIG_ARM_CPU_SUSPEND
391ENTRY(cpu_arm920_do_suspend) 391ENTRY(cpu_arm920_do_suspend)
392 stmfd sp!, {r4 - r6, lr} 392 stmfd sp!, {r4 - r6, lr}
393 mrc p15, 0, r4, c13, c0, 0 @ PID 393 mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
402/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 402/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
403.globl cpu_arm926_suspend_size 403.globl cpu_arm926_suspend_size
404.equ cpu_arm926_suspend_size, 4 * 3 404.equ cpu_arm926_suspend_size, 4 * 3
405#ifdef CONFIG_PM_SLEEP 405#ifdef CONFIG_ARM_CPU_SUSPEND
406ENTRY(cpu_arm926_do_suspend) 406ENTRY(cpu_arm926_do_suspend)
407 stmfd sp!, {r4 - r6, lr} 407 stmfd sp!, {r4 - r6, lr}
408 mrc p15, 0, r4, c13, c0, 0 @ PID 408 mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
350 350
351.globl cpu_mohawk_suspend_size 351.globl cpu_mohawk_suspend_size
352.equ cpu_mohawk_suspend_size, 4 * 6 352.equ cpu_mohawk_suspend_size, 4 * 6
353#ifdef CONFIG_PM_SLEEP 353#ifdef CONFIG_ARM_CPU_SUSPEND
354ENTRY(cpu_mohawk_do_suspend) 354ENTRY(cpu_mohawk_do_suspend)
355 stmfd sp!, {r4 - r9, lr} 355 stmfd sp!, {r4 - r9, lr}
356 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 356 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
172 172
173.globl cpu_sa1100_suspend_size 173.globl cpu_sa1100_suspend_size
174.equ cpu_sa1100_suspend_size, 4 * 3 174.equ cpu_sa1100_suspend_size, 4 * 3
175#ifdef CONFIG_PM_SLEEP 175#ifdef CONFIG_ARM_CPU_SUSPEND
176ENTRY(cpu_sa1100_do_suspend) 176ENTRY(cpu_sa1100_do_suspend)
177 stmfd sp!, {r4 - r6, lr} 177 stmfd sp!, {r4 - r6, lr}
178 mrc p15, 0, r4, c3, c0, 0 @ domain ID 178 mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d4..054b491ff764 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
17 17
18#ifndef MULTI_CPU 18#ifndef MULTI_CPU
19EXPORT_SYMBOL(cpu_dcache_clean_area); 19EXPORT_SYMBOL(cpu_dcache_clean_area);
20#ifdef CONFIG_MMU
20EXPORT_SYMBOL(cpu_set_pte_ext); 21EXPORT_SYMBOL(cpu_set_pte_ext);
22#endif
21#else 23#else
22EXPORT_SYMBOL(processor); 24EXPORT_SYMBOL(processor);
23#endif 25#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de9325..5c07ee4fe3eb 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
138/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 138/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
139.globl cpu_v6_suspend_size 139.globl cpu_v6_suspend_size
140.equ cpu_v6_suspend_size, 4 * 6 140.equ cpu_v6_suspend_size, 4 * 6
141#ifdef CONFIG_PM_SLEEP 141#ifdef CONFIG_ARM_CPU_SUSPEND
142ENTRY(cpu_v6_do_suspend) 142ENTRY(cpu_v6_do_suspend)
143 stmfd sp!, {r4 - r9, lr} 143 stmfd sp!, {r4 - r9, lr}
144 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 144 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..f584d3f5b37c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
420__v7_ca7mp_proc_info: 420__v7_ca7mp_proc_info:
421 .long 0x410fc070 421 .long 0x410fc070
422 .long 0xff0ffff0 422 .long 0xff0ffff0
423 __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV 423 __v7_proc __v7_ca7mp_setup
424 .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info 424 .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
425 425
426 /* 426 /*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
430__v7_ca15mp_proc_info: 430__v7_ca15mp_proc_info:
431 .long 0x410fc0f0 431 .long 0x410fc0f0
432 .long 0xff0ffff0 432 .long 0xff0ffff0
433 __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV 433 __v7_proc __v7_ca15mp_setup
434 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 434 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
435 435
436 /* 436 /*
437 * Qualcomm Inc. Krait processors.
438 */
439 .type __krait_proc_info, #object
440__krait_proc_info:
441 .long 0x510f0400 @ Required ID value
442 .long 0xff0ffc00 @ Mask for ID
443 /*
444 * Some Krait processors don't indicate support for SDIV and UDIV
445 * instructions in the ARM instruction set, even though they actually
446 * do support them.
447 */
448 __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
449 .size __krait_proc_info, . - __krait_proc_info
450
451 /*
437 * Match any ARMv7 processor core. 452 * Match any ARMv7 processor core.
438 */ 453 */
439 .type __v7_proc_info, #object 454 .type __v7_proc_info, #object
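
The new __krait_proc_info entry forces HWCAP_IDIV because the hardware supports sdiv/udiv without advertising it; userspace sees the result through the ELF auxiliary vector. A hypothetical check from user space (the HWCAP_IDIVA/HWCAP_IDIVT bit positions are copied here as assumptions rather than included from the uapi header):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_IDIVA	(1 << 17)	/* ARM-state sdiv/udiv */
#define HWCAP_IDIVT	(1 << 18)	/* Thumb-state sdiv/udiv */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("idiv (ARM):   %s\n", (hwcap & HWCAP_IDIVA) ? "yes" : "no");
	printf("idiv (Thumb): %s\n", (hwcap & HWCAP_IDIVT) ? "yes" : "no");
	return 0;
}
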
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
413 413
414.globl cpu_xsc3_suspend_size 414.globl cpu_xsc3_suspend_size
415.equ cpu_xsc3_suspend_size, 4 * 6 415.equ cpu_xsc3_suspend_size, 4 * 6
416#ifdef CONFIG_PM_SLEEP 416#ifdef CONFIG_ARM_CPU_SUSPEND
417ENTRY(cpu_xsc3_do_suspend) 417ENTRY(cpu_xsc3_do_suspend)
418 stmfd sp!, {r4 - r9, lr} 418 stmfd sp!, {r4 - r9, lr}
419 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 419 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
528 528
529.globl cpu_xscale_suspend_size 529.globl cpu_xscale_suspend_size
530.equ cpu_xscale_suspend_size, 4 * 6 530.equ cpu_xscale_suspend_size, 4 * 6
531#ifdef CONFIG_PM_SLEEP 531#ifdef CONFIG_ARM_CPU_SUSPEND
532ENTRY(cpu_xscale_do_suspend) 532ENTRY(cpu_xscale_do_suspend)
533 stmfd sp!, {r4 - r9, lr} 533 stmfd sp!, {r4 - r9, lr}
534 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 534 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h
index 8015ad434a40..8015ad434a40 100644
--- a/arch/arm/kernel/tcm.h
+++ b/arch/arm/mm/tcm.h
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6828ef6ce80e..a0bd8a755bdf 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -576,7 +576,7 @@ load_ind:
576 /* x = ((*(frame + k)) & 0xf) << 2; */ 576 /* x = ((*(frame + k)) & 0xf) << 2; */
577 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 577 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
578 /* the interpreter should deal with the negative K */ 578 /* the interpreter should deal with the negative K */
579 if (k < 0) 579 if ((int)k < 0)
580 return -1; 580 return -1;
581 /* offset in r1: we might have to take the slow path */ 581 /* offset in r1: we might have to take the slow path */
582 emit_mov_i(r_off, k, ctx); 582 emit_mov_i(r_off, k, ctx);
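
In the JIT, k is an unsigned 32-bit immediate, so the original 'k < 0' test can never fire and negative (ancillary-data) offsets were wrongly JITed instead of being left to the interpreter. Casting to int restores the intended check, as this stand-alone comparison shows:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t k = (uint32_t)-0x1000;		/* a negative offset as stored in a u32 */

	printf("k < 0        -> %d\n", k < 0);		/* always 0: unsigned compare */
	printf("(int)k < 0   -> %d\n", (int32_t)k < 0);	/* 1: the fixed check */
	return 0;
}
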
diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c
index febe3862873c..807ac8e5cbc0 100644
--- a/arch/arm/plat-orion/addr-map.c
+++ b/arch/arm/plat-orion/addr-map.c
@@ -157,9 +157,12 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
157 u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i)); 157 u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i));
158 158
159 /* 159 /*
160 * Chip select enabled? 160 * We only take care of entries for which the chip
161 * select is enabled, and that don't have high base
162 * address bits set (devices can only access the first
163 * 32 bits of the memory).
161 */ 164 */
162 if (size & 1) { 165 if ((size & 1) && !(base & 0xF)) {
163 struct mbus_dram_window *w; 166 struct mbus_dram_window *w;
164 167
165 w = &orion_mbus_dram_info.cs[cs++]; 168 w = &orion_mbus_dram_info.cs[cs++];
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 51afedda9ab6..03db14d8ace9 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11*/ 11*/
12 12
13#include <linux/amba/pl330.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
@@ -1552,6 +1553,9 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
1552 pd.num_cs = num_cs; 1553 pd.num_cs = num_cs;
1553 pd.src_clk_nr = src_clk_nr; 1554 pd.src_clk_nr = src_clk_nr;
1554 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; 1555 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
1556#ifdef CONFIG_PL330_DMA
1557 pd.filter = pl330_filter;
1558#endif
1555 1559
1556 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); 1560 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
1557} 1561}
@@ -1590,6 +1594,9 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
1590 pd.num_cs = num_cs; 1594 pd.num_cs = num_cs;
1591 pd.src_clk_nr = src_clk_nr; 1595 pd.src_clk_nr = src_clk_nr;
1592 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio; 1596 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
1597#ifdef CONFIG_PL330_DMA
1598 pd.filter = pl330_filter;
1599#endif
1593 1600
1594 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1); 1601 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
1595} 1602}
@@ -1628,6 +1635,9 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
1628 pd.num_cs = num_cs; 1635 pd.num_cs = num_cs;
1629 pd.src_clk_nr = src_clk_nr; 1636 pd.src_clk_nr = src_clk_nr;
1630 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio; 1637 pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
1638#ifdef CONFIG_PL330_DMA
1639 pd.filter = pl330_filter;
1640#endif
1631 1641
1632 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2); 1642 s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
1633} 1643}
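
Setting pd.filter = pl330_filter lets the SPI driver pick a PL330 channel through the dmaengine core's filter mechanism. A sketch of the client side under that assumption (hypothetical helper; 'peri_id' stands for whatever token the platform passes back to the filter):

#include <linux/dmaengine.h>

static struct dma_chan *example_request_chan(bool (*filter)(struct dma_chan *, void *),
					     void *peri_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The core walks candidate channels and calls filter(chan, peri_id)
	 * until one returns true. */
	return dma_request_channel(mask, filter, peri_id);
}
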
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index b885322717a1..9ae507270785 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -15,55 +15,7 @@
15#ifndef __PLAT_S3C_FB_H 15#ifndef __PLAT_S3C_FB_H
16#define __PLAT_S3C_FB_H __FILE__ 16#define __PLAT_S3C_FB_H __FILE__
17 17
18/* S3C_FB_MAX_WIN 18#include <linux/platform_data/video_s3c.h>
19 * Set to the maximum number of windows that any of the supported hardware
20 * can use. Since the platform data uses this for an array size, having it
21 * set to the maximum of any version of the hardware can do is safe.
22 */
23#define S3C_FB_MAX_WIN (5)
24
25/**
26 * struct s3c_fb_pd_win - per window setup data
27 * @xres : The window X size.
28 * @yres : The window Y size.
29 * @virtual_x: The virtual X size.
30 * @virtual_y: The virtual Y size.
31 */
32struct s3c_fb_pd_win {
33 unsigned short default_bpp;
34 unsigned short max_bpp;
35 unsigned short xres;
36 unsigned short yres;
37 unsigned short virtual_x;
38 unsigned short virtual_y;
39};
40
41/**
42 * struct s3c_fb_platdata - S3C driver platform specific information
43 * @setup_gpio: Setup the external GPIO pins to the right state to transfer
44 * the data from the display system to the connected display
45 * device.
46 * @vidcon0: The base vidcon0 values to control the panel data format.
47 * @vidcon1: The base vidcon1 values to control the panel data output.
48 * @vtiming: Video timing when connected to a RGB type panel.
49 * @win: The setup data for each hardware window, or NULL for unused.
50 * @display_mode: The LCD output display mode.
51 *
52 * The platform data supplies the video driver with all the information
53 * it requires to work with the display(s) attached to the machine. It
54 * controls the initial mode, the number of display windows (0 is always
55 * the base framebuffer) that are initialised etc.
56 *
57 */
58struct s3c_fb_platdata {
59 void (*setup_gpio)(void);
60
61 struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN];
62 struct fb_videomode *vtiming;
63
64 u32 vidcon0;
65 u32 vidcon1;
66};
67 19
68/** 20/**
69 * s3c_fb_set_platdata() - Setup the FB device with platform data. 21 * s3c_fb_set_platdata() - Setup the FB device with platform data.
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 29c26a818842..f05f2afa440d 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -1,281 +1 @@
1/* arch/arm/plat-samsung/include/plat/regs-serial.h #include <linux/serial_s3c.h>
2 *
3 * From linux/include/asm-arm/hardware/serial_s3c2410.h
4 *
5 * Internal header file for Samsung S3C2410 serial ports (UART0-2)
6 *
7 * Copyright (C) 2002 Shane Nay (shane@minirl.com)
8 *
9 * Additional defines, Copyright 2003 Simtec Electronics (linux@simtec.co.uk)
10 *
11 * Adapted from:
12 *
13 * Internal header file for MX1ADS serial ports (UART1 & 2)
14 *
15 * Copyright (C) 2002 Shane Nay (shane@minirl.com)
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30*/
31
32#ifndef __ASM_ARM_REGS_SERIAL_H
33#define __ASM_ARM_REGS_SERIAL_H
34
35#define S3C24XX_VA_UART0 (S3C_VA_UART)
36#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 )
37#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 )
38#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 )
39
40#define S3C2410_PA_UART0 (S3C24XX_PA_UART)
41#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 )
42#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 )
43#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 )
44
45#define S3C2410_URXH (0x24)
46#define S3C2410_UTXH (0x20)
47#define S3C2410_ULCON (0x00)
48#define S3C2410_UCON (0x04)
49#define S3C2410_UFCON (0x08)
50#define S3C2410_UMCON (0x0C)
51#define S3C2410_UBRDIV (0x28)
52#define S3C2410_UTRSTAT (0x10)
53#define S3C2410_UERSTAT (0x14)
54#define S3C2410_UFSTAT (0x18)
55#define S3C2410_UMSTAT (0x1C)
56
57#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3))
58
59#define S3C2410_LCON_CS5 (0x0)
60#define S3C2410_LCON_CS6 (0x1)
61#define S3C2410_LCON_CS7 (0x2)
62#define S3C2410_LCON_CS8 (0x3)
63#define S3C2410_LCON_CSMASK (0x3)
64
65#define S3C2410_LCON_PNONE (0x0)
66#define S3C2410_LCON_PEVEN (0x5 << 3)
67#define S3C2410_LCON_PODD (0x4 << 3)
68#define S3C2410_LCON_PMASK (0x7 << 3)
69
70#define S3C2410_LCON_STOPB (1<<2)
71#define S3C2410_LCON_IRM (1<<6)
72
73#define S3C2440_UCON_CLKMASK (3<<10)
74#define S3C2440_UCON_CLKSHIFT (10)
75#define S3C2440_UCON_PCLK (0<<10)
76#define S3C2440_UCON_UCLK (1<<10)
77#define S3C2440_UCON_PCLK2 (2<<10)
78#define S3C2440_UCON_FCLK (3<<10)
79#define S3C2443_UCON_EPLL (3<<10)
80
81#define S3C6400_UCON_CLKMASK (3<<10)
82#define S3C6400_UCON_CLKSHIFT (10)
83#define S3C6400_UCON_PCLK (0<<10)
84#define S3C6400_UCON_PCLK2 (2<<10)
85#define S3C6400_UCON_UCLK0 (1<<10)
86#define S3C6400_UCON_UCLK1 (3<<10)
87
88#define S3C2440_UCON2_FCLK_EN (1<<15)
89#define S3C2440_UCON0_DIVMASK (15 << 12)
90#define S3C2440_UCON1_DIVMASK (15 << 12)
91#define S3C2440_UCON2_DIVMASK (7 << 12)
92#define S3C2440_UCON_DIVSHIFT (12)
93
94#define S3C2412_UCON_CLKMASK (3<<10)
95#define S3C2412_UCON_CLKSHIFT (10)
96#define S3C2412_UCON_UCLK (1<<10)
97#define S3C2412_UCON_USYSCLK (3<<10)
98#define S3C2412_UCON_PCLK (0<<10)
99#define S3C2412_UCON_PCLK2 (2<<10)
100
101#define S3C2410_UCON_CLKMASK (1 << 10)
102#define S3C2410_UCON_CLKSHIFT (10)
103#define S3C2410_UCON_UCLK (1<<10)
104#define S3C2410_UCON_SBREAK (1<<4)
105
106#define S3C2410_UCON_TXILEVEL (1<<9)
107#define S3C2410_UCON_RXILEVEL (1<<8)
108#define S3C2410_UCON_TXIRQMODE (1<<2)
109#define S3C2410_UCON_RXIRQMODE (1<<0)
110#define S3C2410_UCON_RXFIFO_TOI (1<<7)
111#define S3C2443_UCON_RXERR_IRQEN (1<<6)
112#define S3C2443_UCON_LOOPBACK (1<<5)
113
114#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
115 S3C2410_UCON_RXILEVEL | \
116 S3C2410_UCON_TXIRQMODE | \
117 S3C2410_UCON_RXIRQMODE | \
118 S3C2410_UCON_RXFIFO_TOI)
119
120#define S3C2410_UFCON_FIFOMODE (1<<0)
121#define S3C2410_UFCON_TXTRIG0 (0<<6)
122#define S3C2410_UFCON_RXTRIG8 (1<<4)
123#define S3C2410_UFCON_RXTRIG12 (2<<4)
124
125/* S3C2440 FIFO trigger levels */
126#define S3C2440_UFCON_RXTRIG1 (0<<4)
127#define S3C2440_UFCON_RXTRIG8 (1<<4)
128#define S3C2440_UFCON_RXTRIG16 (2<<4)
129#define S3C2440_UFCON_RXTRIG32 (3<<4)
130
131#define S3C2440_UFCON_TXTRIG0 (0<<6)
132#define S3C2440_UFCON_TXTRIG16 (1<<6)
133#define S3C2440_UFCON_TXTRIG32 (2<<6)
134#define S3C2440_UFCON_TXTRIG48 (3<<6)
135
136#define S3C2410_UFCON_RESETBOTH (3<<1)
137#define S3C2410_UFCON_RESETTX (1<<2)
138#define S3C2410_UFCON_RESETRX (1<<1)
139
140#define S3C2410_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
141 S3C2410_UFCON_TXTRIG0 | \
142 S3C2410_UFCON_RXTRIG8 )
143
144#define S3C2410_UMCOM_AFC (1<<4)
145#define S3C2410_UMCOM_RTS_LOW (1<<0)
146
147#define S3C2412_UMCON_AFC_63 (0<<5) /* same as s3c2443 */
148#define S3C2412_UMCON_AFC_56 (1<<5)
149#define S3C2412_UMCON_AFC_48 (2<<5)
150#define S3C2412_UMCON_AFC_40 (3<<5)
151#define S3C2412_UMCON_AFC_32 (4<<5)
152#define S3C2412_UMCON_AFC_24 (5<<5)
153#define S3C2412_UMCON_AFC_16 (6<<5)
154#define S3C2412_UMCON_AFC_8 (7<<5)
155
156#define S3C2410_UFSTAT_TXFULL (1<<9)
157#define S3C2410_UFSTAT_RXFULL (1<<8)
158#define S3C2410_UFSTAT_TXMASK (15<<4)
159#define S3C2410_UFSTAT_TXSHIFT (4)
160#define S3C2410_UFSTAT_RXMASK (15<<0)
161#define S3C2410_UFSTAT_RXSHIFT (0)
162
163/* UFSTAT S3C2443 same as S3C2440 */
164#define S3C2440_UFSTAT_TXFULL (1<<14)
165#define S3C2440_UFSTAT_RXFULL (1<<6)
166#define S3C2440_UFSTAT_TXSHIFT (8)
167#define S3C2440_UFSTAT_RXSHIFT (0)
168#define S3C2440_UFSTAT_TXMASK (63<<8)
169#define S3C2440_UFSTAT_RXMASK (63)
170
171#define S3C2410_UTRSTAT_TXE (1<<2)
172#define S3C2410_UTRSTAT_TXFE (1<<1)
173#define S3C2410_UTRSTAT_RXDR (1<<0)
174
175#define S3C2410_UERSTAT_OVERRUN (1<<0)
176#define S3C2410_UERSTAT_FRAME (1<<2)
177#define S3C2410_UERSTAT_BREAK (1<<3)
178#define S3C2443_UERSTAT_PARITY (1<<1)
179
180#define S3C2410_UERSTAT_ANY (S3C2410_UERSTAT_OVERRUN | \
181 S3C2410_UERSTAT_FRAME | \
182 S3C2410_UERSTAT_BREAK)
183
184#define S3C2410_UMSTAT_CTS (1<<0)
185#define S3C2410_UMSTAT_DeltaCTS (1<<2)
186
187#define S3C2443_DIVSLOT (0x2C)
188
189/* S3C64XX interrupt registers. */
190#define S3C64XX_UINTP 0x30
191#define S3C64XX_UINTSP 0x34
192#define S3C64XX_UINTM 0x38
193
194#define S3C64XX_UINTM_RXD (0)
195#define S3C64XX_UINTM_TXD (2)
196#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
197#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
198
199/* Following are specific to S5PV210 */
200#define S5PV210_UCON_CLKMASK (1<<10)
201#define S5PV210_UCON_CLKSHIFT (10)
202#define S5PV210_UCON_PCLK (0<<10)
203#define S5PV210_UCON_UCLK (1<<10)
204
205#define S5PV210_UFCON_TXTRIG0 (0<<8)
206#define S5PV210_UFCON_TXTRIG4 (1<<8)
207#define S5PV210_UFCON_TXTRIG8 (2<<8)
208#define S5PV210_UFCON_TXTRIG16 (3<<8)
209#define S5PV210_UFCON_TXTRIG32 (4<<8)
210#define S5PV210_UFCON_TXTRIG64 (5<<8)
211#define S5PV210_UFCON_TXTRIG128 (6<<8)
212#define S5PV210_UFCON_TXTRIG256 (7<<8)
213
214#define S5PV210_UFCON_RXTRIG1 (0<<4)
215#define S5PV210_UFCON_RXTRIG4 (1<<4)
216#define S5PV210_UFCON_RXTRIG8 (2<<4)
217#define S5PV210_UFCON_RXTRIG16 (3<<4)
218#define S5PV210_UFCON_RXTRIG32 (4<<4)
219#define S5PV210_UFCON_RXTRIG64 (5<<4)
220#define S5PV210_UFCON_RXTRIG128 (6<<4)
221#define S5PV210_UFCON_RXTRIG256 (7<<4)
222
223#define S5PV210_UFSTAT_TXFULL (1<<24)
224#define S5PV210_UFSTAT_RXFULL (1<<8)
225#define S5PV210_UFSTAT_TXMASK (255<<16)
226#define S5PV210_UFSTAT_TXSHIFT (16)
227#define S5PV210_UFSTAT_RXMASK (255<<0)
228#define S5PV210_UFSTAT_RXSHIFT (0)
229
230#define S3C2410_UCON_CLKSEL0 (1 << 0)
231#define S3C2410_UCON_CLKSEL1 (1 << 1)
232#define S3C2410_UCON_CLKSEL2 (1 << 2)
233#define S3C2410_UCON_CLKSEL3 (1 << 3)
234
235/* Default values for s5pv210 UCON and UFCON uart registers */
236#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
237 S3C2410_UCON_RXILEVEL | \
238 S3C2410_UCON_TXIRQMODE | \
239 S3C2410_UCON_RXIRQMODE | \
240 S3C2410_UCON_RXFIFO_TOI | \
241 S3C2443_UCON_RXERR_IRQEN)
242
243#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
244 S5PV210_UFCON_TXTRIG4 | \
245 S5PV210_UFCON_RXTRIG4)
246
247#ifndef __ASSEMBLY__
248
249/* configuration structure for per-machine configurations for the
250 * serial port
251 *
252 * the pointer is setup by the machine specific initialisation from the
253 * arch/arm/mach-s3c2410/ directory.
254*/
255
256struct s3c2410_uartcfg {
257 unsigned char hwport; /* hardware port number */
258 unsigned char unused;
259 unsigned short flags;
260 upf_t uart_flags; /* default uart flags */
261 unsigned int clk_sel;
262
263 unsigned int has_fracval;
264
265 unsigned long ucon; /* value of ucon for port */
266 unsigned long ulcon; /* value of ulcon for port */
267 unsigned long ufcon; /* value of ufcon for port */
268};
269
270/* s3c24xx_uart_devs
271 *
272 * this is exported from the core as we cannot use driver_register(),
273 * or platform_add_device() before the console_initcall()
274*/
275
276extern struct platform_device *s3c24xx_uart_devs[4];
277
278#endif /* __ASSEMBLY__ */
279
280#endif /* __ASM_ARM_REGS_SERIAL_H */
281
diff --git a/arch/arm/plat-samsung/include/plat/usb-phy.h b/arch/arm/plat-samsung/include/plat/usb-phy.h
index 959bcdb03a25..ab34dfadb7f9 100644
--- a/arch/arm/plat-samsung/include/plat/usb-phy.h
+++ b/arch/arm/plat-samsung/include/plat/usb-phy.h
@@ -11,10 +11,7 @@
11#ifndef __PLAT_SAMSUNG_USB_PHY_H 11#ifndef __PLAT_SAMSUNG_USB_PHY_H
12#define __PLAT_SAMSUNG_USB_PHY_H __FILE__ 12#define __PLAT_SAMSUNG_USB_PHY_H __FILE__
13 13
14enum s5p_usb_phy_type { 14#include <linux/usb/samsung_usb_phy.h>
15 S5P_USB_PHY_DEVICE,
16 S5P_USB_PHY_HOST,
17};
18 15
19extern int s5p_usb_phy_init(struct platform_device *pdev, int type); 16extern int s5p_usb_phy_init(struct platform_device *pdev, int type);
20extern int s5p_usb_phy_exit(struct platform_device *pdev, int type); 17extern int s5p_usb_phy_exit(struct platform_device *pdev, int type);
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 739d016eb273..8a08c31b5e20 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -10,7 +10,7 @@ choice
10 10
11config ARCH_SPEAR13XX 11config ARCH_SPEAR13XX
12 bool "ST SPEAr13xx with Device Tree" 12 bool "ST SPEAr13xx with Device Tree"
13 select ARCH_HAVE_CPUFREQ 13 select ARCH_HAS_CPUFREQ
14 select ARM_GIC 14 select ARM_GIC
15 select CPU_V7 15 select CPU_V7
16 select GPIO_SPEAR_SPICS 16 select GPIO_SPEAR_SPICS
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd70a68387eb..9b6d19f74078 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,7 +9,6 @@ config ARM64
9 select CLONE_BACKWARDS 9 select CLONE_BACKWARDS
10 select COMMON_CLK 10 select COMMON_CLK
11 select GENERIC_CLOCKEVENTS 11 select GENERIC_CLOCKEVENTS
12 select GENERIC_HARDIRQS_NO_DEPRECATED
13 select GENERIC_IOMAP 12 select GENERIC_IOMAP
14 select GENERIC_IRQ_PROBE 13 select GENERIC_IRQ_PROBE
15 select GENERIC_IRQ_SHOW 14 select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 51493430f142..1a6bfe954d49 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,17 +6,6 @@ config FRAME_POINTER
6 bool 6 bool
7 default y 7 default y
8 8
9config DEBUG_ERRORS
10 bool "Verbose kernel error messages"
11 depends on DEBUG_KERNEL
12 help
13 This option controls verbose debugging information which can be
14 printed when the kernel detects an internal error. This debugging
15 information is useful to kernel hackers when tracking down problems,
16 but mostly meaningless to other people. It's safe to say Y unless
17 you are concerned with the code size or don't want to see these
18 messages.
19
20config DEBUG_STACK_USAGE 9config DEBUG_STACK_USAGE
21 bool "Enable stack utilization instrumentation" 10 bool "Enable stack utilization instrumentation"
22 depends on DEBUG_KERNEL 11 depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9212c7880da7..09bef29f3a09 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
82CONFIG_DEBUG_INFO=y 82CONFIG_DEBUG_INFO=y
83# CONFIG_FTRACE is not set 83# CONFIG_FTRACE is not set
84CONFIG_ATOMIC64_SELFTEST=y 84CONFIG_ATOMIC64_SELFTEST=y
85CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h
index bde960720892..42e04c877428 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/asm/ucontext.h
@@ -22,7 +22,7 @@ struct ucontext {
22 stack_t uc_stack; 22 stack_t uc_stack;
23 sigset_t uc_sigmask; 23 sigset_t uc_sigmask;
24 /* glibc uses a 1024-bit sigset_t */ 24 /* glibc uses a 1024-bit sigset_t */
25 __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; 25 __u8 __unused[1024 / 8 - sizeof(sigset_t)];
26 /* last for future expansion */ 26 /* last for future expansion */
27 struct sigcontext uc_mcontext; 27 struct sigcontext uc_mcontext;
28}; 28};
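
The intent is that uc_sigmask plus the padding occupy exactly 1024 bits for glibc compatibility. The old expression mixed bits and bytes, so with an 8-byte kernel sigset_t it reserved 8 + (1024 - 8)/8 = 135 bytes; the new one gives 8 + (128 - 8) = 128 bytes. A small compile-time check of that arithmetic (the 64-bit sigset_t stand-in is an assumption of the demo):

typedef struct { unsigned long long sig[1]; } ksigset_t;	/* 8 bytes, like the arm64 kernel sigset_t */

enum {
	OLD_PAD = (1024 - sizeof(ksigset_t)) / 8,	/* (1024 - 8) / 8 = 127 */
	NEW_PAD = 1024 / 8 - sizeof(ksigset_t),		/* 128 - 8       = 120 */
};

_Static_assert(sizeof(ksigset_t) + OLD_PAD == 135, "old sizing overshoots 1024 bits");
_Static_assert(sizeof(ksigset_t) + NEW_PAD == 1024 / 8, "new sizing is exactly 1024 bits");

int main(void) { return 0; }
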
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index cef3925eaf60..aa3e948f7885 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
40EXPORT_SYMBOL(__clear_user); 40EXPORT_SYMBOL(__clear_user);
41 41
42 /* bitops */ 42 /* bitops */
43#ifdef CONFIG_SMP
43EXPORT_SYMBOL(__atomic_hash); 44EXPORT_SYMBOL(__atomic_hash);
45#endif
44 46
45 /* physical memory */ 47 /* physical memory */
46EXPORT_SYMBOL(memstart_addr); 48EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0337cdb0667b..83a0ad5936a5 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off);
84void (*pm_restart)(const char *cmd); 84void (*pm_restart)(const char *cmd);
85EXPORT_SYMBOL_GPL(pm_restart); 85EXPORT_SYMBOL_GPL(pm_restart);
86 86
87void arch_cpu_idle_prepare(void)
88{
89 local_fiq_enable();
90}
87 91
88/* 92/*
89 * This is our default idle handler. 93 * This is our default idle handler.
90 */ 94 */
91static void default_idle(void) 95void arch_cpu_idle(void)
92{ 96{
93 /* 97 /*
94 * This should do all the clock switching and wait for interrupt 98 * This should do all the clock switching and wait for interrupt
@@ -98,43 +102,6 @@ static void default_idle(void)
98 local_irq_enable(); 102 local_irq_enable();
99} 103}
100 104
101/*
102 * The idle thread.
103 * We always respect 'hlt_counter' to prevent low power idle.
104 */
105void cpu_idle(void)
106{
107 local_fiq_enable();
108
109 /* endless idle loop with no priority at all */
110 while (1) {
111 tick_nohz_idle_enter();
112 rcu_idle_enter();
113 while (!need_resched()) {
114 /*
115 * We need to disable interrupts here to ensure
116 * we don't miss a wakeup call.
117 */
118 local_irq_disable();
119 if (!need_resched()) {
120 stop_critical_timings();
121 default_idle();
122 start_critical_timings();
123 /*
124 * default_idle functions should always return
125 * with IRQs enabled.
126 */
127 WARN_ON(irqs_disabled());
128 } else {
129 local_irq_enable();
130 }
131 }
132 rcu_idle_exit();
133 tick_nohz_idle_exit();
134 schedule_preempt_disabled();
135 }
136}
137
138void machine_shutdown(void) 105void machine_shutdown(void)
139{ 106{
140#ifdef CONFIG_SMP 107#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 7f4f3673f2bc..e393174fe859 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
549 sigset_t *set, struct pt_regs *regs) 549 sigset_t *set, struct pt_regs *regs)
550{ 550{
551 struct compat_rt_sigframe __user *frame; 551 struct compat_rt_sigframe __user *frame;
552 compat_stack_t stack;
553 int err = 0; 552 int err = 0;
554 553
555 frame = compat_get_sigframe(ka, regs, sizeof(*frame)); 554 frame = compat_get_sigframe(ka, regs, sizeof(*frame));
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index bdd34597254b..261445c4666f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -216,7 +216,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
216 /* 216 /*
217 * OK, it's off to the idle thread for us 217 * OK, it's off to the idle thread for us
218 */ 218 */
219 cpu_idle(); 219 cpu_startup_entry(CPUHP_ONLINE);
220} 220}
221 221
222void __init smp_cpus_done(unsigned int max_cpus) 222void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 800aac306a08..f497ca77925a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -197,24 +197,6 @@ void __init bootmem_init(void)
197 max_pfn = max_low_pfn = max; 197 max_pfn = max_low_pfn = max;
198} 198}
199 199
200static inline int free_area(unsigned long pfn, unsigned long end, char *s)
201{
202 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
203
204 for (; pfn < end; pfn++) {
205 struct page *page = pfn_to_page(pfn);
206 ClearPageReserved(page);
207 init_page_count(page);
208 __free_page(page);
209 pages++;
210 }
211
212 if (size && s)
213 pr_info("Freeing %s memory: %dK\n", s, size);
214
215 return pages;
216}
217
218/* 200/*
219 * Poison init memory with an undefined instruction (0x0). 201 * Poison init memory with an undefined instruction (0x0).
220 */ 202 */
@@ -405,9 +387,7 @@ void __init mem_init(void)
405void free_initmem(void) 387void free_initmem(void)
406{ 388{
407 poison_init_mem(__init_begin, __init_end - __init_begin); 389 poison_init_mem(__init_begin, __init_end - __init_begin);
408 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 390 free_initmem_default(0);
409 __phys_to_pfn(__pa(__init_end)),
410 "init");
411} 391}
412 392
413#ifdef CONFIG_BLK_DEV_INITRD 393#ifdef CONFIG_BLK_DEV_INITRD
@@ -418,9 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
418{ 398{
419 if (!keep_initrd) { 399 if (!keep_initrd) {
420 poison_init_mem((void *)start, PAGE_ALIGN(end) - start); 400 poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
421 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 401 free_reserved_area(start, end, 0, "initrd");
422 __phys_to_pfn(__pa(end)),
423 "initrd");
424 } 402 }
425} 403}
426 404
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 224b44ab534e..eeecc9c8ed68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
261void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) 261void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
262{ 262{
263 unsigned long size, mask; 263 unsigned long size, mask;
264 bool page64k = IS_ENABLED(ARM64_64K_PAGES); 264 bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
265 pgd_t *pgd; 265 pgd_t *pgd;
266 pud_t *pud; 266 pud_t *pud;
267 pmd_t *pmd; 267 pmd_t *pmd;
@@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr)
391} 391}
392#ifdef CONFIG_SPARSEMEM_VMEMMAP 392#ifdef CONFIG_SPARSEMEM_VMEMMAP
393#ifdef CONFIG_ARM64_64K_PAGES 393#ifdef CONFIG_ARM64_64K_PAGES
394int __meminit vmemmap_populate(struct page *start_page, 394int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
395 unsigned long size, int node)
396{ 395{
397 return vmemmap_populate_basepages(start_page, size, node); 396 return vmemmap_populate_basepages(start, end, node);
398} 397}
399#else /* !CONFIG_ARM64_64K_PAGES */ 398#else /* !CONFIG_ARM64_64K_PAGES */
400int __meminit vmemmap_populate(struct page *start_page, 399int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
401 unsigned long size, int node)
402{ 400{
403 unsigned long addr = (unsigned long)start_page; 401 unsigned long addr = start;
404 unsigned long end = (unsigned long)(start_page + size);
405 unsigned long next; 402 unsigned long next;
406 pgd_t *pgd; 403 pgd_t *pgd;
407 pud_t *pud; 404 pud_t *pud;
@@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page,
434 return 0; 431 return 0;
435} 432}
436#endif /* CONFIG_ARM64_64K_PAGES */ 433#endif /* CONFIG_ARM64_64K_PAGES */
437void vmemmap_free(struct page *memmap, unsigned long nr_pages) 434void vmemmap_free(unsigned long start, unsigned long end)
438{ 435{
439} 436}
440#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 437#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 9b89257b2cfd..c1a868d398bd 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -7,7 +7,7 @@ config AVR32
7 select HAVE_OPROFILE 7 select HAVE_OPROFILE
8 select HAVE_KPROBES 8 select HAVE_KPROBES
9 select HAVE_GENERIC_HARDIRQS 9 select HAVE_GENERIC_HARDIRQS
10 select HAVE_VIRT_TO_BUS 10 select VIRT_TO_BUS
11 select GENERIC_IRQ_PROBE 11 select GENERIC_IRQ_PROBE
12 select GENERIC_ATOMIC64 12 select GENERIC_ATOMIC64
13 select HARDIRQS_SW_RESEND 13 select HARDIRQS_SW_RESEND
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index cf60d0a9f176..fc6483f83ccc 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
165#define readw_be __raw_readw 165#define readw_be __raw_readw
166#define readl_be __raw_readl 166#define readl_be __raw_readl
167 167
168#define writeb_relaxed writeb
169#define writew_relaxed writew
170#define writel_relaxed writel
171
168#define writeb_be __raw_writeb 172#define writeb_be __raw_writeb
169#define writew_be __raw_writew 173#define writew_be __raw_writew
170#define writel_be __raw_writel 174#define writel_be __raw_writel
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index fd78f58ea79a..073c3c2fa521 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -30,18 +30,9 @@ EXPORT_SYMBOL(pm_power_off);
30 * This file handles the architecture-dependent parts of process handling.. 30 * This file handles the architecture-dependent parts of process handling..
31 */ 31 */
32 32
33void cpu_idle(void) 33void arch_cpu_idle(void)
34{ 34{
35 /* endless idle loop with no priority at all */ 35 cpu_enter_idle();
36 while (1) {
37 tick_nohz_idle_enter();
38 rcu_idle_enter();
39 while (!need_resched())
40 cpu_idle_sleep();
41 rcu_idle_exit();
42 tick_nohz_idle_exit();
43 schedule_preempt_disabled();
44 }
45} 36}
46 37
47void machine_halt(void) 38void machine_halt(void)
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 05ad29112ff4..869a1c6ffeee 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -12,6 +12,7 @@
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/time.h> 14#include <linux/time.h>
15#include <linux/cpu.h>
15 16
16#include <asm/sysreg.h> 17#include <asm/sysreg.h>
17 18
@@ -87,13 +88,17 @@ static void comparator_mode(enum clock_event_mode mode,
87 pr_debug("%s: start\n", evdev->name); 88 pr_debug("%s: start\n", evdev->name);
88 /* FALLTHROUGH */ 89 /* FALLTHROUGH */
89 case CLOCK_EVT_MODE_RESUME: 90 case CLOCK_EVT_MODE_RESUME:
90 cpu_disable_idle_sleep(); 91 /*
92 * If we're using the COUNT and COMPARE registers we
93 * need to force idle poll.
94 */
95 cpu_idle_poll_ctrl(true);
91 break; 96 break;
92 case CLOCK_EVT_MODE_UNUSED: 97 case CLOCK_EVT_MODE_UNUSED:
93 case CLOCK_EVT_MODE_SHUTDOWN: 98 case CLOCK_EVT_MODE_SHUTDOWN:
94 sysreg_write(COMPARE, 0); 99 sysreg_write(COMPARE, 0);
95 pr_debug("%s: stop\n", evdev->name); 100 pr_debug("%s: stop\n", evdev->name);
96 cpu_enable_idle_sleep(); 101 cpu_idle_poll_ctrl(false);
97 break; 102 break;
98 default: 103 default:
99 BUG(); 104 BUG();
diff --git a/arch/avr32/mach-at32ap/include/mach/pm.h b/arch/avr32/mach-at32ap/include/mach/pm.h
index 979b355b77b6..f29ff2cd23d3 100644
--- a/arch/avr32/mach-at32ap/include/mach/pm.h
+++ b/arch/avr32/mach-at32ap/include/mach/pm.h
@@ -21,30 +21,6 @@
21extern void cpu_enter_idle(void); 21extern void cpu_enter_idle(void);
22extern void cpu_enter_standby(unsigned long sdramc_base); 22extern void cpu_enter_standby(unsigned long sdramc_base);
23 23
24extern bool disable_idle_sleep;
25
26static inline void cpu_disable_idle_sleep(void)
27{
28 disable_idle_sleep = true;
29}
30
31static inline void cpu_enable_idle_sleep(void)
32{
33 disable_idle_sleep = false;
34}
35
36static inline void cpu_idle_sleep(void)
37{
38 /*
39 * If we're using the COUNT and COMPARE registers for
40 * timekeeping, we can't use the IDLE state.
41 */
42 if (disable_idle_sleep)
43 cpu_relax();
44 else
45 cpu_enter_idle();
46}
47
48void intc_set_suspend_handler(unsigned long offset); 24void intc_set_suspend_handler(unsigned long offset);
49#endif 25#endif
50 26
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index f868f4ce761b..1c8e4e6bff03 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -18,13 +18,6 @@
18/* Same as 0xfff00000 but fits in a 21 bit signed immediate */ 18/* Same as 0xfff00000 but fits in a 21 bit signed immediate */
19#define PM_BASE -0x100000 19#define PM_BASE -0x100000
20 20
21 .section .bss, "wa", @nobits
22 .global disable_idle_sleep
23 .type disable_idle_sleep, @object
24disable_idle_sleep:
25 .int 4
26 .size disable_idle_sleep, . - disable_idle_sleep
27
28 /* Keep this close to the irq handlers */ 21 /* Keep this close to the irq handlers */
29 .section .irq.text, "ax", @progbits 22 .section .irq.text, "ax", @progbits
30 23
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 2798c2d4a1cf..e66e8406f992 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -146,34 +146,14 @@ void __init mem_init(void)
146 initsize >> 10); 146 initsize >> 10);
147} 147}
148 148
149static inline void free_area(unsigned long addr, unsigned long end, char *s)
150{
151 unsigned int size = (end - addr) >> 10;
152
153 for (; addr < end; addr += PAGE_SIZE) {
154 struct page *page = virt_to_page(addr);
155 ClearPageReserved(page);
156 init_page_count(page);
157 free_page(addr);
158 totalram_pages++;
159 }
160
161 if (size && s)
162 printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
163 s, size, end - (size << 10), end);
164}
165
166void free_initmem(void) 149void free_initmem(void)
167{ 150{
168 free_area((unsigned long)__init_begin, (unsigned long)__init_end, 151 free_initmem_default(0);
169 "init");
170} 152}
171 153
172#ifdef CONFIG_BLK_DEV_INITRD 154#ifdef CONFIG_BLK_DEV_INITRD
173
174void free_initrd_mem(unsigned long start, unsigned long end) 155void free_initrd_mem(unsigned long start, unsigned long end)
175{ 156{
176 free_area(start, end, "initrd"); 157 free_reserved_area(start, end, 0, "initrd");
177} 158}
178
179#endif 159#endif
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 600494c70e96..c3f2e0bc644a 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -33,7 +33,7 @@ config BLACKFIN
33 select ARCH_HAVE_CUSTOM_GPIO_H 33 select ARCH_HAVE_CUSTOM_GPIO_H
34 select ARCH_WANT_OPTIONAL_GPIOLIB 34 select ARCH_WANT_OPTIONAL_GPIOLIB
35 select HAVE_UID16 35 select HAVE_UID16
36 select HAVE_VIRT_TO_BUS 36 select VIRT_TO_BUS
37 select ARCH_WANT_IPC_PARSE_VERSION 37 select ARCH_WANT_IPC_PARSE_VERSION
38 select HAVE_GENERIC_HARDIRQS 38 select HAVE_GENERIC_HARDIRQS
39 select GENERIC_ATOMIC64 39 select GENERIC_ATOMIC64
diff --git a/arch/blackfin/include/asm/bfin_sport3.h b/arch/blackfin/include/asm/bfin_sport3.h
index 03c00220d69b..d82f5fa0ad9f 100644
--- a/arch/blackfin/include/asm/bfin_sport3.h
+++ b/arch/blackfin/include/asm/bfin_sport3.h
@@ -41,7 +41,7 @@
41#define SPORT_CTL_LAFS 0x00020000 /* Late Transmit frame select */ 41#define SPORT_CTL_LAFS 0x00020000 /* Late Transmit frame select */
42#define SPORT_CTL_RJUST 0x00040000 /* Right Justified mode select */ 42#define SPORT_CTL_RJUST 0x00040000 /* Right Justified mode select */
43#define SPORT_CTL_FSED 0x00080000 /* External frame sync edge select */ 43#define SPORT_CTL_FSED 0x00080000 /* External frame sync edge select */
44#define SPORT_CTL_TFIEN 0x00100000 /* Transmit finish interrrupt enable select */ 44#define SPORT_CTL_TFIEN 0x00100000 /* Transmit finish interrupt enable select */
45#define SPORT_CTL_GCLKEN 0x00200000 /* Gated clock mode select */ 45#define SPORT_CTL_GCLKEN 0x00200000 /* Gated clock mode select */
46#define SPORT_CTL_SPENSEC 0x01000000 /* Enable secondary channel */ 46#define SPORT_CTL_SPENSEC 0x01000000 /* Enable secondary channel */
47#define SPORT_CTL_SPTRAN 0x02000000 /* Data direction control */ 47#define SPORT_CTL_SPTRAN 0x02000000 /* Data direction control */
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 84ed8375113c..61fbd2de993d 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_init(unsigned int port,
25extern struct console *bfin_jc_early_init(void); 25extern struct console *bfin_jc_early_init(void);
26#endif 26#endif
27 27
28static struct console *early_console;
29
30/* Default console */ 28/* Default console */
31#define DEFAULT_PORT 0 29#define DEFAULT_PORT 0
32#define DEFAULT_CFLAG CS8|B57600 30#define DEFAULT_CFLAG CS8|B57600
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 9782c0329c14..4aa5545c4fde 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -46,15 +46,14 @@ EXPORT_SYMBOL(pm_power_off);
46 * The idle loop on BFIN 46 * The idle loop on BFIN
47 */ 47 */
48#ifdef CONFIG_IDLE_L1 48#ifdef CONFIG_IDLE_L1
49static void default_idle(void)__attribute__((l1_text)); 49void arch_cpu_idle(void)__attribute__((l1_text));
50void cpu_idle(void)__attribute__((l1_text));
51#endif 50#endif
52 51
53/* 52/*
54 * This is our default idle handler. We need to disable 53 * This is our default idle handler. We need to disable
55 * interrupts here to ensure we don't miss a wakeup call. 54 * interrupts here to ensure we don't miss a wakeup call.
56 */ 55 */
57static void default_idle(void) 56void arch_cpu_idle(void)
58{ 57{
59#ifdef CONFIG_IPIPE 58#ifdef CONFIG_IPIPE
60 ipipe_suspend_domain(); 59 ipipe_suspend_domain();
@@ -66,31 +65,12 @@ static void default_idle(void)
66 hard_local_irq_enable(); 65 hard_local_irq_enable();
67} 66}
68 67
69/*
70 * The idle thread. We try to conserve power, while trying to keep
71 * overall latency low. The architecture specific idle is passed
72 * a value to indicate the level of "idleness" of the system.
73 */
74void cpu_idle(void)
75{
76 /* endless idle loop with no priority at all */
77 while (1) {
78
79#ifdef CONFIG_HOTPLUG_CPU 68#ifdef CONFIG_HOTPLUG_CPU
80 if (cpu_is_offline(smp_processor_id())) 69void arch_cpu_idle_dead(void)
81 cpu_die(); 70{
82#endif 71 cpu_die();
83 tick_nohz_idle_enter();
84 rcu_idle_enter();
85 while (!need_resched())
86 default_idle();
87 rcu_idle_exit();
88 tick_nohz_idle_exit();
89 preempt_enable_no_resched();
90 schedule();
91 preempt_disable();
92 }
93} 72}
73#endif
94 74
95/* 75/*
96 * Do necessary setup to start up a newly executed thread. 76 * Do necessary setup to start up a newly executed thread.
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 61c1f47a4bf2..97d701639585 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -936,19 +936,19 @@ static struct v4l2_input adv7842_inputs[] = {
936 .index = 2, 936 .index = 2,
937 .name = "Component", 937 .name = "Component",
938 .type = V4L2_INPUT_TYPE_CAMERA, 938 .type = V4L2_INPUT_TYPE_CAMERA,
939 .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, 939 .capabilities = V4L2_IN_CAP_DV_TIMINGS,
940 }, 940 },
941 { 941 {
942 .index = 3, 942 .index = 3,
943 .name = "VGA", 943 .name = "VGA",
944 .type = V4L2_INPUT_TYPE_CAMERA, 944 .type = V4L2_INPUT_TYPE_CAMERA,
945 .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, 945 .capabilities = V4L2_IN_CAP_DV_TIMINGS,
946 }, 946 },
947 { 947 {
948 .index = 4, 948 .index = 4,
949 .name = "HDMI", 949 .name = "HDMI",
950 .type = V4L2_INPUT_TYPE_CAMERA, 950 .type = V4L2_INPUT_TYPE_CAMERA,
951 .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS, 951 .capabilities = V4L2_IN_CAP_DV_TIMINGS,
952 }, 952 },
953}; 953};
954 954
@@ -1074,7 +1074,7 @@ static struct v4l2_output adv7511_outputs[] = {
1074 .index = 0, 1074 .index = 0,
1075 .name = "HDMI", 1075 .name = "HDMI",
1076 .type = V4L2_INPUT_TYPE_CAMERA, 1076 .type = V4L2_INPUT_TYPE_CAMERA,
1077 .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS, 1077 .capabilities = V4L2_OUT_CAP_DV_TIMINGS,
1078 }, 1078 },
1079}; 1079};
1080 1080
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index bb61ae4986e4..1bc2ce6f3c94 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -335,7 +335,7 @@ void __cpuinit secondary_start_kernel(void)
335 */ 335 */
336 calibrate_delay(); 336 calibrate_delay();
337 337
338 cpu_idle(); 338 cpu_startup_entry(CPUHP_ONLINE);
339} 339}
340 340
341void __init smp_prepare_boot_cpu(void) 341void __init smp_prepare_boot_cpu(void)
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 9cb85537bd2b..82d01a71207f 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -103,7 +103,7 @@ void __init mem_init(void)
103 max_mapnr = num_physpages = MAP_NR(high_memory); 103 max_mapnr = num_physpages = MAP_NR(high_memory);
104 printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages); 104 printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);
105 105
106 /* This will put all memory onto the freelists. */ 106 /* This will put all low memory onto the freelists. */
107 totalram_pages = free_all_bootmem(); 107 totalram_pages = free_all_bootmem();
108 108
109 reservedpages = 0; 109 reservedpages = 0;
@@ -129,24 +129,11 @@ void __init mem_init(void)
129 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10))); 129 initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
130} 130}
131 131
132static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
133{
134 unsigned long addr;
135 /* next to check that the page we free is not a partial page */
136 for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
137 ClearPageReserved(virt_to_page(addr));
138 init_page_count(virt_to_page(addr));
139 free_page(addr);
140 totalram_pages++;
141 }
142 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
143}
144
145#ifdef CONFIG_BLK_DEV_INITRD 132#ifdef CONFIG_BLK_DEV_INITRD
146void __init free_initrd_mem(unsigned long start, unsigned long end) 133void __init free_initrd_mem(unsigned long start, unsigned long end)
147{ 134{
148#ifndef CONFIG_MPU 135#ifndef CONFIG_MPU
149 free_init_pages("initrd memory", start, end); 136 free_reserved_area(start, end, 0, "initrd");
150#endif 137#endif
151} 138}
152#endif 139#endif
@@ -154,10 +141,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
154void __init_refok free_initmem(void) 141void __init_refok free_initmem(void)
155{ 142{
156#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU 143#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
157 free_init_pages("unused kernel memory", 144 free_initmem_default(0);
158 (unsigned long)(&__init_begin),
159 (unsigned long)(&__init_end));
160
161 if (memory_start == (unsigned long)(&__init_end)) 145 if (memory_start == (unsigned long)(&__init_end))
162 memory_start = (unsigned long)(&__init_begin); 146 memory_start = (unsigned long)(&__init_begin);
163#endif 147#endif
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
index cf78e09e18c3..2c71d5634ec2 100644
--- a/arch/c6x/include/asm/irqflags.h
+++ b/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
27/* set interrupt enabled status */ 27/* set interrupt enabled status */
28static inline void arch_local_irq_restore(unsigned long flags) 28static inline void arch_local_irq_restore(unsigned long flags)
29{ 29{
30 asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); 30 asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
31} 31}
32 32
33/* unconditionally enable interrupts */ 33/* unconditionally enable interrupts */
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
index 6434df476f77..57d2ea8d1977 100644
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -33,7 +33,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
33void (*pm_power_off)(void); 33void (*pm_power_off)(void);
34EXPORT_SYMBOL(pm_power_off); 34EXPORT_SYMBOL(pm_power_off);
35 35
36static void c6x_idle(void) 36void arch_cpu_idle(void)
37{ 37{
38 unsigned long tmp; 38 unsigned long tmp;
39 39
@@ -49,32 +49,6 @@ static void c6x_idle(void)
49 : "=b"(tmp)); 49 : "=b"(tmp));
50} 50}
51 51
52/*
53 * The idle loop for C64x
54 */
55void cpu_idle(void)
56{
57 /* endless idle loop with no priority at all */
58 while (1) {
59 tick_nohz_idle_enter();
60 rcu_idle_enter();
61 while (1) {
62 local_irq_disable();
63 if (need_resched()) {
64 local_irq_enable();
65 break;
66 }
67 c6x_idle(); /* enables local irqs */
68 }
69 rcu_idle_exit();
70 tick_nohz_idle_exit();
71
72 preempt_enable_no_resched();
73 schedule();
74 preempt_disable();
75 }
76}
77
78static void halt_loop(void) 52static void halt_loop(void)
79{ 53{
80 printk(KERN_EMERG "System Halted, OK to turn off power\n"); 54 printk(KERN_EMERG "System Halted, OK to turn off power\n");
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 89395f09648a..a9fcd89b251b 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -77,37 +77,11 @@ void __init mem_init(void)
77#ifdef CONFIG_BLK_DEV_INITRD 77#ifdef CONFIG_BLK_DEV_INITRD
78void __init free_initrd_mem(unsigned long start, unsigned long end) 78void __init free_initrd_mem(unsigned long start, unsigned long end)
79{ 79{
80 int pages = 0; 80 free_reserved_area(start, end, 0, "initrd");
81 for (; start < end; start += PAGE_SIZE) {
82 ClearPageReserved(virt_to_page(start));
83 init_page_count(virt_to_page(start));
84 free_page(start);
85 totalram_pages++;
86 pages++;
87 }
88 printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
89 (pages * PAGE_SIZE) >> 10);
90} 81}
91#endif 82#endif
92 83
93void __init free_initmem(void) 84void __init free_initmem(void)
94{ 85{
95 unsigned long addr; 86 free_initmem_default(0);
96
97 /*
98 * The following code should be cool even if these sections
99 * are not page aligned.
100 */
101 addr = PAGE_ALIGN((unsigned long)(__init_begin));
102
103 /* next to check that the page we free is not a partial page */
104 for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
105 addr += PAGE_SIZE) {
106 ClearPageReserved(virt_to_page(addr));
107 init_page_count(virt_to_page(addr));
108 free_page(addr);
109 totalram_pages++;
110 }
111 printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n",
112 (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10));
113} 87}
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index bb0ac66cf533..06dd026533e3 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -43,7 +43,7 @@ config CRIS
43 select GENERIC_ATOMIC64 43 select GENERIC_ATOMIC64
44 select HAVE_GENERIC_HARDIRQS 44 select HAVE_GENERIC_HARDIRQS
45 select HAVE_UID16 45 select HAVE_UID16
46 select HAVE_VIRT_TO_BUS 46 select VIRT_TO_BUS
47 select ARCH_WANT_IPC_PARSE_VERSION 47 select ARCH_WANT_IPC_PARSE_VERSION
48 select GENERIC_IRQ_SHOW 48 select GENERIC_IRQ_SHOW
49 select GENERIC_IOMAP 49 select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index b1018750cffb..2ba23c13df68 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -30,8 +30,9 @@ void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
30void default_idle(void) 30void default_idle(void)
31{ 31{
32#ifdef CONFIG_ETRAX_GPIO 32#ifdef CONFIG_ETRAX_GPIO
33 etrax_gpio_wake_up_check(); 33 etrax_gpio_wake_up_check();
34#endif 34#endif
35 local_irq_enable();
35} 36}
36 37
37/* 38/*
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 2b23ef0e4452..57451faa9b20 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -20,18 +20,12 @@
20 20
21extern void stop_watchdog(void); 21extern void stop_watchdog(void);
22 22
23extern int cris_hlt_counter;
24
25/* We use this if we don't have any better idle routine. */ 23/* We use this if we don't have any better idle routine. */
26void default_idle(void) 24void default_idle(void)
27{ 25{
28 local_irq_disable(); 26 /* Halt until exception. */
29 if (!need_resched() && !cris_hlt_counter) { 27 __asm__ volatile("ei \n\t"
30 /* Halt until exception. */ 28 "halt ");
31 __asm__ volatile("ei \n\t"
32 "halt ");
33 }
34 local_irq_enable();
35} 29}
36 30
37/* 31/*
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 04a16edd5401..cdd12028de0c 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -145,8 +145,6 @@ smp_boot_one_cpu(int cpuid, struct task_struct *idle)
145 * specific stuff such as the local timer and the MMU. */ 145 * specific stuff such as the local timer and the MMU. */
146void __init smp_callin(void) 146void __init smp_callin(void)
147{ 147{
148 extern void cpu_idle(void);
149
150 int cpu = cpu_now_booting; 148 int cpu = cpu_now_booting;
151 reg_intr_vect_rw_mask vect_mask = {0}; 149 reg_intr_vect_rw_mask vect_mask = {0};
152 150
@@ -170,7 +168,7 @@ void __init smp_callin(void)
170 local_irq_enable(); 168 local_irq_enable();
171 169
172 set_cpu_online(cpu, true); 170 set_cpu_online(cpu, true);
173 cpu_idle(); 171 cpu_startup_entry(CPUHP_ONLINE);
174} 172}
175 173
176/* Stop execution on this CPU.*/ 174/* Stop execution on this CPU.*/
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 675823f70c0f..c0a29b96b92b 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -65,13 +65,6 @@ static inline void release_thread(struct task_struct *dead_task)
65 65
66#define cpu_relax() barrier() 66#define cpu_relax() barrier()
67 67
68/*
69 * disable hlt during certain critical i/o operations
70 */
71#define HAVE_DISABLE_HLT
72void disable_hlt(void);
73void enable_hlt(void);
74
75void default_idle(void); 68void default_idle(void);
76 69
77#endif /* __ASM_CRIS_PROCESSOR_H */ 70#endif /* __ASM_CRIS_PROCESSOR_H */
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 104ff4dd9b98..b78498eb079b 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -29,59 +29,14 @@
29 29
30//#define DEBUG 30//#define DEBUG
31 31
32/*
33 * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
34 * there would ever be a halt sequence (for power save when idle) with
35 * some largish delay when halting or resuming *and* a driver that can't
36 * afford that delay. The hlt_counter would then be checked before
37 * executing the halt sequence, and the driver marks the unhaltable
38 * region by enable_hlt/disable_hlt.
39 */
40
41int cris_hlt_counter=0;
42
43void disable_hlt(void)
44{
45 cris_hlt_counter++;
46}
47
48EXPORT_SYMBOL(disable_hlt);
49
50void enable_hlt(void)
51{
52 cris_hlt_counter--;
53}
54
55EXPORT_SYMBOL(enable_hlt);
56
57extern void default_idle(void); 32extern void default_idle(void);
58 33
59void (*pm_power_off)(void); 34void (*pm_power_off)(void);
60EXPORT_SYMBOL(pm_power_off); 35EXPORT_SYMBOL(pm_power_off);
61 36
62/* 37void arch_cpu_idle(void)
63 * The idle thread. There's no useful work to be
64 * done, so just try to conserve power and have a
65 * low exit latency (ie sit in a loop waiting for
66 * somebody to say that they'd like to reschedule)
67 */
68
69void cpu_idle (void)
70{ 38{
71 /* endless idle loop with no priority at all */ 39 default_idle();
72 while (1) {
73 rcu_idle_enter();
74 while (!need_resched()) {
75 /*
76 * Mark this as an RCU critical section so that
77 * synchronize_kernel() in the unload path waits
78 * for our completion.
79 */
80 default_idle();
81 }
82 rcu_idle_exit();
83 schedule_preempt_disabled();
84 }
85} 40}
86 41
87void hard_reset_now (void); 42void hard_reset_now (void);
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index d72ab58fd83e..9ac80946dada 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -12,12 +12,10 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <asm/tlb.h> 14#include <asm/tlb.h>
15#include <asm/sections.h>
15 16
16unsigned long empty_zero_page; 17unsigned long empty_zero_page;
17 18
18extern char _stext, _edata, _etext; /* From linkerscript */
19extern char __init_begin, __init_end;
20
21void __init 19void __init
22mem_init(void) 20mem_init(void)
23{ 21{
@@ -67,15 +65,5 @@ mem_init(void)
67void 65void
68free_initmem(void) 66free_initmem(void)
69{ 67{
70 unsigned long addr; 68 free_initmem_default(0);
71
72 addr = (unsigned long)(&__init_begin);
73 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
74 ClearPageReserved(virt_to_page(addr));
75 init_page_count(virt_to_page(addr));
76 free_page(addr);
77 totalram_pages++;
78 }
79 printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
80 (unsigned long)((&__init_end - &__init_begin) >> 10));
81} 69}
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 12369b194c7b..2ce731f9aa4d 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,7 +6,7 @@ config FRV
6 select HAVE_PERF_EVENTS 6 select HAVE_PERF_EVENTS
7 select HAVE_UID16 7 select HAVE_UID16
8 select HAVE_GENERIC_HARDIRQS 8 select HAVE_GENERIC_HARDIRQS
9 select HAVE_VIRT_TO_BUS 9 select VIRT_TO_BUS
10 select GENERIC_IRQ_SHOW 10 select GENERIC_IRQ_SHOW
11 select HAVE_DEBUG_BUGVERBOSE 11 select HAVE_DEBUG_BUGVERBOSE
12 select ARCH_HAVE_NMI_SAFE_CMPXCHG 12 select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 23916b2a12a2..5d40aeb7712e 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -59,29 +59,12 @@ static void core_sleep_idle(void)
59 mb(); 59 mb();
60} 60}
61 61
62void (*idle)(void) = core_sleep_idle; 62void arch_cpu_idle(void)
63
64/*
65 * The idle thread. There's no useful work to be
66 * done, so just try to conserve power and have a
67 * low exit latency (ie sit in a loop waiting for
68 * somebody to say that they'd like to reschedule)
69 */
70void cpu_idle(void)
71{ 63{
72 /* endless idle loop with no priority at all */ 64 if (!frv_dma_inprogress)
73 while (1) { 65 core_sleep_idle();
74 rcu_idle_enter(); 66 else
75 while (!need_resched()) { 67 local_irq_enable();
76 check_pgt_cache();
77
78 if (!frv_dma_inprogress && idle)
79 idle();
80 }
81 rcu_idle_exit();
82
83 schedule_preempt_disabled();
84 }
85} 68}
86 69
87void machine_restart(char * __unused) 70void machine_restart(char * __unused)
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 92e97b0894a6..dee354fa6b64 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -122,7 +122,7 @@ void __init mem_init(void)
122#endif 122#endif
123 int codek = 0, datak = 0; 123 int codek = 0, datak = 0;
124 124
125 /* this will put all memory onto the freelists */ 125 /* this will put all low memory onto the freelists */
126 totalram_pages = free_all_bootmem(); 126 totalram_pages = free_all_bootmem();
127 127
128#ifdef CONFIG_MMU 128#ifdef CONFIG_MMU
@@ -131,14 +131,8 @@ void __init mem_init(void)
131 datapages++; 131 datapages++;
132 132
133#ifdef CONFIG_HIGHMEM 133#ifdef CONFIG_HIGHMEM
134 for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) { 134 for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--)
135 struct page *page = &mem_map[pfn]; 135 free_highmem_page(&mem_map[pfn]);
136
137 ClearPageReserved(page);
138 init_page_count(page);
139 __free_page(page);
140 totalram_pages++;
141 }
142#endif 136#endif
143 137
144 codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10; 138 codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
@@ -168,21 +162,7 @@ void __init mem_init(void)
168void free_initmem(void) 162void free_initmem(void)
169{ 163{
170#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) 164#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
171 unsigned long start, end, addr; 165 free_initmem_default(0);
172
173 start = PAGE_ALIGN((unsigned long) &__init_begin); /* round up */
174 end = ((unsigned long) &__init_end) & PAGE_MASK; /* round down */
175
176 /* next to check that the page we free is not a partial page */
177 for (addr = start; addr < end; addr += PAGE_SIZE) {
178 ClearPageReserved(virt_to_page(addr));
179 init_page_count(virt_to_page(addr));
180 free_page(addr);
181 totalram_pages++;
182 }
183
184 printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
185 (end - start) >> 10, start, end);
186#endif 166#endif
187} /* end free_initmem() */ 167} /* end free_initmem() */
188 168
@@ -193,14 +173,6 @@ void free_initmem(void)
193#ifdef CONFIG_BLK_DEV_INITRD 173#ifdef CONFIG_BLK_DEV_INITRD
194void __init free_initrd_mem(unsigned long start, unsigned long end) 174void __init free_initrd_mem(unsigned long start, unsigned long end)
195{ 175{
196 int pages = 0; 176 free_reserved_area(start, end, 0, "initrd");
197 for (; start < end; start += PAGE_SIZE) {
198 ClearPageReserved(virt_to_page(start));
199 init_page_count(virt_to_page(start));
200 free_page(start);
201 totalram_pages++;
202 pages++;
203 }
204 printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);
205} /* end free_initrd_mem() */ 177} /* end free_initrd_mem() */
206#endif 178#endif
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index ae8551eb3736..79250de1b12a 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -5,7 +5,7 @@ config H8300
5 select HAVE_GENERIC_HARDIRQS 5 select HAVE_GENERIC_HARDIRQS
6 select GENERIC_ATOMIC64 6 select GENERIC_ATOMIC64
7 select HAVE_UID16 7 select HAVE_UID16
8 select HAVE_VIRT_TO_BUS 8 select VIRT_TO_BUS
9 select ARCH_WANT_IPC_PARSE_VERSION 9 select ARCH_WANT_IPC_PARSE_VERSION
10 select GENERIC_IRQ_SHOW 10 select GENERIC_IRQ_SHOW
11 select GENERIC_CPU_DEVICES 11 select GENERIC_CPU_DEVICES
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index b609f63f1590..a17d2cd463d2 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -53,40 +53,13 @@ asmlinkage void ret_from_kernel_thread(void);
53 * The idle loop on an H8/300.. 53 * The idle loop on an H8/300..
54 */ 54 */
55#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) 55#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
56static void default_idle(void) 56void arch_cpu_idle(void)
57{ 57{
58 local_irq_disable(); 58 local_irq_enable();
59 if (!need_resched()) { 59 /* XXX: race here! What if need_resched() gets set now? */
60 local_irq_enable(); 60 __asm__("sleep");
61 /* XXX: race here! What if need_resched() gets set now? */
62 __asm__("sleep");
63 } else
64 local_irq_enable();
65}
66#else
67static void default_idle(void)
68{
69 cpu_relax();
70} 61}
71#endif 62#endif
72void (*idle)(void) = default_idle;
73
74/*
75 * The idle thread. There's no useful work to be
76 * done, so just try to conserve power and have a
77 * low exit latency (ie sit in a loop waiting for
78 * somebody to say that they'd like to reschedule)
79 */
80void cpu_idle(void)
81{
82 while (1) {
83 rcu_idle_enter();
84 while (!need_resched())
85 idle();
86 rcu_idle_exit();
87 schedule_preempt_disabled();
88 }
89}
90 63
91void machine_restart(char * __unused) 64void machine_restart(char * __unused)
92{ 65{
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 981e25094b1a..ff349d70a29b 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -139,7 +139,7 @@ void __init mem_init(void)
139 start_mem = PAGE_ALIGN(start_mem); 139 start_mem = PAGE_ALIGN(start_mem);
140 max_mapnr = num_physpages = MAP_NR(high_memory); 140 max_mapnr = num_physpages = MAP_NR(high_memory);
141 141
142 /* this will put all memory onto the freelists */ 142 /* this will put all low memory onto the freelists */
143 totalram_pages = free_all_bootmem(); 143 totalram_pages = free_all_bootmem();
144 144
145 codek = (_etext - _stext) >> 10; 145 codek = (_etext - _stext) >> 10;
@@ -161,15 +161,7 @@ void __init mem_init(void)
161#ifdef CONFIG_BLK_DEV_INITRD 161#ifdef CONFIG_BLK_DEV_INITRD
162void free_initrd_mem(unsigned long start, unsigned long end) 162void free_initrd_mem(unsigned long start, unsigned long end)
163{ 163{
164 int pages = 0; 164 free_reserved_area(start, end, 0, "initrd");
165 for (; start < end; start += PAGE_SIZE) {
166 ClearPageReserved(virt_to_page(start));
167 init_page_count(virt_to_page(start));
168 free_page(start);
169 totalram_pages++;
170 pages++;
171 }
172 printk ("Freeing initrd memory: %dk freed\n", pages);
173} 165}
174#endif 166#endif
175 167
@@ -177,23 +169,7 @@ void
177free_initmem(void) 169free_initmem(void)
178{ 170{
179#ifdef CONFIG_RAMKERNEL 171#ifdef CONFIG_RAMKERNEL
180 unsigned long addr; 172 free_initmem_default(0);
181/*
182 * the following code should be cool even if these sections
183 * are not page aligned.
184 */
185 addr = PAGE_ALIGN((unsigned long)(__init_begin));
186 /* next to check that the page we free is not a partial page */
187 for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
188 ClearPageReserved(virt_to_page(addr));
189 init_page_count(virt_to_page(addr));
190 free_page(addr);
191 totalram_pages++;
192 }
193 printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
194 (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
195 (int)(PAGE_ALIGN((unsigned long)__init_begin)),
196 (int)(addr - PAGE_SIZE));
197#endif 173#endif
198} 174}
199 175
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 06ae9ffcabd5..9b948c619a03 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -51,28 +51,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
51 * If hardware or VM offer wait termination even though interrupts 51 * If hardware or VM offer wait termination even though interrupts
52 * are disabled. 52 * are disabled.
53 */ 53 */
54static void default_idle(void) 54void arch_cpu_idle(void)
55{ 55{
56 __vmwait(); 56 __vmwait();
57} 57 /* interrupts wake us up, but irqs are still disabled */
58 58 local_irq_enable();
59void (*idle_sleep)(void) = default_idle;
60
61void cpu_idle(void)
62{
63 while (1) {
64 tick_nohz_idle_enter();
65 local_irq_disable();
66 while (!need_resched()) {
67 idle_sleep();
68 /* interrupts wake us up, but aren't serviced */
69 local_irq_enable(); /* service interrupt */
70 local_irq_disable();
71 }
72 local_irq_enable();
73 tick_nohz_idle_exit();
74 schedule();
75 }
76} 59}
77 60
78/* 61/*
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 8e095dffd070..0e364ca43198 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -184,7 +184,7 @@ void __cpuinit start_secondary(void)
184 184
185 local_irq_enable(); 185 local_irq_enable();
186 186
187 cpu_idle(); 187 cpu_startup_entry(CPUHP_ONLINE);
188} 188}
189 189
190 190
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 33f3fdc0b214..e7e55a00f94f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,7 +26,7 @@ config IA64
26 select HAVE_MEMBLOCK 26 select HAVE_MEMBLOCK
27 select HAVE_MEMBLOCK_NODE_MAP 27 select HAVE_MEMBLOCK_NODE_MAP
28 select HAVE_VIRT_CPU_ACCOUNTING 28 select HAVE_VIRT_CPU_ACCOUNTING
29 select HAVE_VIRT_TO_BUS 29 select VIRT_TO_BUS
30 select ARCH_DISCARD_MEMBLOCK 30 select ARCH_DISCARD_MEMBLOCK
31 select GENERIC_IRQ_PROBE 31 select GENERIC_IRQ_PROBE
32 select GENERIC_PENDING_IRQ if SMP 32 select GENERIC_PENDING_IRQ if SMP
@@ -187,7 +187,7 @@ config IA64_DIG
187 187
188config IA64_DIG_VTD 188config IA64_DIG_VTD
189 bool "DIG+Intel+IOMMU" 189 bool "DIG+Intel+IOMMU"
190 select DMAR 190 select INTEL_IOMMU
191 select PCI_MSI 191 select PCI_MSI
192 192
193config IA64_HP_ZX1 193config IA64_HP_ZX1
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index da2f319fb71d..e70cadec7ce6 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -142,8 +142,7 @@ static void transmit_chars(struct tty_struct *tty, struct serial_state *info,
142 goto out; 142 goto out;
143 } 143 }
144 144
145 if (info->xmit.head == info->xmit.tail || tty->stopped || 145 if (info->xmit.head == info->xmit.tail || tty->stopped) {
146 tty->hw_stopped) {
147#ifdef SIMSERIAL_DEBUG 146#ifdef SIMSERIAL_DEBUG
148 printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", 147 printk("transmit_chars: head=%d, tail=%d, stopped=%d\n",
149 info->xmit.head, info->xmit.tail, tty->stopped); 148 info->xmit.head, info->xmit.tail, tty->stopped);
@@ -181,7 +180,7 @@ static void rs_flush_chars(struct tty_struct *tty)
181 struct serial_state *info = tty->driver_data; 180 struct serial_state *info = tty->driver_data;
182 181
183 if (info->xmit.head == info->xmit.tail || tty->stopped || 182 if (info->xmit.head == info->xmit.tail || tty->stopped ||
184 tty->hw_stopped || !info->xmit.buf) 183 !info->xmit.buf)
185 return; 184 return;
186 185
187 transmit_chars(tty, info, NULL); 186 transmit_chars(tty, info, NULL);
@@ -217,7 +216,7 @@ static int rs_write(struct tty_struct * tty,
217 * Hey, we transmit directly from here in our case 216 * Hey, we transmit directly from here in our case
218 */ 217 */
219 if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && 218 if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) &&
220 !tty->stopped && !tty->hw_stopped) 219 !tty->stopped)
221 transmit_chars(tty, info, NULL); 220 transmit_chars(tty, info, NULL);
222 221
223 return ret; 222 return ret;
@@ -325,14 +324,6 @@ static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
325 324
326#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 325#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
327 326
328static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
329{
330 /* Handle turning off CRTSCTS */
331 if ((old_termios->c_cflag & CRTSCTS) &&
332 !(tty->termios.c_cflag & CRTSCTS)) {
333 tty->hw_stopped = 0;
334 }
335}
336/* 327/*
337 * This routine will shutdown a serial port; interrupts are disabled, and 328 * This routine will shutdown a serial port; interrupts are disabled, and
338 * DTR is dropped if the hangup on close termio flag is on. 329 * DTR is dropped if the hangup on close termio flag is on.
@@ -481,7 +472,6 @@ static const struct tty_operations hp_ops = {
481 .throttle = rs_throttle, 472 .throttle = rs_throttle,
482 .unthrottle = rs_unthrottle, 473 .unthrottle = rs_unthrottle,
483 .send_xchar = rs_send_xchar, 474 .send_xchar = rs_send_xchar,
484 .set_termios = rs_set_termios,
485 .hangup = rs_hangup, 475 .hangup = rs_hangup,
486 .proc_fops = &rs_proc_fops, 476 .proc_fops = &rs_proc_fops,
487}; 477};
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index d2bf1fd5e44f..76acbcd5c060 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
106 return -EFAULT; 106 return -EFAULT;
107 107
108 { 108 {
109 register unsigned long r8 __asm ("r8"); 109 register unsigned long r8 __asm ("r8") = 0;
110 unsigned long prev; 110 unsigned long prev;
111 __asm__ __volatile__( 111 __asm__ __volatile__(
112 " mf;; \n" 112 " mf;; \n"
113 " mov %0=r0 \n"
114 " mov ar.ccv=%4;; \n" 113 " mov ar.ccv=%4;; \n"
115 "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" 114 "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
116 " .xdata4 \"__ex_table\", 1b-., 2f-. \n" 115 " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
117 "[2:]" 116 "[2:]"
118 : "=r" (r8), "=r" (prev) 117 : "+r" (r8), "=&r" (prev)
119 : "r" (uaddr), "r" (newval), 118 : "r" (uaddr), "r" (newval),
120 "rO" ((long) (unsigned) oldval) 119 "rO" ((long) (unsigned) oldval)
121 : "memory"); 120 : "memory");
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
index 94eaa5bd5d0c..aa910054b8e7 100644
--- a/arch/ia64/include/asm/hugetlb.h
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_IA64_HUGETLB_H 2#define _ASM_IA64_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, 8void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index 2b68d856dc78..1bf2cf2f4ab4 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -89,6 +89,7 @@ static inline bool arch_irqs_disabled(void)
89 89
90static inline void arch_safe_halt(void) 90static inline void arch_safe_halt(void)
91{ 91{
92 arch_local_irq_enable();
92 ia64_pal_halt_light(); /* PAL_HALT_LIGHT */ 93 ia64_pal_halt_light(); /* PAL_HALT_LIGHT */
93} 94}
94 95
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab18fa0..8c7096168716 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
143extern int cpe_vector; 143extern int cpe_vector;
144extern int ia64_cpe_irq; 144extern int ia64_cpe_irq;
145extern void ia64_mca_init(void); 145extern void ia64_mca_init(void);
146extern void ia64_mca_irq_init(void);
146extern void ia64_mca_cpu_init(void *); 147extern void ia64_mca_cpu_init(void *);
147extern void ia64_os_mca_dispatch(void); 148extern void ia64_os_mca_dispatch(void);
148extern void ia64_os_mca_dispatch_end(void); 149extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 2e27ef175652..2db0a6c6daa5 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr);
67 67
68extern void map_cpu_to_node(int cpu, int nid); 68extern void map_cpu_to_node(int cpu, int nid);
69extern void unmap_cpu_from_node(int cpu, int nid); 69extern void unmap_cpu_from_node(int cpu, int nid);
70 70extern void numa_clear_node(int cpu);
71 71
72#else /* !CONFIG_NUMA */ 72#else /* !CONFIG_NUMA */
73#define map_cpu_to_node(cpu, nid) do{}while(0) 73#define map_cpu_to_node(cpu, nid) do{}while(0)
74#define unmap_cpu_from_node(cpu, nid) do{}while(0) 74#define unmap_cpu_from_node(cpu, nid) do{}while(0)
75
76#define paddr_to_nid(addr) 0 75#define paddr_to_nid(addr) 0
77 76#define numa_clear_node(cpu) do { } while (0)
78#endif /* CONFIG_NUMA */ 77#endif /* CONFIG_NUMA */
79 78
80#endif /* _ASM_IA64_NUMA_H */ 79#endif /* _ASM_IA64_NUMA_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 020d655ed082..cade13dd0299 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -131,8 +131,6 @@ struct thread_info {
131#define TS_POLLING 1 /* true if in idle loop and not sleeping */ 131#define TS_POLLING 1 /* true if in idle loop and not sleeping */
132#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */ 132#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */
133 133
134#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
135
136#ifndef __ASSEMBLY__ 134#ifndef __ASSEMBLY__
137#define HAVE_SET_RESTORE_SIGMASK 1 135#define HAVE_SET_RESTORE_SIGMASK 1
138static inline void set_restore_sigmask(void) 136static inline void set_restore_sigmask(void)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c4cd45d97749..abc6dee3799c 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
90 FSYS_RETURN 90 FSYS_RETURN
91END(fsys_getpid) 91END(fsys_getpid)
92 92
93ENTRY(fsys_getppid)
94 .prologue
95 .altrp b6
96 .body
97 add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
98 ;;
99 ld8 r17=[r17] // r17 = current->group_leader
100 add r9=TI_FLAGS+IA64_TASK_SIZE,r16
101 ;;
102
103 ld4 r9=[r9]
104 add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
105 ;;
106 and r9=TIF_ALLWORK_MASK,r9
107
1081: ld8 r18=[r17] // r18 = current->group_leader->real_parent
109 ;;
110 cmp.ne p8,p0=0,r9
111 add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
112 ;;
113
114 /*
115 * The .acq is needed to ensure that the read of tgid has returned its data before
116 * we re-check "real_parent".
117 */
118 ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
119#ifdef CONFIG_SMP
120 /*
121 * Re-read current->group_leader->real_parent.
122 */
123 ld8 r19=[r17] // r19 = current->group_leader->real_parent
124(p8) br.spnt.many fsys_fallback_syscall
125 ;;
126 cmp.ne p6,p0=r18,r19 // did real_parent change?
127 mov r19=0 // i must not leak kernel bits...
128(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
129 ;;
130 mov r17=0 // i must not leak kernel bits...
131 mov r18=0 // i must not leak kernel bits...
132#else
133 mov r17=0 // i must not leak kernel bits...
134 mov r18=0 // i must not leak kernel bits...
135 mov r19=0 // i must not leak kernel bits...
136#endif
137 FSYS_RETURN
138END(fsys_getppid)
139
140ENTRY(fsys_set_tid_address) 93ENTRY(fsys_set_tid_address)
141 .prologue 94 .prologue
142 .altrp b6 95 .altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
614 data8 0 // chown 567 data8 0 // chown
615 data8 0 // lseek // 1040 568 data8 0 // lseek // 1040
616 data8 fsys_getpid // getpid 569 data8 fsys_getpid // getpid
617 data8 fsys_getppid // getppid 570 data8 0 // getppid
618 data8 0 // mount 571 data8 0 // mount
619 data8 0 // umount 572 data8 0 // umount
620 data8 0 // setuid // 1045 573 data8 0 // setuid // 1045
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index ee33c3aaa2fc..19f107be734e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -76,7 +76,7 @@
76 * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ 76 * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
77 * 77 *
78 * Note: The term "IRQ" is loosely used everywhere in Linux kernel to 78 * Note: The term "IRQ" is loosely used everywhere in Linux kernel to
79 * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ 79 * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
80 * (isa_irq) is the only exception in this source code. 80 * (isa_irq) is the only exception in this source code.
81 */ 81 */
82 82
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
1010 return 0; 1010 return 0;
1011} 1011}
1012 1012
1013static int
1014iosapic_delete_rte(unsigned int irq, unsigned int gsi)
1015{
1016 struct iosapic_rte_info *rte, *temp;
1017
1018 list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
1019 rte_list) {
1020 if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
1021 if (rte->refcnt)
1022 return -EBUSY;
1023
1024 list_del(&rte->rte_list);
1025 kfree(rte);
1026 return 0;
1027 }
1028 }
1029
1030 return -EINVAL;
1031}
1032
1013int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) 1033int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
1014{ 1034{
1015 int num_rte, err, index; 1035 int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
1069 1089
1070int iosapic_remove(unsigned int gsi_base) 1090int iosapic_remove(unsigned int gsi_base)
1071{ 1091{
1072 int index, err = 0; 1092 int i, irq, index, err = 0;
1073 unsigned long flags; 1093 unsigned long flags;
1074 1094
1075 spin_lock_irqsave(&iosapic_lock, flags); 1095 spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
1087 goto out; 1107 goto out;
1088 } 1108 }
1089 1109
1110 for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
1111 irq = __gsi_to_irq(i);
1112 if (irq < 0)
1113 continue;
1114
1115 err = iosapic_delete_rte(irq, i);
1116 if (err)
1117 goto out;
1118 }
1119
1090 iounmap(iosapic_lists[index].addr); 1120 iounmap(iosapic_lists[index].addr);
1091 iosapic_free(index); 1121 iosapic_free(index);
1092 out: 1122 out:
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613eb..f2c418281130 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/kernel_stat.h> 24#include <linux/kernel_stat.h>
25 25
26#include <asm/mca.h>
27
26/* 28/*
27 * 'what should we do if we get a hw irq event on an illegal vector'. 29 * 'what should we do if we get a hw irq event on an illegal vector'.
28 * each architecture has to answer this themselves. 30 * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
83 85
84#endif /* CONFIG_SMP */ 86#endif /* CONFIG_SMP */
85 87
88int __init arch_early_irq_init(void)
89{
90 ia64_mca_irq_init();
91 return 0;
92}
93
86#ifdef CONFIG_HOTPLUG_CPU 94#ifdef CONFIG_HOTPLUG_CPU
87unsigned int vectors_in_migration[NR_IRQS]; 95unsigned int vectors_in_migration[NR_IRQS];
88 96
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd39044..d7396dbb07bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
2074 printk(KERN_INFO "MCA related initialization done\n"); 2074 printk(KERN_INFO "MCA related initialization done\n");
2075} 2075}
2076 2076
2077
2077/* 2078/*
2078 * ia64_mca_late_init 2079 * These pieces cannot be done in ia64_mca_init() because it is called before
2079 * 2080 * early_irq_init() which would wipe out our percpu irq registrations. But we
2080 * Opportunity to setup things that require initialization later 2081 * cannot leave them until ia64_mca_late_init() because by then all the other
2081 * than ia64_mca_init. Setup a timer to poll for CPEs if the 2082 * processors have been brought online and have set their own CMC vectors to
2082 * platform doesn't support an interrupt driven mechanism. 2083 * point at a non-existent action. Called from arch_early_irq_init().
2083 *
2084 * Inputs : None
2085 * Outputs : Status
2086 */ 2084 */
2087static int __init 2085void __init ia64_mca_irq_init(void)
2088ia64_mca_late_init(void)
2089{ 2086{
2090 if (!mca_init)
2091 return 0;
2092
2093 /* 2087 /*
2094 * Configure the CMCI/P vector and handler. Interrupts for CMC are 2088 * Configure the CMCI/P vector and handler. Interrupts for CMC are
2095 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 2089 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
2108 /* Setup the CPEI/P handler */ 2102 /* Setup the CPEI/P handler */
2109 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 2103 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2110#endif 2104#endif
2105}
2106
2107/*
2108 * ia64_mca_late_init
2109 *
2110 * Opportunity to setup things that require initialization later
2111 * than ia64_mca_init. Setup a timer to poll for CPEs if the
2112 * platform doesn't support an interrupt driven mechanism.
2113 *
2114 * Inputs : None
2115 * Outputs : Status
2116 */
2117static int __init
2118ia64_mca_late_init(void)
2119{
2120 if (!mca_init)
2121 return 0;
2111 2122
2112 register_hotcpu_notifier(&mca_cpu_notifier); 2123 register_hotcpu_notifier(&mca_cpu_notifier);
2113 2124
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 9392e021c93b..94f8bf777afa 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -349,7 +349,7 @@ init_record_index_pools(void)
349 349
350 /* - 3 - */ 350 /* - 3 - */
351 slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; 351 slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
352 slidx_pool.buffer = (slidx_list_t *) 352 slidx_pool.buffer =
353 kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); 353 kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
354 354
355 return slidx_pool.buffer ? 0 : -ENOMEM; 355 return slidx_pool.buffer ? 0 : -ENOMEM;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 77597e5ea60a..79521d5499f9 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={
849 849
850#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) 850#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
851 851
852/*
853 * this array is used to keep track of the proc entries we create. This is
854 * required in the module mode when we need to remove all entries. The procfs code
855 * does not do recursion of deletion
856 *
857 * Notes:
858 * - +1 accounts for the cpuN directory entry in /proc/pal
859 */
860#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
861
862static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
863static struct proc_dir_entry *palinfo_dir; 852static struct proc_dir_entry *palinfo_dir;
864 853
865/* 854/*
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
971static void __cpuinit 960static void __cpuinit
972create_palinfo_proc_entries(unsigned int cpu) 961create_palinfo_proc_entries(unsigned int cpu)
973{ 962{
974# define CPUSTR "cpu%d"
975
976 pal_func_cpu_u_t f; 963 pal_func_cpu_u_t f;
977 struct proc_dir_entry **pdir;
978 struct proc_dir_entry *cpu_dir; 964 struct proc_dir_entry *cpu_dir;
979 int j; 965 int j;
980 char cpustr[sizeof(CPUSTR)]; 966 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
981 967 sprintf(cpustr, "cpu%d", cpu);
982
983 /*
984 * we keep track of created entries in a depth-first order for
985 * cleanup purposes. Each entry is stored into palinfo_proc_entries
986 */
987 sprintf(cpustr,CPUSTR, cpu);
988 968
989 cpu_dir = proc_mkdir(cpustr, palinfo_dir); 969 cpu_dir = proc_mkdir(cpustr, palinfo_dir);
970 if (!cpu_dir)
971 return;
990 972
991 f.req_cpu = cpu; 973 f.req_cpu = cpu;
992 974
993 /*
994 * Compute the location to store per cpu entries
995 * We dont store the top level entry in this list, but
996 * remove it finally after removing all cpu entries.
997 */
998 pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
999 *pdir++ = cpu_dir;
1000 for (j=0; j < NR_PALINFO_ENTRIES; j++) { 975 for (j=0; j < NR_PALINFO_ENTRIES; j++) {
1001 f.func_id = j; 976 f.func_id = j;
1002 *pdir = create_proc_read_entry( 977 create_proc_read_entry(
1003 palinfo_entries[j].name, 0, cpu_dir, 978 palinfo_entries[j].name, 0, cpu_dir,
1004 palinfo_read_entry, (void *)f.value); 979 palinfo_read_entry, (void *)f.value);
1005 pdir++;
1006 } 980 }
1007} 981}
1008 982
1009static void 983static void
1010remove_palinfo_proc_entries(unsigned int hcpu) 984remove_palinfo_proc_entries(unsigned int hcpu)
1011{ 985{
1012 int j; 986 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
1013 struct proc_dir_entry *cpu_dir, **pdir; 987 sprintf(cpustr, "cpu%d", hcpu);
1014 988 remove_proc_subtree(cpustr, palinfo_dir);
1015 pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
1016 cpu_dir = *pdir;
1017 *pdir++=NULL;
1018 for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
1019 if ((*pdir)) {
1020 remove_proc_entry ((*pdir)->name, cpu_dir);
1021 *pdir ++= NULL;
1022 }
1023 }
1024
1025 if (cpu_dir) {
1026 remove_proc_entry(cpu_dir->name, palinfo_dir);
1027 }
1028} 989}
1029 990
1030static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, 991static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1058,6 +1019,8 @@ palinfo_init(void)
1058 1019
1059 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); 1020 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
1060 palinfo_dir = proc_mkdir("pal", NULL); 1021 palinfo_dir = proc_mkdir("pal", NULL);
1022 if (!palinfo_dir)
1023 return -ENOMEM;
1061 1024
1062 /* Create palinfo dirs in /proc for all online cpus */ 1025 /* Create palinfo dirs in /proc for all online cpus */
1063 for_each_online_cpu(i) { 1026 for_each_online_cpu(i) {
@@ -1073,22 +1036,8 @@ palinfo_init(void)
1073static void __exit 1036static void __exit
1074palinfo_exit(void) 1037palinfo_exit(void)
1075{ 1038{
1076 int i = 0;
1077
1078 /* remove all nodes: depth first pass. Could optimize this */
1079 for_each_online_cpu(i) {
1080 remove_palinfo_proc_entries(i);
1081 }
1082
1083 /*
1084 * Remove the top level entry finally
1085 */
1086 remove_proc_entry(palinfo_dir->name, NULL);
1087
1088 /*
1089 * Unregister from cpu notifier callbacks
1090 */
1091 unregister_hotcpu_notifier(&palinfo_cpu_notifier); 1039 unregister_hotcpu_notifier(&palinfo_cpu_notifier);
1040 remove_proc_subtree("pal", NULL);
1092} 1041}
1093 1042
1094module_init(palinfo_init); 1043module_init(palinfo_init);
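The palinfo rework above hinges on remove_proc_subtree() removing a /proc directory recursively, which is what makes the palinfo_proc_entries[] bookkeeping array unnecessary. A minimal sketch of the pattern, using a made-up "mydrv" directory (not part of this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *mydrv_dir;

static int __init mydrv_proc_init(void)
{
	mydrv_dir = proc_mkdir("mydrv", NULL);		/* /proc/mydrv */
	if (!mydrv_dir)
		return -ENOMEM;
	if (!proc_mkdir("cpu0", mydrv_dir))		/* per-cpu subdir */
		return -ENOMEM;
	return 0;
}

static void __exit mydrv_proc_exit(void)
{
	/* one call removes /proc/mydrv and everything beneath it */
	remove_proc_subtree("mydrv", NULL);
}

module_init(mydrv_proc_init);
module_exit(mydrv_proc_exit);
MODULE_LICENSE("GPL");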
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 2eda28414abb..9ea25fce06d5 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -42,6 +42,7 @@
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/tracehook.h> 43#include <linux/tracehook.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/cpu.h>
45 46
46#include <asm/errno.h> 47#include <asm/errno.h>
47#include <asm/intrinsics.h> 48#include <asm/intrinsics.h>
@@ -1322,8 +1323,6 @@ out:
1322} 1323}
1323EXPORT_SYMBOL(pfm_unregister_buffer_fmt); 1324EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1324 1325
1325extern void update_pal_halt_status(int);
1326
1327static int 1326static int
1328pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) 1327pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1329{ 1328{
@@ -1371,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1371 cpu)); 1370 cpu));
1372 1371
1373 /* 1372 /*
1374 * disable default_idle() to go to PAL_HALT 1373 * Force idle() into poll mode
1375 */ 1374 */
1376 update_pal_halt_status(0); 1375 cpu_idle_poll_ctrl(true);
1377 1376
1378 UNLOCK_PFS(flags); 1377 UNLOCK_PFS(flags);
1379 1378
@@ -1430,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1430 is_syswide, 1429 is_syswide,
1431 cpu)); 1430 cpu));
1432 1431
1433 /* 1432 /* Undo forced polling. Last session reenables pal_halt */
1434 * if possible, enable default_idle() to go into PAL_HALT 1433 cpu_idle_poll_ctrl(false);
1435 */
1436 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1437 update_pal_halt_status(1);
1438 1434
1439 UNLOCK_PFS(flags); 1435 UNLOCK_PFS(flags);
1440 1436
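The perfmon hunks above swap the ia64-private update_pal_halt_status() for the generic cpu_idle_poll_ctrl() declared in <linux/cpu.h> (hence the new include). The helper keeps a reference count, so every enable must eventually be paired with a disable; a minimal sketch with made-up session helpers:

#include <linux/cpu.h>

static void my_session_start(void)
{
	cpu_idle_poll_ctrl(true);	/* force the idle loop to busy-poll */
}

static void my_session_stop(void)
{
	cpu_idle_poll_ctrl(false);	/* last disable re-enables halting */
}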
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e34f565f595a..a26fc640e4ce 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -209,41 +209,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
209 local_irq_disable(); /* force interrupt disable */ 209 local_irq_disable(); /* force interrupt disable */
210} 210}
211 211
212static int pal_halt = 1;
213static int can_do_pal_halt = 1;
214
215static int __init nohalt_setup(char * str) 212static int __init nohalt_setup(char * str)
216{ 213{
217 pal_halt = can_do_pal_halt = 0; 214 cpu_idle_poll_ctrl(true);
218 return 1; 215 return 1;
219} 216}
220__setup("nohalt", nohalt_setup); 217__setup("nohalt", nohalt_setup);
221 218
222void
223update_pal_halt_status(int status)
224{
225 can_do_pal_halt = pal_halt && status;
226}
227
228/*
229 * We use this if we don't have any better idle routine..
230 */
231void
232default_idle (void)
233{
234 local_irq_enable();
235 while (!need_resched()) {
236 if (can_do_pal_halt) {
237 local_irq_disable();
238 if (!need_resched()) {
239 safe_halt();
240 }
241 local_irq_enable();
242 } else
243 cpu_relax();
244 }
245}
246
247#ifdef CONFIG_HOTPLUG_CPU 219#ifdef CONFIG_HOTPLUG_CPU
248/* We don't actually take CPU down, just spin without interrupts. */ 220/* We don't actually take CPU down, just spin without interrupts. */
249static inline void play_dead(void) 221static inline void play_dead(void)
@@ -270,50 +242,29 @@ static inline void play_dead(void)
270} 242}
271#endif /* CONFIG_HOTPLUG_CPU */ 243#endif /* CONFIG_HOTPLUG_CPU */
272 244
273void __attribute__((noreturn)) 245void arch_cpu_idle_dead(void)
274cpu_idle (void) 246{
247 play_dead();
248}
249
250void arch_cpu_idle(void)
275{ 251{
276 void (*mark_idle)(int) = ia64_mark_idle; 252 void (*mark_idle)(int) = ia64_mark_idle;
277 int cpu = smp_processor_id();
278
279 /* endless idle loop with no priority at all */
280 while (1) {
281 rcu_idle_enter();
282 if (can_do_pal_halt) {
283 current_thread_info()->status &= ~TS_POLLING;
284 /*
285 * TS_POLLING-cleared state must be visible before we
286 * test NEED_RESCHED:
287 */
288 smp_mb();
289 } else {
290 current_thread_info()->status |= TS_POLLING;
291 }
292 253
293 if (!need_resched()) {
294 void (*idle)(void);
295#ifdef CONFIG_SMP 254#ifdef CONFIG_SMP
296 min_xtp(); 255 min_xtp();
297#endif 256#endif
298 rmb(); 257 rmb();
299 if (mark_idle) 258 if (mark_idle)
300 (*mark_idle)(1); 259 (*mark_idle)(1);
301 260
302 if (!idle) 261 safe_halt();
303 idle = default_idle; 262
304 (*idle)(); 263 if (mark_idle)
305 if (mark_idle) 264 (*mark_idle)(0);
306 (*mark_idle)(0);
307#ifdef CONFIG_SMP 265#ifdef CONFIG_SMP
308 normal_xtp(); 266 normal_xtp();
309#endif 267#endif
310 }
311 rcu_idle_exit();
312 schedule_preempt_disabled();
313 check_pgt_cache();
314 if (cpu_is_offline(cpu))
315 play_dead();
316 }
317} 268}
318 269
319void 270void
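With the hand-rolled cpu_idle() gone, the need_resched()/RCU/scheduling bookkeeping moves into the generic idle loop entered via cpu_startup_entry() (see the smpboot.c hunk below); the architecture only supplies the hooks. Roughly, the contract those hooks must meet, sketched generically rather than as ia64 code:

void arch_cpu_idle(void)
{
	/* entered with interrupts disabled; must leave with them enabled,
	 * usually as a side effect of the halt primitive (safe_halt() on ia64) */
	local_irq_enable();
	cpu_relax();			/* stand-in for the real low-power wait */
}

void arch_cpu_idle_dead(void)
{
	/* called by the generic loop once this CPU has been taken offline */
	while (1)
		cpu_relax();
}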
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 500f1e4d9f9d..8d87168d218d 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -455,7 +455,7 @@ start_secondary (void *unused)
455 preempt_disable(); 455 preempt_disable();
456 smp_callin(); 456 smp_callin();
457 457
458 cpu_idle(); 458 cpu_startup_entry(CPUHP_ONLINE);
459 return 0; 459 return 0;
460} 460}
461 461
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4332f7ee5203..a7869f8f49a6 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
256 "srlz.d;;" 256 "srlz.d;;"
257 "ssm psr.i;;" 257 "ssm psr.i;;"
258 "srlz.d;;" 258 "srlz.d;;"
259 : "=r"(ret) : "r"(iha), "r"(pte):"memory"); 259 : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
260 260
261 return ret; 261 return ret;
262} 262}
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 80dab509dfb0..67c59ebec899 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -47,6 +47,8 @@ void show_mem(unsigned int filter)
47 printk(KERN_INFO "Mem-info:\n"); 47 printk(KERN_INFO "Mem-info:\n");
48 show_free_areas(filter); 48 show_free_areas(filter);
49 printk(KERN_INFO "Node memory in pages:\n"); 49 printk(KERN_INFO "Node memory in pages:\n");
50 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
51 return;
50 for_each_online_pgdat(pgdat) { 52 for_each_online_pgdat(pgdat) {
51 unsigned long present; 53 unsigned long present;
52 unsigned long flags; 54 unsigned long flags;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c2e955ee79a8..ae4db4bd6d97 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -623,6 +623,8 @@ void show_mem(unsigned int filter)
623 623
624 printk(KERN_INFO "Mem-info:\n"); 624 printk(KERN_INFO "Mem-info:\n");
625 show_free_areas(filter); 625 show_free_areas(filter);
626 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
627 return;
626 printk(KERN_INFO "Node memory in pages:\n"); 628 printk(KERN_INFO "Node memory in pages:\n");
627 for_each_online_pgdat(pgdat) { 629 for_each_online_pgdat(pgdat) {
628 unsigned long present; 630 unsigned long present;
@@ -817,13 +819,12 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
817#endif 819#endif
818 820
819#ifdef CONFIG_SPARSEMEM_VMEMMAP 821#ifdef CONFIG_SPARSEMEM_VMEMMAP
820int __meminit vmemmap_populate(struct page *start_page, 822int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
821 unsigned long size, int node)
822{ 823{
823 return vmemmap_populate_basepages(start_page, size, node); 824 return vmemmap_populate_basepages(start, end, node);
824} 825}
825 826
826void vmemmap_free(struct page *memmap, unsigned long nr_pages) 827void vmemmap_free(unsigned long start, unsigned long end)
827{ 828{
828} 829}
829#endif 830#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 20bc967c7209..d1fe4b402601 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -154,25 +154,14 @@ ia64_init_addr_space (void)
154void 154void
155free_initmem (void) 155free_initmem (void)
156{ 156{
157 unsigned long addr, eaddr; 157 free_reserved_area((unsigned long)ia64_imva(__init_begin),
158 158 (unsigned long)ia64_imva(__init_end),
159 addr = (unsigned long) ia64_imva(__init_begin); 159 0, "unused kernel");
160 eaddr = (unsigned long) ia64_imva(__init_end);
161 while (addr < eaddr) {
162 ClearPageReserved(virt_to_page(addr));
163 init_page_count(virt_to_page(addr));
164 free_page(addr);
165 ++totalram_pages;
166 addr += PAGE_SIZE;
167 }
168 printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
169 (__init_end - __init_begin) >> 10);
170} 160}
171 161
172void __init 162void __init
173free_initrd_mem (unsigned long start, unsigned long end) 163free_initrd_mem (unsigned long start, unsigned long end)
174{ 164{
175 struct page *page;
176 /* 165 /*
177 * EFI uses 4KB pages while the kernel can use 4KB or bigger. 166 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
178 * Thus EFI and the kernel may have different page sizes. It is 167 * Thus EFI and the kernel may have different page sizes. It is
@@ -213,11 +202,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
213 for (; start < end; start += PAGE_SIZE) { 202 for (; start < end; start += PAGE_SIZE) {
214 if (!virt_addr_valid(start)) 203 if (!virt_addr_valid(start))
215 continue; 204 continue;
216 page = virt_to_page(start); 205 free_reserved_page(virt_to_page(start));
217 ClearPageReserved(page);
218 init_page_count(page);
219 free_page(start);
220 ++totalram_pages;
221 } 206 }
222} 207}
223 208
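Both free_initmem() and free_initrd_mem() now use the common helpers instead of open-coding ClearPageReserved()/init_page_count()/free_page()/totalram_pages. At this point in the series free_reserved_area() still takes virtual start/end addresses plus a poison byte and a label (later kernels changed the prototype); a minimal sketch with illustrative helper names:

#include <linux/mm.h>

static void release_init_range(unsigned long start, unsigned long end)
{
	/* clears PG_reserved, resets the refcount, frees the pages,
	 * adjusts totalram_pages and prints a "Freeing ..." line */
	free_reserved_area(start, end, 0, "unused kernel");
}

static void release_one_page(unsigned long addr)
{
	free_reserved_page(virt_to_page(addr));	/* per-page variant */
}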
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 3dccdd8eb275..43964cde6214 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -16,7 +16,7 @@
16#include <asm/meminit.h> 16#include <asm/meminit.h>
17 17
18static inline void __iomem * 18static inline void __iomem *
19__ioremap (unsigned long phys_addr) 19__ioremap_uc(unsigned long phys_addr)
20{ 20{
21 return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); 21 return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
22} 22}
@@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr)
24void __iomem * 24void __iomem *
25early_ioremap (unsigned long phys_addr, unsigned long size) 25early_ioremap (unsigned long phys_addr, unsigned long size)
26{ 26{
27 return __ioremap(phys_addr); 27 u64 attr;
28 attr = kern_mem_attribute(phys_addr, size);
29 if (attr & EFI_MEMORY_WB)
30 return (void __iomem *) phys_to_virt(phys_addr);
31 return __ioremap_uc(phys_addr);
28} 32}
29 33
30void __iomem * 34void __iomem *
@@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
47 if (attr & EFI_MEMORY_WB) 51 if (attr & EFI_MEMORY_WB)
48 return (void __iomem *) phys_to_virt(phys_addr); 52 return (void __iomem *) phys_to_virt(phys_addr);
49 else if (attr & EFI_MEMORY_UC) 53 else if (attr & EFI_MEMORY_UC)
50 return __ioremap(phys_addr); 54 return __ioremap_uc(phys_addr);
51 55
52 /* 56 /*
53 * Some chipsets don't support UC access to memory. If 57 * Some chipsets don't support UC access to memory. If
@@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
93 return (void __iomem *) (offset + (char __iomem *)addr); 97 return (void __iomem *) (offset + (char __iomem *)addr);
94 } 98 }
95 99
96 return __ioremap(phys_addr); 100 return __ioremap_uc(phys_addr);
97} 101}
98EXPORT_SYMBOL(ioremap); 102EXPORT_SYMBOL(ioremap);
99 103
@@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
103 if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) 107 if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
104 return NULL; 108 return NULL;
105 109
106 return __ioremap(phys_addr); 110 return __ioremap_uc(phys_addr);
107} 111}
108EXPORT_SYMBOL(ioremap_nocache); 112EXPORT_SYMBOL(ioremap_nocache);
109 113
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 3efea7d0a351..4248492b9321 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -61,18 +61,36 @@ paddr_to_nid(unsigned long paddr)
61int __meminit __early_pfn_to_nid(unsigned long pfn) 61int __meminit __early_pfn_to_nid(unsigned long pfn)
62{ 62{
63 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; 63 int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
64 /*
65 * NOTE: The following SMP-unsafe globals are only used early in boot
66 * when the kernel is running single-threaded.
67 */
68 static int __meminitdata last_ssec, last_esec;
69 static int __meminitdata last_nid;
70
71 if (section >= last_ssec && section < last_esec)
72 return last_nid;
64 73
65 for (i = 0; i < num_node_memblks; i++) { 74 for (i = 0; i < num_node_memblks; i++) {
66 ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT; 75 ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
67 esec = (node_memblk[i].start_paddr + node_memblk[i].size + 76 esec = (node_memblk[i].start_paddr + node_memblk[i].size +
68 ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT; 77 ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
69 if (section >= ssec && section < esec) 78 if (section >= ssec && section < esec) {
79 last_ssec = ssec;
80 last_esec = esec;
81 last_nid = node_memblk[i].nid;
70 return node_memblk[i].nid; 82 return node_memblk[i].nid;
83 }
71 } 84 }
72 85
73 return -1; 86 return -1;
74} 87}
75 88
89void __cpuinit numa_clear_node(int cpu)
90{
91 unmap_cpu_from_node(cpu, NUMA_NO_NODE);
92}
93
76#ifdef CONFIG_MEMORY_HOTPLUG 94#ifdef CONFIG_MEMORY_HOTPLUG
77/* 95/*
78 * SRAT information is stored in node_memblk[], then we can use SRAT 96 * SRAT information is stored in node_memblk[], then we can use SRAT
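The __early_pfn_to_nid() hunk adds a one-entry cache of the last matching memblk window, which is only safe because the function runs single-threaded during early boot, as the added comment says. The general shape of that optimisation, as a standalone sketch rather than the ia64 code:

struct range { unsigned long lo, hi; int nid; };

static int lookup_nid(const struct range *tbl, int n, unsigned long sec)
{
	static unsigned long last_lo, last_hi;
	static int last_nid = -1;
	int i;

	if (sec >= last_lo && sec < last_hi)
		return last_nid;		/* repeat query hits the cache */

	for (i = 0; i < n; i++) {
		if (sec >= tbl[i].lo && sec < tbl[i].hi) {
			last_lo = tbl[i].lo;	/* remember the winning window */
			last_hi = tbl[i].hi;
			last_nid = tbl[i].nid;
			return last_nid;
		}
	}
	return -1;
}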
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 60532ab27346..de1474ff0bc5 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/pci-acpi.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/ioport.h> 20#include <linux/ioport.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
@@ -458,6 +459,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
458 platform_pci_fixup_bus(b); 459 platform_pci_fixup_bus(b);
459} 460}
460 461
462void pcibios_add_bus(struct pci_bus *bus)
463{
464 acpi_pci_add_bus(bus);
465}
466
467void pcibios_remove_bus(struct pci_bus *bus)
468{
469 acpi_pci_remove_bus(bus);
470}
471
461void pcibios_set_master (struct pci_dev *dev) 472void pcibios_set_master (struct pci_dev *dev)
462{ 473{
463 /* No special bus mastering setup handling */ 474 /* No special bus mastering setup handling */
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 14c1711238c0..e35f6485c1fd 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -490,11 +490,14 @@ static int __init tiocx_init(void)
490{ 490{
491 cnodeid_t cnodeid; 491 cnodeid_t cnodeid;
492 int found_tiocx_device = 0; 492 int found_tiocx_device = 0;
493 int err;
493 494
494 if (!ia64_platform_is("sn2")) 495 if (!ia64_platform_is("sn2"))
495 return 0; 496 return 0;
496 497
497 bus_register(&tiocx_bus_type); 498 err = bus_register(&tiocx_bus_type);
499 if (err)
500 return err;
498 501
499 for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) { 502 for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
500 nasid_t nasid; 503 nasid_t nasid;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 92623818a1fe..bcd17b206571 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,7 +10,7 @@ config M32R
10 select ARCH_WANT_IPC_PARSE_VERSION 10 select ARCH_WANT_IPC_PARSE_VERSION
11 select HAVE_DEBUG_BUGVERBOSE 11 select HAVE_DEBUG_BUGVERBOSE
12 select HAVE_GENERIC_HARDIRQS 12 select HAVE_GENERIC_HARDIRQS
13 select HAVE_VIRT_TO_BUS 13 select VIRT_TO_BUS
14 select GENERIC_IRQ_PROBE 14 select GENERIC_IRQ_PROBE
15 select GENERIC_IRQ_SHOW 15 select GENERIC_IRQ_SHOW
16 select GENERIC_ATOMIC64 16 select GENERIC_ATOMIC64
diff --git a/arch/m32r/include/uapi/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h
index da4518f82d6d..98470fe483b6 100644
--- a/arch/m32r/include/uapi/asm/stat.h
+++ b/arch/m32r/include/uapi/asm/stat.h
@@ -63,10 +63,10 @@ struct stat64 {
63 long long st_size; 63 long long st_size;
64 unsigned long st_blksize; 64 unsigned long st_blksize;
65 65
66#if defined(__BIG_ENDIAN) 66#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
67 unsigned long __pad4; /* future possible st_blocks high bits */ 67 unsigned long __pad4; /* future possible st_blocks high bits */
68 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 68 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
69#elif defined(__LITTLE_ENDIAN) 69#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
70 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 70 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
71 unsigned long __pad4; /* future possible st_blocks high bits */ 71 unsigned long __pad4; /* future possible st_blocks high bits */
72#else 72#else
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index bde899e155d3..e2d049018c3b 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -47,24 +47,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
47void (*pm_power_off)(void) = NULL; 47void (*pm_power_off)(void) = NULL;
48EXPORT_SYMBOL(pm_power_off); 48EXPORT_SYMBOL(pm_power_off);
49 49
50/*
51 * The idle thread. There's no useful work to be
52 * done, so just try to conserve power and have a
53 * low exit latency (ie sit in a loop waiting for
54 * somebody to say that they'd like to reschedule)
55 */
56void cpu_idle (void)
57{
58 /* endless idle loop with no priority at all */
59 while (1) {
60 rcu_idle_enter();
61 while (!need_resched())
62 cpu_relax();
63 rcu_idle_exit();
64 schedule_preempt_disabled();
65 }
66}
67
68void machine_restart(char *__unused) 50void machine_restart(char *__unused)
69{ 51{
70#if defined(CONFIG_PLAT_MAPPI3) 52#if defined(CONFIG_PLAT_MAPPI3)
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 13168a769f8f..0ac558adc605 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -432,7 +432,7 @@ int __init start_secondary(void *unused)
432 */ 432 */
433 local_flush_tlb_all(); 433 local_flush_tlb_all();
434 434
435 cpu_idle(); 435 cpu_startup_entry(CPUHP_ONLINE);
436 return 0; 436 return 0;
437} 437}
438 438
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 3bcb207e5b6d..9fe3467a5133 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -132,10 +132,8 @@ static void show_trace(struct task_struct *task, unsigned long *stack)
132 printk("Call Trace: "); 132 printk("Call Trace: ");
133 while (!kstack_end(stack)) { 133 while (!kstack_end(stack)) {
134 addr = *stack++; 134 addr = *stack++;
135 if (__kernel_text_address(addr)) { 135 if (__kernel_text_address(addr))
136 printk("[<%08lx>] ", addr); 136 printk("[<%08lx>] %pSR\n", addr, (void *)addr);
137 print_symbol("%s\n", addr);
138 }
139 } 137 }
140 printk("\n"); 138 printk("\n");
141} 139}
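The traps.c hunk folds print_symbol() into the printk itself: %pSR formats a pointer as symbol+offset, with the R variant first passing the value through __builtin_extract_return_addr(), which is the right treatment for call-trace return addresses. Usage is a single line (pr_err is illustrative here):

	pr_err("faulted at [<%08lx>] %pSR\n", addr, (void *)addr);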
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 78b660e903da..ab4cbce91a9b 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -28,10 +28,7 @@
28#include <asm/mmu_context.h> 28#include <asm/mmu_context.h>
29#include <asm/setup.h> 29#include <asm/setup.h>
30#include <asm/tlb.h> 30#include <asm/tlb.h>
31 31#include <asm/sections.h>
32/* References to section boundaries */
33extern char _text, _etext, _edata;
34extern char __init_begin, __init_end;
35 32
36pgd_t swapper_pg_dir[1024]; 33pgd_t swapper_pg_dir[1024];
37 34
@@ -184,17 +181,7 @@ void __init mem_init(void)
184 *======================================================================*/ 181 *======================================================================*/
185void free_initmem(void) 182void free_initmem(void)
186{ 183{
187 unsigned long addr; 184 free_initmem_default(0);
188
189 addr = (unsigned long)(&__init_begin);
190 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
191 ClearPageReserved(virt_to_page(addr));
192 init_page_count(virt_to_page(addr));
193 free_page(addr);
194 totalram_pages++;
195 }
196 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \
197 (int)(&__init_end - &__init_begin) >> 10);
198} 185}
199 186
200#ifdef CONFIG_BLK_DEV_INITRD 187#ifdef CONFIG_BLK_DEV_INITRD
@@ -204,13 +191,6 @@ void free_initmem(void)
204 *======================================================================*/ 191 *======================================================================*/
205void free_initrd_mem(unsigned long start, unsigned long end) 192void free_initrd_mem(unsigned long start, unsigned long end)
206{ 193{
207 unsigned long p; 194 free_reserved_area(start, end, 0, "initrd");
208 for (p = start; p < end; p += PAGE_SIZE) {
209 ClearPageReserved(virt_to_page(p));
210 init_page_count(virt_to_page(p));
211 free_page(p);
212 totalram_pages++;
213 }
214 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
215} 195}
216#endif 196#endif
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0e708c78e01c..6de813370b8c 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -8,7 +8,7 @@ config M68K
8 select GENERIC_IRQ_SHOW 8 select GENERIC_IRQ_SHOW
9 select GENERIC_ATOMIC64 9 select GENERIC_ATOMIC64
10 select HAVE_UID16 10 select HAVE_UID16
11 select HAVE_VIRT_TO_BUS 11 select VIRT_TO_BUS
12 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 12 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
13 select GENERIC_CPU_DEVICES 13 select GENERIC_CPU_DEVICES
14 select GENERIC_STRNCPY_FROM_USER if MMU 14 select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index 93ef0346b209..675b087198f6 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -45,6 +45,16 @@ config ISA
45 (MCA) or VESA. ISA is an older system, now being displaced by PCI; 45 (MCA) or VESA. ISA is an older system, now being displaced by PCI;
46 newer boards don't support it. If you have ISA, say Y, otherwise N. 46 newer boards don't support it. If you have ISA, say Y, otherwise N.
47 47
48config ATARI_ROM_ISA
49 bool "Atari ROM port ISA adapter support"
50 depends on ATARI
51 help
52 This option enables support for the ROM port ISA adapter used to
53 operate ISA cards on Atari. Only 8 bit cards are supported, and
54 no interrupt lines are connected.
55 The only driver currently using this adapter is the EtherNEC
56 driver for RTL8019AS based NE2000 compatible network cards.
57
48config GENERIC_ISA_DMA 58config GENERIC_ISA_DMA
49 def_bool ISA 59 def_bool ISA
50 60
diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
index 4bc945dfe467..d163991c5717 100644
--- a/arch/m68k/Kconfig.devices
+++ b/arch/m68k/Kconfig.devices
@@ -55,6 +55,30 @@ config NFETH
55 which will emulate a regular ethernet device while presenting an 55 which will emulate a regular ethernet device while presenting an
56 ethertap device to the host system. 56 ethertap device to the host system.
57 57
58config ATARI_ETHERNAT
59 bool "Atari EtherNAT Ethernet support"
60 depends on ATARI
61 ---help---
62 Say Y to include support for the EtherNAT network adapter for the
63 CT/60 extension port.
64
65 To compile the actual ethernet driver, choose Y or M for the SMC91X
66 option in the network device section; the module will be called smc91x.
67
68config ATARI_ETHERNEC
69 bool "Atari EtherNEC Ethernet support"
70 depends on ATARI_ROM_ISA
71 ---help---
72 Say Y to include support for the EtherNEC network adapter for the
73 ROM port. The driver works by polling instead of interrupts, so it
74 is quite slow.
75
76 This driver also supports the ethernet part of the NetUSBee ROM
77 port combined Ethernet/USB adapter.
78
79 To compile the actual ethernet driver, choose Y or M for the NE2000
80 option in the network device section; the module will be called ne.
81
58endmenu 82endmenu
59 83
60menu "Character devices" 84menu "Character devices"
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 7cdf6b010381..7240584d3439 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -310,7 +310,6 @@ config COBRA5282
310config SOM5282EM 310config SOM5282EM
311 bool "EMAC.Inc SOM5282EM board support" 311 bool "EMAC.Inc SOM5282EM board support"
312 depends on M528x 312 depends on M528x
313 select EMAC_INC
314 help 313 help
315 Support for the EMAC.Inc SOM5282EM module. 314 Support for the EMAC.Inc SOM5282EM module.
316 315
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 3f41092d1b70..20cde4e9fc77 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -49,6 +49,7 @@
49#include <asm/atari_stdma.h> 49#include <asm/atari_stdma.h>
50#include <asm/irq.h> 50#include <asm/irq.h>
51#include <asm/entry.h> 51#include <asm/entry.h>
52#include <asm/io.h>
52 53
53 54
54/* 55/*
@@ -122,6 +123,136 @@ static struct irq_chip atari_irq_chip = {
122}; 123};
123 124
124/* 125/*
126 * ST-MFP timer D chained interrupts - each driver gets its own timer
127 * interrupt instance.
128 */
129
130struct mfptimerbase {
131 volatile struct MFP *mfp;
132 unsigned char mfp_mask, mfp_data;
133 unsigned short int_mask;
134 int handler_irq, mfptimer_irq, server_irq;
135 char *name;
136} stmfp_base = {
137 .mfp = &st_mfp,
138 .int_mask = 0x0,
139 .handler_irq = IRQ_MFP_TIMD,
140 .mfptimer_irq = IRQ_MFP_TIMER1,
141 .name = "MFP Timer D"
142};
143
144static irqreturn_t mfptimer_handler(int irq, void *dev_id)
145{
146 struct mfptimerbase *base = dev_id;
147 int mach_irq;
148 unsigned char ints;
149
150 mach_irq = base->mfptimer_irq;
151 ints = base->int_mask;
152 for (; ints; mach_irq++, ints >>= 1) {
153 if (ints & 1)
154 generic_handle_irq(mach_irq);
155 }
156 return IRQ_HANDLED;
157}
158
159
160static void atari_mfptimer_enable(struct irq_data *data)
161{
162 int mfp_num = data->irq - IRQ_MFP_TIMER1;
163 stmfp_base.int_mask |= 1 << mfp_num;
164 atari_enable_irq(IRQ_MFP_TIMD);
165}
166
167static void atari_mfptimer_disable(struct irq_data *data)
168{
169 int mfp_num = data->irq - IRQ_MFP_TIMER1;
170 stmfp_base.int_mask &= ~(1 << mfp_num);
171 if (!stmfp_base.int_mask)
172 atari_disable_irq(IRQ_MFP_TIMD);
173}
174
175static struct irq_chip atari_mfptimer_chip = {
176 .name = "timer_d",
177 .irq_enable = atari_mfptimer_enable,
178 .irq_disable = atari_mfptimer_disable,
179};
180
181
182/*
183 * EtherNAT CPLD interrupt handling
184 * CPLD interrupt register is at phys. 0x80000023
185 * Need this mapped in at interrupt startup time
186 * Possibly need this mapped on demand anyway -
187 * EtherNAT USB driver needs to disable IRQ before
188 * startup!
189 */
190
191static unsigned char *enat_cpld;
192
193static unsigned int atari_ethernat_startup(struct irq_data *data)
194{
195 int enat_num = 140 - data->irq + 1;
196
197 m68k_irq_startup(data);
198 /*
199 * map CPLD interrupt register
200 */
201 if (!enat_cpld)
202 enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2);
203 /*
204 * do _not_ enable the USB chip interrupt here - causes interrupt storm
205 * and triggers dead interrupt watchdog
206 * Need to reset the USB chip to a sane state in early startup before
207 * removing this hack
208 */
209 if (enat_num == 1)
210 *enat_cpld |= 1 << enat_num;
211
212 return 0;
213}
214
215static void atari_ethernat_enable(struct irq_data *data)
216{
217 int enat_num = 140 - data->irq + 1;
218 /*
219 * map CPLD interrupt register
220 */
221 if (!enat_cpld)
222 enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2);
223 *enat_cpld |= 1 << enat_num;
224}
225
226static void atari_ethernat_disable(struct irq_data *data)
227{
228 int enat_num = 140 - data->irq + 1;
229 /*
230 * map CPLD interrupt register
231 */
232 if (!enat_cpld)
233 enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2);
234 *enat_cpld &= ~(1 << enat_num);
235}
236
237static void atari_ethernat_shutdown(struct irq_data *data)
238{
239 int enat_num = 140 - data->irq + 1;
240 if (enat_cpld) {
241 *enat_cpld &= ~(1 << enat_num);
242 iounmap(enat_cpld);
243 enat_cpld = NULL;
244 }
245}
246
247static struct irq_chip atari_ethernat_chip = {
248 .name = "ethernat",
249 .irq_startup = atari_ethernat_startup,
250 .irq_shutdown = atari_ethernat_shutdown,
251 .irq_enable = atari_ethernat_enable,
252 .irq_disable = atari_ethernat_disable,
253};
254
255/*
125 * void atari_init_IRQ (void) 256 * void atari_init_IRQ (void)
126 * 257 *
127 * Parameters: None 258 * Parameters: None
@@ -198,6 +329,27 @@ void __init atari_init_IRQ(void)
198 /* Initialize the PSG: all sounds off, both ports output */ 329 /* Initialize the PSG: all sounds off, both ports output */
199 sound_ym.rd_data_reg_sel = 7; 330 sound_ym.rd_data_reg_sel = 7;
200 sound_ym.wd_data = 0xff; 331 sound_ym.wd_data = 0xff;
332
333 m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq,
334 IRQ_MFP_TIMER1, 8);
335
336 /* prepare timer D data for use as poll interrupt */
337 /* set Timer D data Register - needs to be > 0 */
338 st_mfp.tim_dt_d = 254; /* < 100 Hz */
339 /* start timer D, div = 1:100 */
340 st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6;
341
342 /* request timer D dispatch handler */
343 if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED,
344 stmfp_base.name, &stmfp_base))
345 pr_err("Couldn't register %s interrupt\n", stmfp_base.name);
346
347 /*
348 * EtherNAT ethernet / USB interrupt handlers
349 */
350
351 m68k_setup_irq_controller(&atari_ethernat_chip, handle_simple_irq,
352 139, 2);
201} 353}
202 354
203 355
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 037c11c99331..fb2d0bd9b3ad 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -31,6 +31,8 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/ioport.h> 33#include <linux/ioport.h>
34#include <linux/platform_device.h>
35#include <linux/usb/isp116x.h>
34#include <linux/vt_kern.h> 36#include <linux/vt_kern.h>
35#include <linux/module.h> 37#include <linux/module.h>
36 38
@@ -655,3 +657,240 @@ static void atari_get_hardware_list(struct seq_file *m)
655 ATARIHW_ANNOUNCE(VME, "VME Bus"); 657 ATARIHW_ANNOUNCE(VME, "VME Bus");
656 ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor"); 658 ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor");
657} 659}
660
661/*
662 * MSch: initial platform device support for Atari,
663 * required for EtherNAT/EtherNEC/NetUSBee drivers
664 */
665
666#if defined(CONFIG_ATARI_ETHERNAT) || defined(CONFIG_ATARI_ETHERNEC)
667static void isp1160_delay(struct device *dev, int delay)
668{
669 ndelay(delay);
670}
671#endif
672
673#ifdef CONFIG_ATARI_ETHERNAT
674/*
675 * EtherNAT: SMC91C111 Ethernet chipset, handled by smc91x driver
676 */
677
678#define ATARI_ETHERNAT_IRQ 140
679
680static struct resource smc91x_resources[] = {
681 [0] = {
682 .name = "smc91x-regs",
683 .start = ATARI_ETHERNAT_PHYS_ADDR,
684 .end = ATARI_ETHERNAT_PHYS_ADDR + 0xfffff,
685 .flags = IORESOURCE_MEM,
686 },
687 [1] = {
688 .name = "smc91x-irq",
689 .start = ATARI_ETHERNAT_IRQ,
690 .end = ATARI_ETHERNAT_IRQ,
691 .flags = IORESOURCE_IRQ,
692 },
693};
694
695static struct platform_device smc91x_device = {
696 .name = "smc91x",
697 .id = -1,
698 .num_resources = ARRAY_SIZE(smc91x_resources),
699 .resource = smc91x_resources,
700};
701
702/*
703 * ISP 1160 - using the isp116x-hcd module
704 */
705
706#define ATARI_USB_PHYS_ADDR 0x80000012
707#define ATARI_USB_IRQ 139
708
709static struct resource isp1160_resources[] = {
710 [0] = {
711 .name = "isp1160-data",
712 .start = ATARI_USB_PHYS_ADDR,
713 .end = ATARI_USB_PHYS_ADDR + 0x1,
714 .flags = IORESOURCE_MEM,
715 },
716 [1] = {
717 .name = "isp1160-regs",
718 .start = ATARI_USB_PHYS_ADDR + 0x4,
719 .end = ATARI_USB_PHYS_ADDR + 0x5,
720 .flags = IORESOURCE_MEM,
721 },
722 [2] = {
723 .name = "isp1160-irq",
724 .start = ATARI_USB_IRQ,
725 .end = ATARI_USB_IRQ,
726 .flags = IORESOURCE_IRQ,
727 },
728};
729
730/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
731static struct isp116x_platform_data isp1160_platform_data = {
732 /* Enable internal resistors on downstream ports */
733 .sel15Kres = 1,
734 /* On-chip overcurrent protection */
735 .oc_enable = 1,
736 /* INT output polarity */
737 .int_act_high = 1,
738 /* INT edge or level triggered */
739 .int_edge_triggered = 0,
740
741 /* WAKEUP pin connected - NOT SUPPORTED */
742 /* .remote_wakeup_connected = 0, */
743 /* Wakeup by devices on usb bus enabled */
744 .remote_wakeup_enable = 0,
745 .delay = isp1160_delay,
746};
747
748static struct platform_device isp1160_device = {
749 .name = "isp116x-hcd",
750 .id = 0,
751 .num_resources = ARRAY_SIZE(isp1160_resources),
752 .resource = isp1160_resources,
753 .dev = {
754 .platform_data = &isp1160_platform_data,
755 },
756};
757
758static struct platform_device *atari_ethernat_devices[] __initdata = {
759 &smc91x_device,
760 &isp1160_device
761};
762#endif /* CONFIG_ATARI_ETHERNAT */
763
764#ifdef CONFIG_ATARI_ETHERNEC
765/*
766 * EtherNEC: RTL8019 (NE2000 compatible) Ethernet chipset,
767 * handled by ne.c driver
768 */
769
770#define ATARI_ETHERNEC_PHYS_ADDR 0xfffa0000
771#define ATARI_ETHERNEC_BASE 0x300
772#define ATARI_ETHERNEC_IRQ IRQ_MFP_TIMER1
773
774static struct resource rtl8019_resources[] = {
775 [0] = {
776 .name = "rtl8019-regs",
777 .start = ATARI_ETHERNEC_BASE,
778 .end = ATARI_ETHERNEC_BASE + 0x20 - 1,
779 .flags = IORESOURCE_IO,
780 },
781 [1] = {
782 .name = "rtl8019-irq",
783 .start = ATARI_ETHERNEC_IRQ,
784 .end = ATARI_ETHERNEC_IRQ,
785 .flags = IORESOURCE_IRQ,
786 },
787};
788
789static struct platform_device rtl8019_device = {
790 .name = "ne",
791 .id = -1,
792 .num_resources = ARRAY_SIZE(rtl8019_resources),
793 .resource = rtl8019_resources,
794};
795
796/*
797 * NetUSBee: ISP1160 USB host adapter via ROM-port adapter
798 */
799
800#define ATARI_NETUSBEE_PHYS_ADDR 0xfffa8000
801#define ATARI_NETUSBEE_BASE 0x340
802#define ATARI_NETUSBEE_IRQ IRQ_MFP_TIMER2
803
804static struct resource netusbee_resources[] = {
805 [0] = {
806 .name = "isp1160-data",
807 .start = ATARI_NETUSBEE_BASE,
808 .end = ATARI_NETUSBEE_BASE + 0x1,
809 .flags = IORESOURCE_MEM,
810 },
811 [1] = {
812 .name = "isp1160-regs",
813 .start = ATARI_NETUSBEE_BASE + 0x20,
814 .end = ATARI_NETUSBEE_BASE + 0x21,
815 .flags = IORESOURCE_MEM,
816 },
817 [2] = {
818 .name = "isp1160-irq",
819 .start = ATARI_NETUSBEE_IRQ,
820 .end = ATARI_NETUSBEE_IRQ,
821 .flags = IORESOURCE_IRQ,
822 },
823};
824
825/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
826static struct isp116x_platform_data netusbee_platform_data = {
827 /* Enable internal resistors on downstream ports */
828 .sel15Kres = 1,
829 /* On-chip overcurrent protection */
830 .oc_enable = 1,
831 /* INT output polarity */
832 .int_act_high = 1,
833 /* INT edge or level triggered */
834 .int_edge_triggered = 0,
835
836 /* WAKEUP pin connected - NOT SUPPORTED */
837 /* .remote_wakeup_connected = 0, */
838 /* Wakeup by devices on usb bus enabled */
839 .remote_wakeup_enable = 0,
840 .delay = isp1160_delay,
841};
842
843static struct platform_device netusbee_device = {
844 .name = "isp116x-hcd",
845 .id = 1,
846 .num_resources = ARRAY_SIZE(netusbee_resources),
847 .resource = netusbee_resources,
848 .dev = {
849 .platform_data = &netusbee_platform_data,
850 },
851};
852
853static struct platform_device *atari_netusbee_devices[] __initdata = {
854 &rtl8019_device,
855 &netusbee_device
856};
857#endif /* CONFIG_ATARI_ETHERNEC */
858
859int __init atari_platform_init(void)
860{
861 int rv = 0;
862
863 if (!MACH_IS_ATARI)
864 return -ENODEV;
865
866#ifdef CONFIG_ATARI_ETHERNAT
867 {
868 unsigned char *enatc_virt;
869 enatc_virt = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0xf);
870 if (hwreg_present(enatc_virt)) {
871 rv = platform_add_devices(atari_ethernat_devices,
872 ARRAY_SIZE(atari_ethernat_devices));
873 }
874 iounmap(enatc_virt);
875 }
876#endif
877
878#ifdef CONFIG_ATARI_ETHERNEC
879 {
880 int error;
881 unsigned char *enec_virt;
882 enec_virt = (unsigned char *)ioremap((ATARI_ETHERNEC_PHYS_ADDR), 0xf);
883 if (hwreg_present(enec_virt)) {
884 error = platform_add_devices(atari_netusbee_devices,
885 ARRAY_SIZE(atari_netusbee_devices));
886 if (error && !rv)
887 rv = error;
888 }
889 iounmap(enec_virt);
890 }
891#endif
892
893 return rv;
894}
895
896arch_initcall(atari_platform_init);
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h
index a337e56d09bf..4ebf098b8a1f 100644
--- a/arch/m68k/include/asm/MC68328.h
+++ b/arch/m68k/include/asm/MC68328.h
@@ -293,7 +293,7 @@
293/* 293/*
294 * Here go the bitmasks themselves 294 * Here go the bitmasks themselves
295 */ 295 */
296#define IMR_MSPIM (1 << SPIM _IRQ_NUM) /* Mask SPI Master interrupt */ 296#define IMR_MSPIM (1 << SPIM_IRQ_NUM) /* Mask SPI Master interrupt */
297#define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */ 297#define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */
298#define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */ 298#define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */
299#define IMR_MWDT (1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */ 299#define IMR_MWDT (1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */
@@ -327,7 +327,7 @@
327#define IWR_ADDR 0xfffff308 327#define IWR_ADDR 0xfffff308
328#define IWR LONG_REF(IWR_ADDR) 328#define IWR LONG_REF(IWR_ADDR)
329 329
330#define IWR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ 330#define IWR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
331#define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ 331#define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
332#define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ 332#define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
333#define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ 333#define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -357,7 +357,7 @@
357#define ISR_ADDR 0xfffff30c 357#define ISR_ADDR 0xfffff30c
358#define ISR LONG_REF(ISR_ADDR) 358#define ISR LONG_REF(ISR_ADDR)
359 359
360#define ISR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ 360#define ISR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
361#define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ 361#define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
362#define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ 362#define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
363#define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ 363#define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -391,7 +391,7 @@
391#define IPR_ADDR 0xfffff310 391#define IPR_ADDR 0xfffff310
392#define IPR LONG_REF(IPR_ADDR) 392#define IPR LONG_REF(IPR_ADDR)
393 393
394#define IPR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ 394#define IPR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
395#define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ 395#define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
396#define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ 396#define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
397#define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ 397#define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -757,7 +757,7 @@
757 757
758/* 'EZ328-compatible definitions */ 758/* 'EZ328-compatible definitions */
759#define TCN_ADDR TCN1_ADDR 759#define TCN_ADDR TCN1_ADDR
760#define TCN TCN 760#define TCN TCN1
761 761
762/* 762/*
763 * Timer Unit 1 and 2 Status Registers 763 * Timer Unit 1 and 2 Status Registers
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index c0cb36350775..d887050e6da6 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -805,5 +805,11 @@ struct MSTE_RTC {
805 805
806#define mste_rtc ((*(volatile struct MSTE_RTC *)MSTE_RTC_BAS)) 806#define mste_rtc ((*(volatile struct MSTE_RTC *)MSTE_RTC_BAS))
807 807
808/*
809** EtherNAT add-on card for Falcon - combined ethernet and USB adapter
810*/
811
812#define ATARI_ETHERNAT_PHYS_ADDR 0x80000000
813
808#endif /* linux/atarihw.h */ 814#endif /* linux/atarihw.h */
809 815
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 5fc13bdf9044..953e0ac6855e 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -32,7 +32,7 @@
32#define VME_SOURCE_BASE 56 32#define VME_SOURCE_BASE 56
33#define VME_MAX_SOURCES 16 33#define VME_MAX_SOURCES 16
34 34
35#define NUM_ATARI_SOURCES (VME_SOURCE_BASE+VME_MAX_SOURCES-STMFP_SOURCE_BASE) 35#define NUM_ATARI_SOURCES 141
36 36
37/* convert vector number to int source number */ 37/* convert vector number to int source number */
38#define IRQ_VECTOR_TO_SOURCE(v) ((v) - ((v) < 0x20 ? 0x18 : (0x40-8))) 38#define IRQ_VECTOR_TO_SOURCE(v) ((v) - ((v) < 0x20 ? 0x18 : (0x40-8)))
@@ -94,6 +94,15 @@
94#define IRQ_SCCA_RX (52) 94#define IRQ_SCCA_RX (52)
95#define IRQ_SCCA_SPCOND (54) 95#define IRQ_SCCA_SPCOND (54)
96 96
97/* shared MFP timer D interrupts - hires timer for EtherNEC et al. */
98#define IRQ_MFP_TIMER1 (64)
99#define IRQ_MFP_TIMER2 (65)
100#define IRQ_MFP_TIMER3 (66)
101#define IRQ_MFP_TIMER4 (67)
102#define IRQ_MFP_TIMER5 (68)
103#define IRQ_MFP_TIMER6 (69)
104#define IRQ_MFP_TIMER7 (70)
105#define IRQ_MFP_TIMER8 (71)
97 106
98#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */ 107#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */
99#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */ 108#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 5c81d0eae5cf..bc755bc620ad 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -124,6 +124,9 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
124#define cmpxchg_local(ptr, o, n) \ 124#define cmpxchg_local(ptr, o, n) \
125 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ 125 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
126 (unsigned long)(n), sizeof(*(ptr)))) 126 (unsigned long)(n), sizeof(*(ptr))))
127
128#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
129
127#else 130#else
128 131
129/* 132/*
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 12d8fe4f1d30..d28fa8fe26fe 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -92,5 +92,28 @@ static inline void __udelay(unsigned long usecs)
92#define udelay(n) (__builtin_constant_p(n) ? \ 92#define udelay(n) (__builtin_constant_p(n) ? \
93 ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n)) 93 ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
94 94
95/*
96 * nanosecond delay:
97 *
98 * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of loops
99 * per microsecond
100 *
101 * 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of
102 * nanoseconds per loop
103 *
104 * So n / ( 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) ) would
105 * be the number of loops for n nanoseconds
106 */
107
108/*
109 * The simpler m68k and ColdFire processors do not have a 32*32->64
110 * multiply instruction. So we need to handle them a little differently.
111 * We use a bit of shifting and a single 32*32->32 multiply to get close.
112 * This is a macro so that the const version can factor out the first
113 * multiply and shift.
114 */
115#define HZSCALE (268435456 / (1000000 / HZ))
116
117#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
95 118
96#endif /* defined(_M68K_DELAY_H) */ 119#endif /* defined(_M68K_DELAY_H) */
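A quick sanity check of the new ndelay() scaling (numbers are illustrative, not from the patch): with HZ = 100, HZSCALE = 268435456 / 10000 = 26843, and assuming loops_per_jiffy = 1000000, the loops-per-microsecond term is ((26843 >> 11) * (1000000 >> 11)) >> 6 = (13 * 488) >> 6 = 99, in line with the exact value loops_per_jiffy * HZ / 10^6 = 100. ndelay(500) therefore spins DIV_ROUND_UP(500 * 99, 1000) = 50 loops, roughly half a microsecond, and the DIV_ROUND_UP keeps very short delays from rounding down to zero loops.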
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 4395ffc51fdb..8cc83431805b 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); 86 return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
87} 87}
88 88
89static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
90{
91 int err;
92
93 err = gpio_request(gpio, label);
94 if (err)
95 return err;
96
97 if (flags & GPIOF_DIR_IN)
98 err = gpio_direction_input(gpio);
99 else
100 err = gpio_direction_output(gpio,
101 (flags & GPIOF_INIT_HIGH) ? 1 : 0);
102
103 if (err)
104 gpio_free(gpio);
105
106 return err;
107}
108
89#endif 109#endif
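The new inline mirrors gpiolib's gpio_request_one(): request the pin, apply the direction/initial-level flags, and release the pin again if configuration fails. A minimal usage sketch (pin number and label are placeholders):

static int claim_button_gpio(void)
{
	int err = gpio_request_one(42, GPIOF_DIR_IN, "my-button");

	if (err)
		return err;
	/* ... sample it with gpio_get_value(42) ... */
	gpio_free(42);
	return 0;
}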
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index a6686d26fe17..ffdf54f44bc6 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -63,6 +63,23 @@
63#endif 63#endif
64#endif /* AMIGA_PCMCIA */ 64#endif /* AMIGA_PCMCIA */
65 65
66#ifdef CONFIG_ATARI_ROM_ISA
67
68#define enec_isa_read_base 0xfffa0000
69#define enec_isa_write_base 0xfffb0000
70
71#define ENEC_ISA_IO_B(ioaddr) (enec_isa_read_base+((((unsigned long)(ioaddr))&0x7F)<<9))
72#define ENEC_ISA_IO_W(ioaddr) (enec_isa_read_base+((((unsigned long)(ioaddr))&0x7F)<<9))
73#define ENEC_ISA_MEM_B(madr) (enec_isa_read_base+((((unsigned long)(madr))&0x7F)<<9))
74#define ENEC_ISA_MEM_W(madr) (enec_isa_read_base+((((unsigned long)(madr))&0x7F)<<9))
75
76#ifndef MULTI_ISA
77#define MULTI_ISA 0
78#else
79#undef MULTI_ISA
80#define MULTI_ISA 1
81#endif
82#endif /* ATARI_ROM_ISA */
66 83
67 84
68#if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE) 85#if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE)
@@ -111,14 +128,15 @@ void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len);
111#define readw(addr) in_le16(addr) 128#define readw(addr) in_le16(addr)
112#define writew(v, addr) out_le16((addr), (v)) 129#define writew(v, addr) out_le16((addr), (v))
113 130
114#elif defined(CONFIG_ISA) 131#elif defined(CONFIG_ISA) || defined(CONFIG_ATARI_ROM_ISA)
115 132
116#if MULTI_ISA == 0 133#if MULTI_ISA == 0
117#undef MULTI_ISA 134#undef MULTI_ISA
118#endif 135#endif
119 136
120#define ISA_TYPE_Q40 (1) 137#define ISA_TYPE_Q40 (1)
121#define ISA_TYPE_AG (2) 138#define ISA_TYPE_AG (2)
139#define ISA_TYPE_ENEC (3)
122 140
123#if defined(CONFIG_Q40) && !defined(MULTI_ISA) 141#if defined(CONFIG_Q40) && !defined(MULTI_ISA)
124#define ISA_TYPE ISA_TYPE_Q40 142#define ISA_TYPE ISA_TYPE_Q40
@@ -128,6 +146,10 @@ void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len);
128#define ISA_TYPE ISA_TYPE_AG 146#define ISA_TYPE ISA_TYPE_AG
129#define ISA_SEX 1 147#define ISA_SEX 1
130#endif 148#endif
149#if defined(CONFIG_ATARI_ROM_ISA) && !defined(MULTI_ISA)
150#define ISA_TYPE ISA_TYPE_ENEC
151#define ISA_SEX 0
152#endif
131 153
132#ifdef MULTI_ISA 154#ifdef MULTI_ISA
133extern int isa_type; 155extern int isa_type;
@@ -152,6 +174,9 @@ static inline u8 __iomem *isa_itb(unsigned long addr)
152#ifdef CONFIG_AMIGA_PCMCIA 174#ifdef CONFIG_AMIGA_PCMCIA
153 case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr); 175 case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr);
154#endif 176#endif
177#ifdef CONFIG_ATARI_ROM_ISA
178 case ISA_TYPE_ENEC: return (u8 __iomem *)ENEC_ISA_IO_B(addr);
179#endif
155 default: return NULL; /* avoid warnings, just in case */ 180 default: return NULL; /* avoid warnings, just in case */
156 } 181 }
157} 182}
@@ -165,6 +190,9 @@ static inline u16 __iomem *isa_itw(unsigned long addr)
165#ifdef CONFIG_AMIGA_PCMCIA 190#ifdef CONFIG_AMIGA_PCMCIA
166 case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr); 191 case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr);
167#endif 192#endif
193#ifdef CONFIG_ATARI_ROM_ISA
194 case ISA_TYPE_ENEC: return (u16 __iomem *)ENEC_ISA_IO_W(addr);
195#endif
168 default: return NULL; /* avoid warnings, just in case */ 196 default: return NULL; /* avoid warnings, just in case */
169 } 197 }
170} 198}
@@ -188,6 +216,9 @@ static inline u8 __iomem *isa_mtb(unsigned long addr)
188#ifdef CONFIG_AMIGA_PCMCIA 216#ifdef CONFIG_AMIGA_PCMCIA
189 case ISA_TYPE_AG: return (u8 __iomem *)addr; 217 case ISA_TYPE_AG: return (u8 __iomem *)addr;
190#endif 218#endif
219#ifdef CONFIG_ATARI_ROM_ISA
220 case ISA_TYPE_ENEC: return (u8 __iomem *)ENEC_ISA_MEM_B(addr);
221#endif
191 default: return NULL; /* avoid warnings, just in case */ 222 default: return NULL; /* avoid warnings, just in case */
192 } 223 }
193} 224}
@@ -201,6 +232,9 @@ static inline u16 __iomem *isa_mtw(unsigned long addr)
201#ifdef CONFIG_AMIGA_PCMCIA 232#ifdef CONFIG_AMIGA_PCMCIA
202 case ISA_TYPE_AG: return (u16 __iomem *)addr; 233 case ISA_TYPE_AG: return (u16 __iomem *)addr;
203#endif 234#endif
235#ifdef CONFIG_ATARI_ROM_ISA
236 case ISA_TYPE_ENEC: return (u16 __iomem *)ENEC_ISA_MEM_W(addr);
237#endif
204 default: return NULL; /* avoid warnings, just in case */ 238 default: return NULL; /* avoid warnings, just in case */
205 } 239 }
206} 240}
@@ -222,6 +256,36 @@ static inline u16 __iomem *isa_mtw(unsigned long addr)
222 (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \ 256 (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \
223 : out_le16(isa_mtw((unsigned long)(p)),(val))) 257 : out_le16(isa_mtw((unsigned long)(p)),(val)))
224 258
259#ifdef CONFIG_ATARI_ROM_ISA
260#define isa_rom_inb(port) rom_in_8(isa_itb(port))
261#define isa_rom_inw(port) \
262 (ISA_SEX ? rom_in_be16(isa_itw(port)) \
263 : rom_in_le16(isa_itw(port)))
264
265#define isa_rom_outb(val, port) rom_out_8(isa_itb(port), (val))
266#define isa_rom_outw(val, port) \
267 (ISA_SEX ? rom_out_be16(isa_itw(port), (val)) \
268 : rom_out_le16(isa_itw(port), (val)))
269
270#define isa_rom_readb(p) rom_in_8(isa_mtb((unsigned long)(p)))
271#define isa_rom_readw(p) \
272 (ISA_SEX ? rom_in_be16(isa_mtw((unsigned long)(p))) \
273 : rom_in_le16(isa_mtw((unsigned long)(p))))
274#define isa_rom_readw_swap(p) \
275 (ISA_SEX ? rom_in_le16(isa_mtw((unsigned long)(p))) \
276 : rom_in_be16(isa_mtw((unsigned long)(p))))
277#define isa_rom_readw_raw(p) rom_in_be16(isa_mtw((unsigned long)(p)))
278
279#define isa_rom_writeb(val, p) rom_out_8(isa_mtb((unsigned long)(p)), (val))
280#define isa_rom_writew(val, p) \
281 (ISA_SEX ? rom_out_be16(isa_mtw((unsigned long)(p)), (val)) \
282 : rom_out_le16(isa_mtw((unsigned long)(p)), (val)))
283#define isa_rom_writew_swap(val, p) \
284 (ISA_SEX ? rom_out_le16(isa_mtw((unsigned long)(p)), (val)) \
285 : rom_out_be16(isa_mtw((unsigned long)(p)), (val)))
286#define isa_rom_writew_raw(val, p) rom_out_be16(isa_mtw((unsigned long)(p)), (val))
287#endif /* CONFIG_ATARI_ROM_ISA */
288
225static inline void isa_delay(void) 289static inline void isa_delay(void)
226{ 290{
227 switch(ISA_TYPE) 291 switch(ISA_TYPE)
@@ -232,6 +296,9 @@ static inline void isa_delay(void)
232#ifdef CONFIG_AMIGA_PCMCIA 296#ifdef CONFIG_AMIGA_PCMCIA
233 case ISA_TYPE_AG: break; 297 case ISA_TYPE_AG: break;
234#endif 298#endif
299#ifdef CONFIG_ATARI_ROM_ISA
300 case ISA_TYPE_ENEC: break;
301#endif
235 default: break; /* avoid warnings */ 302 default: break; /* avoid warnings */
236 } 303 }
237} 304}
@@ -263,6 +330,29 @@ static inline void isa_delay(void)
263 raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1)) 330 raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1))
264 331
265 332
333#ifdef CONFIG_ATARI_ROM_ISA
334#define isa_rom_inb_p(p) ({ u8 _v = isa_rom_inb(p); isa_delay(); _v; })
335#define isa_rom_inw_p(p) ({ u16 _v = isa_rom_inw(p); isa_delay(); _v; })
336#define isa_rom_outb_p(v, p) ({ isa_rom_outb((v), (p)); isa_delay(); })
337#define isa_rom_outw_p(v, p) ({ isa_rom_outw((v), (p)); isa_delay(); })
338
339#define isa_rom_insb(port, buf, nr) raw_rom_insb(isa_itb(port), (u8 *)(buf), (nr))
340
341#define isa_rom_insw(port, buf, nr) \
342 (ISA_SEX ? raw_rom_insw(isa_itw(port), (u16 *)(buf), (nr)) : \
343 raw_rom_insw_swapw(isa_itw(port), (u16 *)(buf), (nr)))
344
345#define isa_rom_outsb(port, buf, nr) raw_rom_outsb(isa_itb(port), (u8 *)(buf), (nr))
346
347#define isa_rom_outsw(port, buf, nr) \
348 (ISA_SEX ? raw_rom_outsw(isa_itw(port), (u16 *)(buf), (nr)) : \
349 raw_rom_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)))
350#endif /* CONFIG_ATARI_ROM_ISA */
351
352#endif /* CONFIG_ISA || CONFIG_ATARI_ROM_ISA */
353
354
355#if defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
266#define inb isa_inb 356#define inb isa_inb
267#define inb_p isa_inb_p 357#define inb_p isa_inb_p
268#define outb isa_outb 358#define outb isa_outb
@@ -285,9 +375,43 @@ static inline void isa_delay(void)
285#define readw isa_readw 375#define readw isa_readw
286#define writeb isa_writeb 376#define writeb isa_writeb
287#define writew isa_writew 377#define writew isa_writew
378#endif /* CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
288 379
289#else /* CONFIG_ISA */ 380#ifdef CONFIG_ATARI_ROM_ISA
290 381/*
382 * kernel with both ROM port ISA and IDE compiled in, those have
383 * conflicting defs for in/out. Simply consider port < 1024
384 * ROM port ISA and everything else regular ISA for IDE. read,write defined
385 * below.
386 */
387#define inb(port) ((port) < 1024 ? isa_rom_inb(port) : in_8(port))
388#define inb_p(port) ((port) < 1024 ? isa_rom_inb_p(port) : in_8(port))
389#define inw(port) ((port) < 1024 ? isa_rom_inw(port) : in_le16(port))
390#define inw_p(port) ((port) < 1024 ? isa_rom_inw_p(port) : in_le16(port))
391#define inl isa_inl
392#define inl_p isa_inl_p
393
394#define outb(val, port) ((port) < 1024 ? isa_rom_outb((val), (port)) : out_8((port), (val)))
395#define outb_p(val, port) ((port) < 1024 ? isa_rom_outb_p((val), (port)) : out_8((port), (val)))
396#define outw(val, port) ((port) < 1024 ? isa_rom_outw((val), (port)) : out_le16((port), (val)))
397#define outw_p(val, port) ((port) < 1024 ? isa_rom_outw_p((val), (port)) : out_le16((port), (val)))
398#define outl isa_outl
399#define outl_p isa_outl_p
400
401#define insb(port, buf, nr) ((port) < 1024 ? isa_rom_insb((port), (buf), (nr)) : isa_insb((port), (buf), (nr)))
402#define insw(port, buf, nr) ((port) < 1024 ? isa_rom_insw((port), (buf), (nr)) : isa_insw((port), (buf), (nr)))
403#define insl isa_insl
404#define outsb(port, buf, nr) ((port) < 1024 ? isa_rom_outsb((port), (buf), (nr)) : isa_outsb((port), (buf), (nr)))
405#define outsw(port, buf, nr) ((port) < 1024 ? isa_rom_outsw((port), (buf), (nr)) : isa_outsw((port), (buf), (nr)))
406#define outsl isa_outsl
407
408#define readb(addr) in_8(addr)
409#define writeb(val, addr) out_8((addr), (val))
410#define readw(addr) in_le16(addr)
411#define writew(val, addr) out_le16((addr), (val))
412#endif /* CONFIG_ATARI_ROM_ISA */
413
414#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
291/* 415/*
292 * We need to define dummy functions for GENERIC_IOMAP support. 416 * We need to define dummy functions for GENERIC_IOMAP support.
293 */ 417 */
@@ -319,7 +443,7 @@ static inline void isa_delay(void)
319#define readw(addr) in_le16(addr) 443#define readw(addr) in_le16(addr)
320#define writew(val,addr) out_le16((addr),(val)) 444#define writew(val,addr) out_le16((addr),(val))
321 445
322#endif /* CONFIG_ISA */ 446#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
323 447
324#define readl(addr) in_le32(addr) 448#define readl(addr) in_le32(addr)
325#define writel(val,addr) out_le32((addr),(val)) 449#define writel(val,addr) out_le32((addr),(val))
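The inb()/outb() definitions above split the ISA port space by number: anything below 1024 goes through the ROM port ISA accessors (the EtherNEC NE2000 sits at base 0x300, per the raw_io.h comment later in this series), everything else goes through the plain in_8()/out_8() memory accessors used by IDE. A small user-space sketch of that dispatch, with hypothetical stand-ins for the two back ends:

#include <stdio.h>

/* Hypothetical stand-ins for isa_rom_inb() and in_8(); illustration only. */
static unsigned char demo_rom_inb(unsigned long port)
{
	printf("ROM port ISA read at %#lx\n", port);
	return 0;
}

static unsigned char demo_mem_inb(unsigned long addr)
{
	printf("plain memory read at %#lx\n", addr);
	return 0;
}

/* Same shape as the inb() definition above. */
#define demo_inb(port) \
	((port) < 1024 ? demo_rom_inb(port) : demo_mem_inb(port))

int main(void)
{
	demo_inb(0x300);		/* EtherNEC NE2000 base -> ROM port ISA */
	demo_inb(0x00e80000UL);		/* illustrative memory-mapped IDE register -> in_8() path */
	return 0;
}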
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index c1155f0e22cc..81ca118d58af 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -6,12 +6,16 @@
6 * different m68k hosts compiled into the kernel. 6 * different m68k hosts compiled into the kernel.
7 * Currently the Atari has 72 and the Amiga 24, but if both are 7 * Currently the Atari has 72 and the Amiga 24, but if both are
8 * supported in the kernel it is better to make room for 72. 8 * supported in the kernel it is better to make room for 72.
9 * With EtherNAT add-on card on Atari, the highest interrupt
10 * number is 140 so NR_IRQS needs to be 141.
9 */ 11 */
10#if defined(CONFIG_COLDFIRE) 12#if defined(CONFIG_COLDFIRE)
11#define NR_IRQS 256 13#define NR_IRQS 256
12#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X) 14#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
13#define NR_IRQS 200 15#define NR_IRQS 200
14#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC) 16#elif defined(CONFIG_ATARI)
17#define NR_IRQS 141
18#elif defined(CONFIG_MAC)
15#define NR_IRQS 72 19#define NR_IRQS 72
16#elif defined(CONFIG_Q40) 20#elif defined(CONFIG_Q40)
17#define NR_IRQS 43 21#define NR_IRQS 43
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
index d9eb9834ccc8..932faa35655b 100644
--- a/arch/m68k/include/asm/raw_io.h
+++ b/arch/m68k/include/asm/raw_io.h
@@ -10,7 +10,7 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#include <asm/types.h> 13#include <asm/byteorder.h>
14 14
15 15
16/* Values for nocacheflag and cmode */ 16/* Values for nocacheflag and cmode */
@@ -60,6 +60,57 @@ extern void __iounmap(void *addr, unsigned long size);
60#define __raw_writew(val,addr) out_be16((addr),(val)) 60#define __raw_writew(val,addr) out_be16((addr),(val))
61#define __raw_writel(val,addr) out_be32((addr),(val)) 61#define __raw_writel(val,addr) out_be32((addr),(val))
62 62
63/*
64 * Atari ROM port (cartridge port) ISA adapter, used for the EtherNEC NE2000
65 * network card driver.
66 * The ISA adapter connects address lines A9-A13 to ISA address lines A0-A4,
67 * and hardwires the rest of the ISA addresses for a base address of 0x300.
68 *
69 * Data lines D8-D15 are connected to ISA data lines D0-D7 for reading.
70 * For writes, address lines A1-A8 are latched to ISA data lines D0-D7
 71 * (meaning the bit pattern on A1-A8 can be read back as a byte).
72 *
73 * Read and write operations are distinguished by the base address used:
74 * reads are from the ROM A side range, writes are through the B side range
75 * addresses (A side base + 0x10000).
76 *
77 * Reads and writes are byte only.
78 *
79 * 16 bit reads and writes are necessary for the NetUSBee adapter's USB
80 * chipset - 16 bit words are read straight off the ROM port while 16 bit
 81 * writes are split into two byte writes. The low byte is latched to the
82 * NetUSBee buffer by a read from the _read_ window (with the data pattern
83 * asserted as A1-A8 address pattern). The high byte is then written to the
84 * write range as usual, completing the write cycle.
85 */
86
87#if defined(CONFIG_ATARI_ROM_ISA)
88#define rom_in_8(addr) \
89 ({ u16 __v = (*(__force volatile u16 *) (addr)); __v >>= 8; __v; })
90#define rom_in_be16(addr) \
91 ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
92#define rom_in_le16(addr) \
93 ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
94
95#define rom_out_8(addr, b) \
96 ({u8 __w, __v = (b); u32 _addr = ((u32) (addr)); \
97 __w = ((*(__force volatile u8 *) ((_addr | 0x10000) + (__v<<1)))); })
98#define rom_out_be16(addr, w) \
99 ({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \
100 __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \
101 __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); })
102#define rom_out_le16(addr, w) \
103 ({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \
104 __w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \
105 __w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); })
106
107#define raw_rom_inb rom_in_8
108#define raw_rom_inw rom_in_be16
109
110#define raw_rom_outb(val, port) rom_out_8((port), (val))
111#define raw_rom_outw(val, port) rom_out_be16((port), (val))
112#endif /* CONFIG_ATARI_ROM_ISA */
113
63static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len) 114static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
64{ 115{
65 unsigned int i; 116 unsigned int i;
@@ -342,6 +393,62 @@ static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
342 : "d0", "a0", "a1", "d6"); 393 : "d0", "a0", "a1", "d6");
343} 394}
344 395
396
397#if defined(CONFIG_ATARI_ROM_ISA)
398static inline void raw_rom_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
399{
400 unsigned int i;
401
402 for (i = 0; i < len; i++)
403 *buf++ = rom_in_8(port);
404}
405
406static inline void raw_rom_outsb(volatile u8 __iomem *port, const u8 *buf,
407 unsigned int len)
408{
409 unsigned int i;
410
411 for (i = 0; i < len; i++)
412 rom_out_8(port, *buf++);
413}
414
415static inline void raw_rom_insw(volatile u16 __iomem *port, u16 *buf,
416 unsigned int nr)
417{
418 unsigned int i;
419
420 for (i = 0; i < nr; i++)
421 *buf++ = rom_in_be16(port);
422}
423
424static inline void raw_rom_outsw(volatile u16 __iomem *port, const u16 *buf,
425 unsigned int nr)
426{
427 unsigned int i;
428
429 for (i = 0; i < nr; i++)
430 rom_out_be16(port, *buf++);
431}
432
433static inline void raw_rom_insw_swapw(volatile u16 __iomem *port, u16 *buf,
434 unsigned int nr)
435{
436 unsigned int i;
437
438 for (i = 0; i < nr; i++)
439 *buf++ = rom_in_le16(port);
440}
441
442static inline void raw_rom_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
443 unsigned int nr)
444{
445 unsigned int i;
446
447 for (i = 0; i < nr; i++)
448 rom_out_le16(port, *buf++);
449}
450#endif /* CONFIG_ATARI_ROM_ISA */
451
345#endif /* __KERNEL__ */ 452#endif /* __KERNEL__ */
346 453
347#endif /* _RAW_IO_H */ 454#endif /* _RAW_IO_H */
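The write path described in the comment block above is unusual enough to deserve a worked example: rom_out_8() never drives the data lines at all, it encodes the byte on address lines A1-A8 and performs an access in the write window at base + 0x10000. A user-space sketch of just that address arithmetic (the base is an illustrative number, not the real cartridge port mapping):

#include <stdio.h>

/* Address that rom_out_8(base, val) would touch: write window at
 * base | 0x10000, data byte shifted onto address lines A1-A8. */
static unsigned long rom_write_address(unsigned long base, unsigned char val)
{
	return (base | 0x10000UL) + ((unsigned long)val << 1);
}

int main(void)
{
	unsigned long base = 0x00e00300UL;	/* illustrative base address */
	unsigned char val = 0xa5;

	printf("writing %#x touches %#lx\n", val, rom_write_address(base, val));
	return 0;
}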
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 32198454da70..9aea9f11fa25 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -4,15 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6 6
7static inline size_t __kernel_strlen(const char *s)
8{
9 const char *sc;
10
11 for (sc = s; *sc++; )
12 ;
13 return sc - s - 1;
14}
15
16static inline char *__kernel_strcpy(char *dest, const char *src) 7static inline char *__kernel_strcpy(char *dest, const char *src)
17{ 8{
18 char *xdest = dest; 9 char *xdest = dest;
@@ -27,11 +18,6 @@ static inline char *__kernel_strcpy(char *dest, const char *src)
27 18
28#ifndef __IN_STRING_C 19#ifndef __IN_STRING_C
29 20
30#define __HAVE_ARCH_STRLEN
31#define strlen(s) (__builtin_constant_p(s) ? \
32 __builtin_strlen(s) : \
33 __kernel_strlen(s))
34
35#define __HAVE_ARCH_STRNLEN 21#define __HAVE_ARCH_STRNLEN
36static inline size_t strnlen(const char *s, size_t count) 22static inline size_t strnlen(const char *s, size_t count)
37{ 23{
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index d538694ad208..c55ff719fa72 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -51,40 +51,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
51 return sw->retpc; 51 return sw->retpc;
52} 52}
53 53
54/* 54void arch_cpu_idle(void)
55 * The idle loop on an m68k..
56 */
57static void default_idle(void)
58{ 55{
59 if (!need_resched())
60#if defined(MACH_ATARI_ONLY) 56#if defined(MACH_ATARI_ONLY)
61 /* block out HSYNC on the atari (falcon) */ 57 /* block out HSYNC on the atari (falcon) */
62 __asm__("stop #0x2200" : : : "cc"); 58 __asm__("stop #0x2200" : : : "cc");
63#else 59#else
64 __asm__("stop #0x2000" : : : "cc"); 60 __asm__("stop #0x2000" : : : "cc");
65#endif 61#endif
66} 62}
67 63
68void (*idle)(void) = default_idle;
69
70/*
71 * The idle thread. There's no useful work to be
72 * done, so just try to conserve power and have a
73 * low exit latency (ie sit in a loop waiting for
74 * somebody to say that they'd like to reschedule)
75 */
76void cpu_idle(void)
77{
78 /* endless idle loop with no priority at all */
79 while (1) {
80 rcu_idle_enter();
81 while (!need_resched())
82 idle();
83 rcu_idle_exit();
84 schedule_preempt_disabled();
85 }
86}
87
88void machine_restart(char * __unused) 64void machine_restart(char * __unused)
89{ 65{
90 if (mach_reset) 66 if (mach_reset)
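For context on this hunk: the per-architecture cpu_idle() loops are being replaced by one generic loop in the core kernel that calls back into a small arch_cpu_idle() hook, so the m68k code keeps only the machine-specific wait instruction. A stripped-down, user-space illustration of that split between generic policy and arch mechanism (names and the fake wakeup are made up for the sketch; this is not the scheduler's real loop):

#include <stdio.h>

static int work_pending;

/* Arch hook: only knows how to wait for the next event. */
static void demo_arch_cpu_idle(void)
{
	printf("arch hook: wait for interrupt (think m68k \"stop\")\n");
	work_pending = 1;	/* pretend an interrupt delivered work */
}

/* Generic loop: owns the policy and calls the arch hook for the mechanism. */
static void demo_idle_loop(void)
{
	while (!work_pending)
		demo_arch_cpu_idle();
	printf("generic loop: reschedule\n");
}

int main(void)
{
	demo_idle_loop();
	return 0;
}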
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 80cfbe56ea32..e67e53159573 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -381,6 +381,12 @@ void __init setup_arch(char **cmdline_p)
381 isa_sex = 1; 381 isa_sex = 1;
382 } 382 }
383#endif 383#endif
384#ifdef CONFIG_ATARI_ROM_ISA
385 if (MACH_IS_ATARI) {
386 isa_type = ISA_TYPE_ENEC;
387 isa_sex = 0;
388 }
389#endif
384#endif 390#endif
385} 391}
386 392
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 71fb29938dba..911ba472e6c4 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -57,6 +57,9 @@ void (*mach_reset)(void);
57void (*mach_halt)(void); 57void (*mach_halt)(void);
58void (*mach_power_off)(void); 58void (*mach_power_off)(void);
59 59
60#ifdef CONFIG_M68000
61#define CPU_NAME "MC68000"
62#endif
60#ifdef CONFIG_M68328 63#ifdef CONFIG_M68328
61#define CPU_NAME "MC68328" 64#define CPU_NAME "MC68328"
62#endif 65#endif
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index b9a57abfad08..4d61fa8a112c 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -17,6 +17,6 @@ EXPORT_SYMBOL(strcpy);
17 17
18char *strcat(char *dest, const char *src) 18char *strcat(char *dest, const char *src)
19{ 19{
20 return __kernel_strcpy(dest + __kernel_strlen(dest), src); 20 return __kernel_strcpy(dest + strlen(dest), src);
21} 21}
22EXPORT_SYMBOL(strcat); 22EXPORT_SYMBOL(strcat);
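With the private __kernel_strlen() removed (see the string.h hunk earlier), strcat() reduces to a strcpy() onto the end of the destination as measured by the generic strlen(). A user-space sketch of the same one-liner (returning dest as the C standard does):

#include <stdio.h>
#include <string.h>

static char *demo_strcat(char *dest, const char *src)
{
	strcpy(dest + strlen(dest), src);	/* append at the terminator */
	return dest;
}

int main(void)
{
	char buf[32] = "m68k";

	printf("%s\n", demo_strcat(buf, " strcat"));
	return 0;
}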
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index afd8106fd83b..1af2ca3411f6 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -110,18 +110,7 @@ void __init paging_init(void)
110void free_initmem(void) 110void free_initmem(void)
111{ 111{
112#ifndef CONFIG_MMU_SUN3 112#ifndef CONFIG_MMU_SUN3
113 unsigned long addr; 113 free_initmem_default(0);
114
115 addr = (unsigned long) __init_begin;
116 for (; addr < ((unsigned long) __init_end); addr += PAGE_SIZE) {
117 ClearPageReserved(virt_to_page(addr));
118 init_page_count(virt_to_page(addr));
119 free_page(addr);
120 totalram_pages++;
121 }
122 pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
123 (addr - (unsigned long) __init_begin) >> 10,
124 (unsigned int) __init_begin, (unsigned int) __init_end);
125#endif /* CONFIG_MMU_SUN3 */ 114#endif /* CONFIG_MMU_SUN3 */
126} 115}
127 116
@@ -188,7 +177,7 @@ void __init mem_init(void)
188 } 177 }
189 } 178 }
190 179
191#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) 180#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
192 /* insert pointer tables allocated so far into the tablelist */ 181 /* insert pointer tables allocated so far into the tablelist */
193 init_pointer_table((unsigned long)kernel_pg_dir); 182 init_pointer_table((unsigned long)kernel_pg_dir);
194 for (i = 0; i < PTRS_PER_PGD; i++) { 183 for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -213,15 +202,6 @@ void __init mem_init(void)
213#ifdef CONFIG_BLK_DEV_INITRD 202#ifdef CONFIG_BLK_DEV_INITRD
214void free_initrd_mem(unsigned long start, unsigned long end) 203void free_initrd_mem(unsigned long start, unsigned long end)
215{ 204{
216 int pages = 0; 205 free_reserved_area(start, end, 0, "initrd");
217 for (; start < end; start += PAGE_SIZE) {
218 ClearPageReserved(virt_to_page(start));
219 init_page_count(virt_to_page(start));
220 free_page(start);
221 totalram_pages++;
222 pages++;
223 }
224 pr_notice("Freeing initrd memory: %dk freed\n",
225 pages << (PAGE_SHIFT - 10));
226} 206}
227#endif 207#endif
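free_initmem_default() and free_reserved_area() fold the open-coded loops removed above into common helpers: walk the range page by page, hand each page back to the allocator, and print how much was freed. A user-space sketch of just the bookkeeping they centralize (page size and the freeing step are stand-ins):

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

static void demo_free_page(unsigned long addr)
{
	(void)addr;	/* stand-in for returning a page to the allocator */
}

static unsigned long demo_free_area(unsigned long start, unsigned long end,
				    const char *what)
{
	unsigned long addr, pages = 0;

	for (addr = start; addr < end; addr += DEMO_PAGE_SIZE) {
		demo_free_page(addr);
		pages++;
	}
	printf("Freeing %s: %luk freed\n", what, pages * (DEMO_PAGE_SIZE / 1024));
	return pages;
}

int main(void)
{
	demo_free_area(0x100000UL, 0x140000UL, "unused kernel memory");
	return 0;
}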
diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c
index 83b7dad7a84e..b03a9d271837 100644
--- a/arch/m68k/platform/coldfire/m528x.c
+++ b/arch/m68k/platform/coldfire/m528x.c
@@ -69,7 +69,7 @@ static void __init m528x_uarts_init(void)
69 u8 port; 69 u8 port;
70 70
71 /* make sure PUAPAR is set for UART0 and UART1 */ 71 /* make sure PUAPAR is set for UART0 and UART1 */
72 port = readb(MCF5282_GPIO_PUAPAR); 72 port = readb(MCFGPIO_PUAPAR);
73 port |= 0x03 | (0x03 << 2); 73 port |= 0x03 | (0x03 << 2);
74 writeb(port, MCFGPIO_PUAPAR); 74 writeb(port, MCFGPIO_PUAPAR);
75} 75}
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 0ecd34d8b5f6..7c4a33006142 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -150,6 +150,4 @@ static inline int kstack_end(void *addr)
150#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ 150#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
151 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) 151 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
152 152
153#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
154
155#endif /* _ASM_THREAD_INFO_H */ 153#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index c6efe62e5b76..dc5923544560 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -22,6 +22,7 @@
22#include <linux/pm.h> 22#include <linux/pm.h>
23#include <linux/syscalls.h> 23#include <linux/syscalls.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/smp.h>
25#include <asm/core_reg.h> 26#include <asm/core_reg.h>
26#include <asm/user_gateway.h> 27#include <asm/user_gateway.h>
27#include <asm/tcm.h> 28#include <asm/tcm.h>
@@ -31,7 +32,7 @@
31/* 32/*
32 * Wait for the next interrupt and enable local interrupts 33 * Wait for the next interrupt and enable local interrupts
33 */ 34 */
34static inline void arch_idle(void) 35void arch_cpu_idle(void)
35{ 36{
36 int tmp; 37 int tmp;
37 38
@@ -59,36 +60,12 @@ static inline void arch_idle(void)
59 : "r" (get_trigger_mask())); 60 : "r" (get_trigger_mask()));
60} 61}
61 62
62void cpu_idle(void)
63{
64 set_thread_flag(TIF_POLLING_NRFLAG);
65
66 while (1) {
67 tick_nohz_idle_enter();
68 rcu_idle_enter();
69
70 while (!need_resched()) {
71 /*
72 * We need to disable interrupts here to ensure we don't
73 * miss a wakeup call.
74 */
75 local_irq_disable();
76 if (!need_resched()) {
77#ifdef CONFIG_HOTPLUG_CPU 63#ifdef CONFIG_HOTPLUG_CPU
78 if (cpu_is_offline(smp_processor_id())) 64void arch_cpu_idle_dead(void)
79 cpu_die(); 65{
80#endif 66 cpu_die();
81 arch_idle();
82 } else {
83 local_irq_enable();
84 }
85 }
86
87 rcu_idle_exit();
88 tick_nohz_idle_exit();
89 schedule_preempt_disabled();
90 }
91} 67}
68#endif
92 69
93void (*pm_power_off)(void); 70void (*pm_power_off)(void);
94EXPORT_SYMBOL(pm_power_off); 71EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 4e7751ac75d2..f443ec9a7cbe 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -412,7 +412,7 @@ asmlinkage void secondary_start_kernel(void)
412 /* 412 /*
413 * OK, it's off to the idle thread for us 413 * OK, it's off to the idle thread for us
414 */ 414 */
415 cpu_idle(); 415 cpu_startup_entry(CPUHP_ONLINE);
416} 416}
417 417
418void __init smp_cpus_done(unsigned int max_cpus) 418void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 504a398d5f8b..d05b8455c44c 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -380,14 +380,8 @@ void __init mem_init(void)
380 380
381#ifdef CONFIG_HIGHMEM 381#ifdef CONFIG_HIGHMEM
382 unsigned long tmp; 382 unsigned long tmp;
383 for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { 383 for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
384 struct page *page = pfn_to_page(tmp); 384 free_highmem_page(pfn_to_page(tmp));
385 ClearPageReserved(page);
386 init_page_count(page);
387 __free_page(page);
388 totalhigh_pages++;
389 }
390 totalram_pages += totalhigh_pages;
391 num_physpages += totalhigh_pages; 385 num_physpages += totalhigh_pages;
392#endif /* CONFIG_HIGHMEM */ 386#endif /* CONFIG_HIGHMEM */
393 387
@@ -412,32 +406,15 @@ void __init mem_init(void)
412 return; 406 return;
413} 407}
414 408
415static void free_init_pages(char *what, unsigned long begin, unsigned long end)
416{
417 unsigned long addr;
418
419 for (addr = begin; addr < end; addr += PAGE_SIZE) {
420 ClearPageReserved(virt_to_page(addr));
421 init_page_count(virt_to_page(addr));
422 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
423 free_page(addr);
424 totalram_pages++;
425 }
426 pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
427}
428
429void free_initmem(void) 409void free_initmem(void)
430{ 410{
431 free_init_pages("unused kernel memory", 411 free_initmem_default(POISON_FREE_INITMEM);
432 (unsigned long)(&__init_begin),
433 (unsigned long)(&__init_end));
434} 412}
435 413
436#ifdef CONFIG_BLK_DEV_INITRD 414#ifdef CONFIG_BLK_DEV_INITRD
437void free_initrd_mem(unsigned long start, unsigned long end) 415void free_initrd_mem(unsigned long start, unsigned long end)
438{ 416{
439 end = end & PAGE_MASK; 417 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
440 free_init_pages("initrd memory", start, end);
441} 418}
442#endif 419#endif
443 420
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 9dbb2448a9b2..54237af0b07c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,13 +19,14 @@ config MICROBLAZE
19 select HAVE_DEBUG_KMEMLEAK 19 select HAVE_DEBUG_KMEMLEAK
20 select IRQ_DOMAIN 20 select IRQ_DOMAIN
21 select HAVE_GENERIC_HARDIRQS 21 select HAVE_GENERIC_HARDIRQS
22 select HAVE_VIRT_TO_BUS 22 select VIRT_TO_BUS
23 select GENERIC_IRQ_PROBE 23 select GENERIC_IRQ_PROBE
24 select GENERIC_IRQ_SHOW 24 select GENERIC_IRQ_SHOW
25 select GENERIC_PCI_IOMAP 25 select GENERIC_PCI_IOMAP
26 select GENERIC_CPU_DEVICES 26 select GENERIC_CPU_DEVICES
27 select GENERIC_ATOMIC64 27 select GENERIC_ATOMIC64
28 select GENERIC_CLOCKEVENTS 28 select GENERIC_CLOCKEVENTS
29 select GENERIC_IDLE_POLL_SETUP
29 select MODULES_USE_ELF_RELA 30 select MODULES_USE_ELF_RELA
30 select CLONE_BACKWARDS 31 select CLONE_BACKWARDS
31 32
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 0759153e8117..d6e0ffea28b6 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -22,7 +22,6 @@
22extern const struct seq_operations cpuinfo_op; 22extern const struct seq_operations cpuinfo_op;
23 23
24# define cpu_relax() barrier() 24# define cpu_relax() barrier()
25# define cpu_sleep() do {} while (0)
26 25
27#define task_pt_regs(tsk) \ 26#define task_pt_regs(tsk) \
28 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) 27 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
@@ -160,10 +159,6 @@ unsigned long get_wchan(struct task_struct *p);
160# define STACK_TOP TASK_SIZE 159# define STACK_TOP TASK_SIZE
161# define STACK_TOP_MAX STACK_TOP 160# define STACK_TOP_MAX STACK_TOP
162 161
163void disable_hlt(void);
164void enable_hlt(void);
165void default_idle(void);
166
167#ifdef CONFIG_DEBUG_FS 162#ifdef CONFIG_DEBUG_FS
168extern struct dentry *of_debugfs_root; 163extern struct dentry *of_debugfs_root;
169#endif 164#endif
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 0e0b0a5ec756..f05df5630c84 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -46,7 +46,6 @@ void machine_shutdown(void);
46void machine_halt(void); 46void machine_halt(void);
47void machine_power_off(void); 47void machine_power_off(void);
48 48
49void free_init_pages(char *what, unsigned long begin, unsigned long end);
50extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); 49extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
51extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 50extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
52 51
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 008f30433d22..de26ea6373de 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -182,7 +182,6 @@ static inline bool test_and_clear_restore_sigmask(void)
182 ti->status &= ~TS_RESTORE_SIGMASK; 182 ti->status &= ~TS_RESTORE_SIGMASK;
183 return true; 183 return true;
184} 184}
185#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
186#endif 185#endif
187 186
188#endif /* __KERNEL__ */ 187#endif /* __KERNEL__ */
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 60dcacc68038..365f2d53f1b2 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -21,7 +21,6 @@
21#include <asm/setup.h> 21#include <asm/setup.h>
22#include <asm/prom.h> 22#include <asm/prom.h>
23 23
24static u32 early_console_initialized;
25static u32 base_addr; 24static u32 base_addr;
26 25
27#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE 26#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -109,27 +108,11 @@ static struct console early_serial_uart16550_console = {
109}; 108};
110#endif /* CONFIG_SERIAL_8250_CONSOLE */ 109#endif /* CONFIG_SERIAL_8250_CONSOLE */
111 110
112static struct console *early_console;
113
114void early_printk(const char *fmt, ...)
115{
116 char buf[512];
117 int n;
118 va_list ap;
119
120 if (early_console_initialized) {
121 va_start(ap, fmt);
122 n = vscnprintf(buf, 512, fmt, ap);
123 early_console->write(early_console, buf, n);
124 va_end(ap);
125 }
126}
127
128int __init setup_early_printk(char *opt) 111int __init setup_early_printk(char *opt)
129{ 112{
130 int version = 0; 113 int version = 0;
131 114
132 if (early_console_initialized) 115 if (early_console)
133 return 1; 116 return 1;
134 117
135 base_addr = of_early_console(&version); 118 base_addr = of_early_console(&version);
@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt)
159 } 142 }
160 143
161 register_console(early_console); 144 register_console(early_console);
162 early_console_initialized = 1;
163 return 0; 145 return 0;
164 } 146 }
165 return 1; 147 return 1;
@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt)
169 * only for early console because of performance degression */ 151 * only for early console because of performance degression */
170void __init remap_early_printk(void) 152void __init remap_early_printk(void)
171{ 153{
172 if (!early_console_initialized || !early_console) 154 if (!early_console)
173 return; 155 return;
174 pr_info("early_printk_console remapping from 0x%x to ", base_addr); 156 pr_info("early_printk_console remapping from 0x%x to ", base_addr);
175 base_addr = (u32) ioremap(base_addr, PAGE_SIZE); 157 base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
@@ -194,9 +176,9 @@ void __init remap_early_printk(void)
194 176
195void __init disable_early_printk(void) 177void __init disable_early_printk(void)
196{ 178{
197 if (!early_console_initialized || !early_console) 179 if (!early_console)
198 return; 180 return;
199 pr_warn("disabling early console\n"); 181 pr_warn("disabling early console\n");
200 unregister_console(early_console); 182 unregister_console(early_console);
201 early_console_initialized = 0; 183 early_console = NULL;
202} 184}
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index fa0ea609137c..7cce2e9c1719 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -44,71 +44,6 @@ void show_regs(struct pt_regs *regs)
44void (*pm_power_off)(void) = NULL; 44void (*pm_power_off)(void) = NULL;
45EXPORT_SYMBOL(pm_power_off); 45EXPORT_SYMBOL(pm_power_off);
46 46
47static int hlt_counter = 1;
48
49void disable_hlt(void)
50{
51 hlt_counter++;
52}
53EXPORT_SYMBOL(disable_hlt);
54
55void enable_hlt(void)
56{
57 hlt_counter--;
58}
59EXPORT_SYMBOL(enable_hlt);
60
61static int __init nohlt_setup(char *__unused)
62{
63 hlt_counter = 1;
64 return 1;
65}
66__setup("nohlt", nohlt_setup);
67
68static int __init hlt_setup(char *__unused)
69{
70 hlt_counter = 0;
71 return 1;
72}
73__setup("hlt", hlt_setup);
74
75void default_idle(void)
76{
77 if (likely(hlt_counter)) {
78 local_irq_disable();
79 stop_critical_timings();
80 cpu_relax();
81 start_critical_timings();
82 local_irq_enable();
83 } else {
84 clear_thread_flag(TIF_POLLING_NRFLAG);
85 smp_mb__after_clear_bit();
86 local_irq_disable();
87 while (!need_resched())
88 cpu_sleep();
89 local_irq_enable();
90 set_thread_flag(TIF_POLLING_NRFLAG);
91 }
92}
93
94void cpu_idle(void)
95{
96 set_thread_flag(TIF_POLLING_NRFLAG);
97
98 /* endless idle loop with no priority at all */
99 while (1) {
100 tick_nohz_idle_enter();
101 rcu_idle_enter();
102 while (!need_resched())
103 default_idle();
104 rcu_idle_exit();
105 tick_nohz_idle_exit();
106
107 schedule_preempt_disabled();
108 check_pgt_cache();
109 }
110}
111
112void flush_thread(void) 47void flush_thread(void)
113{ 48{
114} 49}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 8f8b367c079e..4ec137d13ad7 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -82,13 +82,9 @@ static unsigned long highmem_setup(void)
82 /* FIXME not sure about */ 82 /* FIXME not sure about */
83 if (memblock_is_reserved(pfn << PAGE_SHIFT)) 83 if (memblock_is_reserved(pfn << PAGE_SHIFT))
84 continue; 84 continue;
85 ClearPageReserved(page); 85 free_highmem_page(page);
86 init_page_count(page);
87 __free_page(page);
88 totalhigh_pages++;
89 reservedpages++; 86 reservedpages++;
90 } 87 }
91 totalram_pages += totalhigh_pages;
92 pr_info("High memory: %luk\n", 88 pr_info("High memory: %luk\n",
93 totalhigh_pages << (PAGE_SHIFT-10)); 89 totalhigh_pages << (PAGE_SHIFT-10));
94 90
@@ -236,40 +232,16 @@ void __init setup_memory(void)
236 paging_init(); 232 paging_init();
237} 233}
238 234
239void free_init_pages(char *what, unsigned long begin, unsigned long end)
240{
241 unsigned long addr;
242
243 for (addr = begin; addr < end; addr += PAGE_SIZE) {
244 ClearPageReserved(virt_to_page(addr));
245 init_page_count(virt_to_page(addr));
246 free_page(addr);
247 totalram_pages++;
248 }
249 pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
250}
251
252#ifdef CONFIG_BLK_DEV_INITRD 235#ifdef CONFIG_BLK_DEV_INITRD
253void free_initrd_mem(unsigned long start, unsigned long end) 236void free_initrd_mem(unsigned long start, unsigned long end)
254{ 237{
255 int pages = 0; 238 free_reserved_area(start, end, 0, "initrd");
256 for (; start < end; start += PAGE_SIZE) {
257 ClearPageReserved(virt_to_page(start));
258 init_page_count(virt_to_page(start));
259 free_page(start);
260 totalram_pages++;
261 pages++;
262 }
263 pr_notice("Freeing initrd memory: %dk freed\n",
264 (int)(pages * (PAGE_SIZE / 1024)));
265} 239}
266#endif 240#endif
267 241
268void free_initmem(void) 242void free_initmem(void)
269{ 243{
270 free_init_pages("unused kernel memory", 244 free_initmem_default(0);
271 (unsigned long)(&__init_begin),
272 (unsigned long)(&__init_end));
273} 245}
274 246
275void __init mem_init(void) 247void __init mem_init(void)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ae9c716c46bb..3a7b3954ce1b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,7 +18,7 @@ config MIPS
18 select HAVE_KRETPROBES 18 select HAVE_KRETPROBES
19 select HAVE_DEBUG_KMEMLEAK 19 select HAVE_DEBUG_KMEMLEAK
20 select ARCH_BINFMT_ELF_RANDOMIZE_PIE 20 select ARCH_BINFMT_ELF_RANDOMIZE_PIE
21 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 21 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
22 select RTC_LIB if !MACH_LOONGSON 22 select RTC_LIB if !MACH_LOONGSON
23 select GENERIC_ATOMIC64 if !64BIT 23 select GENERIC_ATOMIC64 if !64BIT
24 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 24 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -38,7 +38,7 @@ config MIPS
38 select GENERIC_CLOCKEVENTS 38 select GENERIC_CLOCKEVENTS
39 select GENERIC_CMOS_UPDATE 39 select GENERIC_CMOS_UPDATE
40 select HAVE_MOD_ARCH_SPECIFIC 40 select HAVE_MOD_ARCH_SPECIFIC
41 select HAVE_VIRT_TO_BUS 41 select VIRT_TO_BUS
42 select MODULES_USE_ELF_REL if MODULES 42 select MODULES_USE_ELF_REL if MODULES
43 select MODULES_USE_ELF_RELA if MODULES && 64BIT 43 select MODULES_USE_ELF_RELA if MODULES && 64BIT
44 select CLONE_BACKWARDS 44 select CLONE_BACKWARDS
@@ -404,6 +404,8 @@ config PMC_MSP
404 select IRQ_CPU 404 select IRQ_CPU
405 select SERIAL_8250 405 select SERIAL_8250
406 select SERIAL_8250_CONSOLE 406 select SERIAL_8250_CONSOLE
407 select USB_EHCI_BIG_ENDIAN_MMIO
408 select USB_EHCI_BIG_ENDIAN_DESC
407 help 409 help
408 This adds support for the PMC-Sierra family of Multi-Service 410 This adds support for the PMC-Sierra family of Multi-Service
409 Processor System-On-A-Chips. These parts include a number 411 Processor System-On-A-Chips. These parts include a number
@@ -657,7 +659,7 @@ config SNI_RM
657 bool "SNI RM200/300/400" 659 bool "SNI RM200/300/400"
658 select FW_ARC if CPU_LITTLE_ENDIAN 660 select FW_ARC if CPU_LITTLE_ENDIAN
659 select FW_ARC32 if CPU_LITTLE_ENDIAN 661 select FW_ARC32 if CPU_LITTLE_ENDIAN
660 select SNIPROM if CPU_BIG_ENDIAN 662 select FW_SNIPROM if CPU_BIG_ENDIAN
661 select ARCH_MAY_HAVE_PC_FDC 663 select ARCH_MAY_HAVE_PC_FDC
662 select BOOT_ELF32 664 select BOOT_ELF32
663 select CEVT_R4K 665 select CEVT_R4K
@@ -1144,7 +1146,7 @@ config DEFAULT_SGI_PARTITION
1144config FW_ARC32 1146config FW_ARC32
1145 bool 1147 bool
1146 1148
1147config SNIPROM 1149config FW_SNIPROM
1148 bool 1150 bool
1149 1151
1150config BOOT_ELF32 1152config BOOT_ELF32
@@ -1433,6 +1435,7 @@ config CPU_CAVIUM_OCTEON
1433 select CPU_SUPPORTS_HUGEPAGES 1435 select CPU_SUPPORTS_HUGEPAGES
1434 select LIBFDT 1436 select LIBFDT
1435 select USE_OF 1437 select USE_OF
1438 select USB_EHCI_BIG_ENDIAN_MMIO
1436 help 1439 help
1437 The Cavium Octeon processor is a highly integrated chip containing 1440 The Cavium Octeon processor is a highly integrated chip containing
1438 many ethernet hardware widgets for networking tasks. The processor 1441 many ethernet hardware widgets for networking tasks. The processor
@@ -1493,7 +1496,6 @@ config CPU_XLP
1493 select CPU_SUPPORTS_32BIT_KERNEL 1496 select CPU_SUPPORTS_32BIT_KERNEL
1494 select CPU_SUPPORTS_64BIT_KERNEL 1497 select CPU_SUPPORTS_64BIT_KERNEL
1495 select CPU_SUPPORTS_HIGHMEM 1498 select CPU_SUPPORTS_HIGHMEM
1496 select CPU_HAS_LLSC
1497 select WEAK_ORDERING 1499 select WEAK_ORDERING
1498 select WEAK_REORDERING_BEYOND_LLSC 1500 select WEAK_REORDERING_BEYOND_LLSC
1499 select CPU_HAS_PREFETCH 1501 select CPU_HAS_PREFETCH
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index ed1949c29508..9aa7d44898ed 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
745 strcpy(cfe_version, "unknown"); 745 strcpy(cfe_version, "unknown");
746 printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); 746 printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
747 747
748 if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { 748 bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
749 printk(KERN_ERR PFX "invalid nvram checksum\n");
750 return;
751 }
752 749
753 board_name = bcm63xx_nvram_get_name(); 750 board_name = bcm63xx_nvram_get_name();
754 /* find board by name */ 751 /* find board by name */
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index f1c9c3e2f678..e97fd60e92ef 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -85,20 +85,9 @@ static struct platform_device bcm63xx_spi_device = {
85 85
86int __init bcm63xx_spi_register(void) 86int __init bcm63xx_spi_register(void)
87{ 87{
88 struct clk *periph_clk;
89
90 if (BCMCPU_IS_6328() || BCMCPU_IS_6345()) 88 if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
91 return -ENODEV; 89 return -ENODEV;
92 90
93 periph_clk = clk_get(NULL, "periph");
94 if (IS_ERR(periph_clk)) {
95 pr_err("unable to get periph clock\n");
96 return -ENODEV;
97 }
98
99 /* Set bus frequency */
100 spi_pdata.speed_hz = clk_get_rate(periph_clk);
101
102 spi_resources[0].start = bcm63xx_regset_address(RSET_SPI); 91 spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
103 spi_resources[0].end = spi_resources[0].start; 92 spi_resources[0].end = spi_resources[0].start;
104 spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); 93 spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c
index 620611680839..a4b8864f9307 100644
--- a/arch/mips/bcm63xx/nvram.c
+++ b/arch/mips/bcm63xx/nvram.c
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
38static struct bcm963xx_nvram nvram; 38static struct bcm963xx_nvram nvram;
39static int mac_addr_used; 39static int mac_addr_used;
40 40
41int __init bcm63xx_nvram_init(void *addr) 41void __init bcm63xx_nvram_init(void *addr)
42{ 42{
43 unsigned int check_len; 43 unsigned int check_len;
44 u32 crc, expected_crc; 44 u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
60 crc = crc32_le(~0, (u8 *)&nvram, check_len); 60 crc = crc32_le(~0, (u8 *)&nvram, check_len);
61 61
62 if (crc != expected_crc) 62 if (crc != expected_crc)
63 return -EINVAL; 63 pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
64 64 expected_crc, crc);
65 return 0;
66} 65}
67 66
68u8 *bcm63xx_nvram_get_name(void) 67u8 *bcm63xx_nvram_get_name(void)
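The behavioural change in this hunk: a CRC mismatch no longer makes bcm63xx_nvram_init() fail, it just logs a warning and carries on, and since the function now returns void the caller in board_bcm963xx.c (earlier hunk) stops checking it. A user-space sketch of the warn-and-continue style, with a trivial additive checksum standing in for crc32_le():

#include <stdio.h>
#include <stddef.h>

static unsigned int demo_checksum(const unsigned char *buf, size_t len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

/* Validate, warn on a mismatch, but keep the copied data usable. */
static void demo_nvram_init(const unsigned char *blob, size_t len,
			    unsigned int expected)
{
	unsigned int sum = demo_checksum(blob, len);

	if (sum != expected)
		printf("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
		       expected, sum);
}

int main(void)
{
	unsigned char blob[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	demo_nvram_init(blob, sizeof(blob), 0xdeadbeef);
	return 0;
}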
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 314231be788c..35e18e98beb9 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
157 return board_register_devices(); 157 return board_register_devices();
158} 158}
159 159
160device_initcall(bcm63xx_register_devices); 160arch_initcall(bcm63xx_register_devices);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index c594a3d4f743..b0baa299f899 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)
174 174
175static void octeon_generic_shutdown(void) 175static void octeon_generic_shutdown(void)
176{ 176{
177 int cpu, i; 177 int i;
178#ifdef CONFIG_SMP
179 int cpu;
180#endif
178 struct cvmx_bootmem_desc *bootmem_desc; 181 struct cvmx_bootmem_desc *bootmem_desc;
179 void *named_block_array_ptr; 182 void *named_block_array_ptr;
180 183
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index ef99db994c2f..fe0d15d32660 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -10,6 +10,7 @@
10#define __ASM_HUGETLB_H 10#define __ASM_HUGETLB_H
11 11
12#include <asm/page.h> 12#include <asm/page.h>
13#include <asm-generic/hugetlb.h>
13 14
14 15
15static inline int is_hugepage_only_range(struct mm_struct *mm, 16static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index c9bae1362606..b0184cf02575 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -13,7 +13,6 @@ struct bcm63xx_spi_pdata {
13 unsigned int msg_ctl_width; 13 unsigned int msg_ctl_width;
14 int bus_num; 14 int bus_num;
15 int num_chipselect; 15 int num_chipselect;
16 u32 speed_hz;
17}; 16};
18 17
19enum bcm63xx_regs_spi { 18enum bcm63xx_regs_spi {
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
index 62d6a3b4d3b7..4e0b6bc1165e 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -9,10 +9,8 @@
9 * 9 *
10 * Initializes the local nvram copy from the target address and checks 10 * Initializes the local nvram copy from the target address and checks
11 * its checksum. 11 * its checksum.
12 *
13 * Returns 0 on success.
14 */ 12 */
15int __init bcm63xx_nvram_init(void *nvram); 13void bcm63xx_nvram_init(void *nvram);
16 14
17/** 15/**
18 * bcm63xx_nvram_get_name() - returns the board name according to nvram 16 * bcm63xx_nvram_get_name() - returns the board name according to nvram
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index d9c828419037..193c0912d38e 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,11 +28,7 @@
28/* #define cpu_has_prefetch ? */ 28/* #define cpu_has_prefetch ? */
29#define cpu_has_mcheck 1 29#define cpu_has_mcheck 1
30/* #define cpu_has_ejtag ? */ 30/* #define cpu_has_ejtag ? */
31#ifdef CONFIG_CPU_HAS_LLSC
32#define cpu_has_llsc 1 31#define cpu_has_llsc 1
33#else
34#define cpu_has_llsc 0
35#endif
36/* #define cpu_has_vtag_icache ? */ 32/* #define cpu_has_vtag_icache ? */
37/* #define cpu_has_dc_aliases ? */ 33/* #define cpu_has_dc_aliases ? */
38/* #define cpu_has_ic_fills_f_dc ? */ 34/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 12b70c25906a..0da44d422f5b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1166,7 +1166,10 @@ do { \
1166 unsigned int __dspctl; \ 1166 unsigned int __dspctl; \
1167 \ 1167 \
1168 __asm__ __volatile__( \ 1168 __asm__ __volatile__( \
1169 " .set push \n" \
1170 " .set dsp \n" \
1169 " rddsp %0, %x1 \n" \ 1171 " rddsp %0, %x1 \n" \
1172 " .set pop \n" \
1170 : "=r" (__dspctl) \ 1173 : "=r" (__dspctl) \
1171 : "i" (mask)); \ 1174 : "i" (mask)); \
1172 __dspctl; \ 1175 __dspctl; \
@@ -1175,30 +1178,198 @@ do { \
1175#define wrdsp(val, mask) \ 1178#define wrdsp(val, mask) \
1176do { \ 1179do { \
1177 __asm__ __volatile__( \ 1180 __asm__ __volatile__( \
1181 " .set push \n" \
1182 " .set dsp \n" \
1178 " wrdsp %0, %x1 \n" \ 1183 " wrdsp %0, %x1 \n" \
1184 " .set pop \n" \
1179 : \ 1185 : \
1180 : "r" (val), "i" (mask)); \ 1186 : "r" (val), "i" (mask)); \
1181} while (0) 1187} while (0)
1182 1188
1183#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) 1189#define mflo0() \
1184#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) 1190({ \
1185#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) 1191 long mflo0; \
1186#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) 1192 __asm__( \
1187 1193 " .set push \n" \
1188#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) 1194 " .set dsp \n" \
1189#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) 1195 " mflo %0, $ac0 \n" \
1190#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) 1196 " .set pop \n" \
1191#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) 1197 : "=r" (mflo0)); \
1192 1198 mflo0; \
1193#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) 1199})
1194#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) 1200
1195#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) 1201#define mflo1() \
1196#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) 1202({ \
1197 1203 long mflo1; \
1198#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) 1204 __asm__( \
1199#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) 1205 " .set push \n" \
1200#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) 1206 " .set dsp \n" \
1201#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) 1207 " mflo %0, $ac1 \n" \
1208 " .set pop \n" \
1209 : "=r" (mflo1)); \
1210 mflo1; \
1211})
1212
1213#define mflo2() \
1214({ \
1215 long mflo2; \
1216 __asm__( \
1217 " .set push \n" \
1218 " .set dsp \n" \
1219 " mflo %0, $ac2 \n" \
1220 " .set pop \n" \
1221 : "=r" (mflo2)); \
1222 mflo2; \
1223})
1224
1225#define mflo3() \
1226({ \
1227 long mflo3; \
1228 __asm__( \
1229 " .set push \n" \
1230 " .set dsp \n" \
1231 " mflo %0, $ac3 \n" \
1232 " .set pop \n" \
1233 : "=r" (mflo3)); \
1234 mflo3; \
1235})
1236
1237#define mfhi0() \
1238({ \
1239 long mfhi0; \
1240 __asm__( \
1241 " .set push \n" \
1242 " .set dsp \n" \
1243 " mfhi %0, $ac0 \n" \
1244 " .set pop \n" \
1245 : "=r" (mfhi0)); \
1246 mfhi0; \
1247})
1248
1249#define mfhi1() \
1250({ \
1251 long mfhi1; \
1252 __asm__( \
1253 " .set push \n" \
1254 " .set dsp \n" \
1255 " mfhi %0, $ac1 \n" \
1256 " .set pop \n" \
1257 : "=r" (mfhi1)); \
1258 mfhi1; \
1259})
1260
1261#define mfhi2() \
1262({ \
1263 long mfhi2; \
1264 __asm__( \
1265 " .set push \n" \
1266 " .set dsp \n" \
1267 " mfhi %0, $ac2 \n" \
1268 " .set pop \n" \
1269 : "=r" (mfhi2)); \
1270 mfhi2; \
1271})
1272
1273#define mfhi3() \
1274({ \
1275 long mfhi3; \
1276 __asm__( \
1277 " .set push \n" \
1278 " .set dsp \n" \
1279 " mfhi %0, $ac3 \n" \
1280 " .set pop \n" \
1281 : "=r" (mfhi3)); \
1282 mfhi3; \
1283})
1284
1285
1286#define mtlo0(x) \
1287({ \
1288 __asm__( \
1289 " .set push \n" \
1290 " .set dsp \n" \
1291 " mtlo %0, $ac0 \n" \
1292 " .set pop \n" \
1293 : \
1294 : "r" (x)); \
1295})
1296
1297#define mtlo1(x) \
1298({ \
1299 __asm__( \
1300 " .set push \n" \
1301 " .set dsp \n" \
1302 " mtlo %0, $ac1 \n" \
1303 " .set pop \n" \
1304 : \
1305 : "r" (x)); \
1306})
1307
1308#define mtlo2(x) \
1309({ \
1310 __asm__( \
1311 " .set push \n" \
1312 " .set dsp \n" \
1313 " mtlo %0, $ac2 \n" \
1314 " .set pop \n" \
1315 : \
1316 : "r" (x)); \
1317})
1318
1319#define mtlo3(x) \
1320({ \
1321 __asm__( \
1322 " .set push \n" \
1323 " .set dsp \n" \
1324 " mtlo %0, $ac3 \n" \
1325 " .set pop \n" \
1326 : \
1327 : "r" (x)); \
1328})
1329
1330#define mthi0(x) \
1331({ \
1332 __asm__( \
1333 " .set push \n" \
1334 " .set dsp \n" \
1335 " mthi %0, $ac0 \n" \
1336 " .set pop \n" \
1337 : \
1338 : "r" (x)); \
1339})
1340
1341#define mthi1(x) \
1342({ \
1343 __asm__( \
1344 " .set push \n" \
1345 " .set dsp \n" \
1346 " mthi %0, $ac1 \n" \
1347 " .set pop \n" \
1348 : \
1349 : "r" (x)); \
1350})
1351
1352#define mthi2(x) \
1353({ \
1354 __asm__( \
1355 " .set push \n" \
1356 " .set dsp \n" \
1357 " mthi %0, $ac2 \n" \
1358 " .set pop \n" \
1359 : \
1360 : "r" (x)); \
1361})
1362
1363#define mthi3(x) \
1364({ \
1365 __asm__( \
1366 " .set push \n" \
1367 " .set dsp \n" \
1368 " mthi %0, $ac3 \n" \
1369 " .set pop \n" \
1370 : \
1371 : "r" (x)); \
1372})
1202 1373
1203#else 1374#else
1204 1375
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 99fc547af9d3..eab99e536b5c 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
31#define PAGE_SHIFT 16 31#define PAGE_SHIFT 16
32#endif 32#endif
33#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 33#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
34#define PAGE_MASK (~(PAGE_SIZE - 1)) 34#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
35 35
36#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 36#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
37#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) 37#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
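The new PAGE_MASK is built from a plain int instead of the unsigned long PAGE_SIZE. The visible difference in C is what happens when the mask meets a value wider than 32 bits, for example a 64-bit physical address on a 32-bit kernel: an int mask sign-extends and keeps the high bits, a 32-bit unsigned mask zero-extends and clears them. A user-space demonstration of that arithmetic (PAGE_SHIFT of 12 and the address are illustrative):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
	uint32_t ul_mask = ~(uint32_t)((1UL << DEMO_PAGE_SHIFT) - 1);	/* old style on a 32-bit kernel */
	int32_t int_mask = ~((1 << DEMO_PAGE_SHIFT) - 1);		/* new style */
	uint64_t phys = 0x100003456ULL;					/* >32-bit physical address */

	printf("zero-extended mask: %#llx\n",
	       (unsigned long long)(phys & ul_mask));
	printf("sign-extended mask: %#llx\n",
	       (unsigned long long)(phys & int_mask));
	return 0;
}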
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 197f6367c201..8efe5a9e2c3e 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -21,6 +21,6 @@
21#include <asm/sigcontext.h> 21#include <asm/sigcontext.h>
22#include <asm/siginfo.h> 22#include <asm/siginfo.h>
23 23
24#define __ARCH_HAS_ODD_SIGACTION 24#define __ARCH_HAS_IRIX_SIGACTION
25 25
26#endif /* _ASM_SIGNAL_H */ 26#endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index d6b18b4d0f3a..addb9f556b71 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
72 * 72 *
73 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single 73 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
74 * Unix names RESETHAND and NODEFER respectively. 74 * Unix names RESETHAND and NODEFER respectively.
75 *
76 * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
77 * supported its use and no libc was using it, so the entire sa-restorer
78 * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
79 * retaining only the SA_RESTORER definition as a reminder to avoid
80 * accidental reuse of the mask bit.
75 */ 81 */
76#define SA_ONSTACK 0x08000000 82#define SA_ONSTACK 0x08000000
77#define SA_RESETHAND 0x80000000 83#define SA_RESETHAND 0x80000000
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
84#define SA_NOMASK SA_NODEFER 90#define SA_NOMASK SA_NODEFER
85#define SA_ONESHOT SA_RESETHAND 91#define SA_ONESHOT SA_RESETHAND
86 92
87#define SA_RESTORER 0x04000000 /* Only for o32 */
88
89#define MINSIGSTKSZ 2048 93#define MINSIGSTKSZ 2048
90#define SIGSTKSZ 8192 94#define SIGSTKSZ 8192
91 95
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index f81d98f6184c..de75fb50562b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
100obj-$(CONFIG_JUMP_LABEL) += jump_label.o 100obj-$(CONFIG_JUMP_LABEL) += jump_label.o
101 101
102# 102#
103# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe 103# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
 104# to enable DSP assembler support here even if the MIPS Release 2 CPU we 104# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
105# are targetting does not support DSP because all code-paths making use of 105# here because the compiler may use DSP ASE instructions (such as lwx) in
106# it properly check that the running CPU *actually does* support these 106# code paths where we cannot check that the CPU we are running on supports it.
107# instructions. 107# Proper abstraction using HAVE_AS_DSP and macros is done in
108# arch/mips/include/asm/mipsregs.h.
108# 109#
109ifeq ($(CONFIG_CPU_MIPSR2), y) 110ifeq ($(CONFIG_CPU_MIPSR2), y)
110CFLAGS_DSP = -DHAVE_AS_DSP 111CFLAGS_DSP = -DHAVE_AS_DSP
111 112
112#
113# Check if assembler supports DSP ASE
114#
115ifeq ($(call cc-option-yn,-mdsp), y)
116CFLAGS_DSP += -mdsp
117endif
118
119#
120# Check if assembler supports DSP ASE Rev2
121#
122ifeq ($(call cc-option-yn,-mdspr2), y)
123CFLAGS_DSP += -mdspr2
124endif
125
126CFLAGS_signal.o = $(CFLAGS_DSP) 113CFLAGS_signal.o = $(CFLAGS_DSP)
127CFLAGS_signal32.o = $(CFLAGS_DSP) 114CFLAGS_signal32.o = $(CFLAGS_DSP)
128CFLAGS_process.o = $(CFLAGS_DSP) 115CFLAGS_process.o = $(CFLAGS_DSP)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6bfccc227a95..5fe66a0c3224 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
580 c->tlbsize = 48; 580 c->tlbsize = 48;
581 break; 581 break;
582 case PRID_IMP_VR41XX: 582 case PRID_IMP_VR41XX:
583 set_isa(c, MIPS_CPU_ISA_III);
584 c->options = R4K_OPTS;
585 c->tlbsize = 32;
583 switch (c->processor_id & 0xf0) { 586 switch (c->processor_id & 0xf0) {
584 case PRID_REV_VR4111: 587 case PRID_REV_VR4111:
585 c->cputype = CPU_VR4111; 588 c->cputype = CPU_VR4111;
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
604 __cpu_name[cpu] = "NEC VR4131"; 607 __cpu_name[cpu] = "NEC VR4131";
605 } else { 608 } else {
606 c->cputype = CPU_VR4133; 609 c->cputype = CPU_VR4133;
610 c->options |= MIPS_CPU_LLSC;
607 __cpu_name[cpu] = "NEC VR4133"; 611 __cpu_name[cpu] = "NEC VR4133";
608 } 612 }
609 break; 613 break;
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
613 __cpu_name[cpu] = "NEC Vr41xx"; 617 __cpu_name[cpu] = "NEC Vr41xx";
614 break; 618 break;
615 } 619 }
616 set_isa(c, MIPS_CPU_ISA_III);
617 c->options = R4K_OPTS;
618 c->tlbsize = 32;
619 break; 620 break;
620 case PRID_IMP_R4300: 621 case PRID_IMP_R4300:
621 c->cputype = CPU_R4300; 622 c->cputype = CPU_R4300;
@@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void)
1226 if (c->options & MIPS_CPU_FPU) { 1227 if (c->options & MIPS_CPU_FPU) {
1227 c->fpu_id = cpu_get_fpu_id(); 1228 c->fpu_id = cpu_get_fpu_id();
1228 1229
1229 if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1230 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1230 c->isa_level == MIPS_CPU_ISA_M32R2 || 1231 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1231 c->isa_level == MIPS_CPU_ISA_M64R1 ||
1232 c->isa_level == MIPS_CPU_ISA_M64R2) {
1233 if (c->fpu_id & MIPS_FPIR_3D) 1232 if (c->fpu_id & MIPS_FPIR_3D)
1234 c->ases |= MIPS_ASE_MIPS3D; 1233 c->ases |= MIPS_ASE_MIPS3D;
1235 } 1234 }
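This test (and the matching one in c-r4k.c further down) only works because the MIPS_CPU_ISA_* values are single bits in a mask, so "is one of these ISAs" becomes one AND instead of a chain of equality checks. A small sketch of the pattern with made-up bit values (not the kernel's real constants):

#include <stdio.h>

#define DEMO_ISA_M32R1	0x01
#define DEMO_ISA_M32R2	0x02
#define DEMO_ISA_M64R1	0x04
#define DEMO_ISA_M64R2	0x08

int main(void)
{
	unsigned int isa_level = DEMO_ISA_M64R2;

	if (isa_level & (DEMO_ISA_M32R1 | DEMO_ISA_M32R2 |
			 DEMO_ISA_M64R1 | DEMO_ISA_M64R2))
		printf("MIPS32/MIPS64 release 1 or 2 core\n");
	return 0;
}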
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9e6440eaa455..505cb77d1280 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -7,7 +7,9 @@
7 * Copyright (C) 2007 MIPS Technologies, Inc. 7 * Copyright (C) 2007 MIPS Technologies, Inc.
8 * written by Ralf Baechle (ralf@linux-mips.org) 8 * written by Ralf Baechle (ralf@linux-mips.org)
9 */ 9 */
10#include <linux/kernel.h>
10#include <linux/console.h> 11#include <linux/console.h>
12#include <linux/printk.h>
11#include <linux/init.h> 13#include <linux/init.h>
12 14
13#include <asm/setup.h> 15#include <asm/setup.h>
@@ -24,20 +26,18 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
24 } 26 }
25} 27}
26 28
27static struct console early_console = { 29static struct console early_console_prom = {
28 .name = "early", 30 .name = "early",
29 .write = early_console_write, 31 .write = early_console_write,
30 .flags = CON_PRINTBUFFER | CON_BOOT, 32 .flags = CON_PRINTBUFFER | CON_BOOT,
31 .index = -1 33 .index = -1
32}; 34};
33 35
34static int early_console_initialized __initdata;
35
36void __init setup_early_printk(void) 36void __init setup_early_printk(void)
37{ 37{
38 if (early_console_initialized) 38 if (early_console)
39 return; 39 return;
40 early_console_initialized = 1; 40 early_console = &early_console_prom;
41 41
42 register_console(&early_console); 42 register_console(&early_console_prom);
43} 43}
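As in the Microblaze hunk earlier, the private early_console_initialized flag goes away and the early_console pointer itself carries the state: NULL means no early console yet. A tiny sketch of using the pointer as the flag (struct and names are stand-ins, not the kernel's console API):

#include <stdio.h>
#include <stddef.h>

struct demo_console {
	const char *name;
};

static struct demo_console demo_early = { .name = "early" };
static struct demo_console *early_console;	/* NULL until registered */

static void demo_setup_early_printk(void)
{
	if (early_console)	/* already registered, nothing to do */
		return;
	early_console = &demo_early;
	printf("registered %s console\n", early_console->name);
}

int main(void)
{
	demo_setup_early_printk();
	demo_setup_early_printk();	/* second call is a no-op */
	return 0;
}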
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 8eeee1c860c0..db9655f08892 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
171 err = compat_sys_shmctl(first, second, compat_ptr(ptr)); 171 err = compat_sys_shmctl(first, second, compat_ptr(ptr));
172 break; 172 break;
173 default: 173 default:
174 err = -EINVAL; 174 err = -ENOSYS;
175 break; 175 break;
176 } 176 }
177 177
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 165867673357..33d067148e61 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -46,10 +46,9 @@
46 PTR_L a5, PT_R9(sp) 46 PTR_L a5, PT_R9(sp)
47 PTR_L a6, PT_R10(sp) 47 PTR_L a6, PT_R10(sp)
48 PTR_L a7, PT_R11(sp) 48 PTR_L a7, PT_R11(sp)
49#else
50 PTR_ADDIU sp, PT_SIZE
51#endif 49#endif
52.endm 50 PTR_ADDIU sp, PT_SIZE
51 .endm
53 52
54 .macro RETURN_BACK 53 .macro RETURN_BACK
55 jr ra 54 jr ra
@@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)
68 .globl _mcount 67 .globl _mcount
69_mcount: 68_mcount:
70 b ftrace_stub 69 b ftrace_stub
71 addiu sp,sp,8 70#ifdef CONFIG_32BIT
71 addiu sp,sp,8
72#else
73 nop
74#endif
72 75
73 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ 76 /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
74 lw t1, function_trace_stop 77 lw t1, function_trace_stop
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 135c4aadccbe..7a54f74b7818 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
67 if (cpu_has_mips_r) { 67 if (cpu_has_mips_r) {
68 seq_printf(m, "isa\t\t\t:"); 68 seq_printf(m, "isa\t\t\t:");
69 if (cpu_has_mips_1) 69 if (cpu_has_mips_1)
70 seq_printf(m, "%s", "mips1"); 70 seq_printf(m, "%s", " mips1");
71 if (cpu_has_mips_2) 71 if (cpu_has_mips_2)
72 seq_printf(m, "%s", " mips2"); 72 seq_printf(m, "%s", " mips2");
73 if (cpu_has_mips_3) 73 if (cpu_has_mips_3)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3be4405c2d14..cfc742d75b7f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,44 +41,26 @@
41#include <asm/inst.h> 41#include <asm/inst.h>
42#include <asm/stacktrace.h> 42#include <asm/stacktrace.h>
43 43
44/* 44#ifdef CONFIG_HOTPLUG_CPU
45 * The idle thread. There's no useful work to be done, so just try to conserve 45void arch_cpu_idle_dead(void)
46 * power and have a low exit latency (ie sit in a loop waiting for somebody to
47 * say that they'd like to reschedule)
48 */
49void __noreturn cpu_idle(void)
50{ 46{
51 int cpu; 47 /* What the heck is this check doing ? */
52 48 if (!cpu_isset(smp_processor_id(), cpu_callin_map))
53 /* CPU is going idle. */ 49 play_dead();
54 cpu = smp_processor_id(); 50}
51#endif
55 52
56 /* endless idle loop with no priority at all */ 53void arch_cpu_idle(void)
57 while (1) { 54{
58 tick_nohz_idle_enter();
59 rcu_idle_enter();
60 while (!need_resched() && cpu_online(cpu)) {
61#ifdef CONFIG_MIPS_MT_SMTC 55#ifdef CONFIG_MIPS_MT_SMTC
62 extern void smtc_idle_loop_hook(void); 56 extern void smtc_idle_loop_hook(void);
63 57
64 smtc_idle_loop_hook(); 58 smtc_idle_loop_hook();
65#endif 59#endif
66 60 if (cpu_wait)
67 if (cpu_wait) { 61 (*cpu_wait)();
68 /* Don't trace irqs off for idle */ 62 else
69 stop_critical_timings(); 63 local_irq_enable();
70 (*cpu_wait)();
71 start_critical_timings();
72 }
73 }
74#ifdef CONFIG_HOTPLUG_CPU
75 if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
76 play_dead();
77#endif
78 rcu_idle_exit();
79 tick_nohz_idle_exit();
80 schedule_preempt_disabled();
81 }
82} 64}
83 65
84asmlinkage void ret_from_fork(void); 66asmlinkage void ret_from_fork(void);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 66bf4e22d9b9..aee04af213c5 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -139,7 +139,7 @@ asmlinkage __cpuinit void start_secondary(void)
139 WARN_ON_ONCE(!irqs_disabled()); 139 WARN_ON_ONCE(!irqs_disabled());
140 mp_ops->smp_finish(); 140 mp_ops->smp_finish();
141 141
142 cpu_idle(); 142 cpu_startup_entry(CPUHP_ONLINE);
143} 143}
144 144
145/* 145/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a200b5bdbb87..c3abb88170fc 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1571#ifdef CONFIG_64BIT 1571#ifdef CONFIG_64BIT
1572 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1572 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1573#endif 1573#endif
1574 if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) 1574 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1575 status_set |= ST0_XX; 1575 status_set |= ST0_XX;
1576 if (cpu_has_dsp) 1576 if (cpu_has_dsp)
1577 status_set |= ST0_MX; 1577 status_set |= ST0_MX;
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 81f1dcfdcab8..a64daee740ee 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
90 unsigned bit = nr & SZLONG_MASK; 90 unsigned bit = nr & SZLONG_MASK;
91 unsigned long mask; 91 unsigned long mask;
92 unsigned long flags; 92 unsigned long flags;
93 unsigned long res; 93 int res;
94 94
95 a += nr >> SZLONG_LOG; 95 a += nr >> SZLONG_LOG;
96 mask = 1UL << bit; 96 mask = 1UL << bit;
97 raw_local_irq_save(flags); 97 raw_local_irq_save(flags);
98 res = (mask & *a); 98 res = (mask & *a) != 0;
99 *a |= mask; 99 *a |= mask;
100 raw_local_irq_restore(flags); 100 raw_local_irq_restore(flags);
101 return res; 101 return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
116 unsigned bit = nr & SZLONG_MASK; 116 unsigned bit = nr & SZLONG_MASK;
117 unsigned long mask; 117 unsigned long mask;
118 unsigned long flags; 118 unsigned long flags;
119 unsigned long res; 119 int res;
120 120
121 a += nr >> SZLONG_LOG; 121 a += nr >> SZLONG_LOG;
122 mask = 1UL << bit; 122 mask = 1UL << bit;
123 raw_local_irq_save(flags); 123 raw_local_irq_save(flags);
124 res = (mask & *a); 124 res = (mask & *a) != 0;
125 *a |= mask; 125 *a |= mask;
126 raw_local_irq_restore(flags); 126 raw_local_irq_restore(flags);
127 return res; 127 return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
141 unsigned bit = nr & SZLONG_MASK; 141 unsigned bit = nr & SZLONG_MASK;
142 unsigned long mask; 142 unsigned long mask;
143 unsigned long flags; 143 unsigned long flags;
144 unsigned long res; 144 int res;
145 145
146 a += nr >> SZLONG_LOG; 146 a += nr >> SZLONG_LOG;
147 mask = 1UL << bit; 147 mask = 1UL << bit;
148 raw_local_irq_save(flags); 148 raw_local_irq_save(flags);
149 res = (mask & *a); 149 res = (mask & *a) != 0;
150 *a &= ~mask; 150 *a &= ~mask;
151 raw_local_irq_restore(flags); 151 raw_local_irq_restore(flags);
152 return res; 152 return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
166 unsigned bit = nr & SZLONG_MASK; 166 unsigned bit = nr & SZLONG_MASK;
167 unsigned long mask; 167 unsigned long mask;
168 unsigned long flags; 168 unsigned long flags;
169 unsigned long res; 169 int res;
170 170
171 a += nr >> SZLONG_LOG; 171 a += nr >> SZLONG_LOG;
172 mask = 1UL << bit; 172 mask = 1UL << bit;
173 raw_local_irq_save(flags); 173 raw_local_irq_save(flags);
174 res = (mask & *a); 174 res = (mask & *a) != 0;
175 *a ^= mask; 175 *a ^= mask;
176 raw_local_irq_restore(flags); 176 raw_local_irq_restore(flags);
177 return res; 177 return res;
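
Annotation (not part of the patch): the four bitops hunks above change res from unsigned long to int and normalise it with "!= 0" because each function's return type is int; returning the raw "mask & *a" value would silently truncate away bits at position 32 and above on a 64-bit kernel, so a test-and-set on such a bit could wrongly report the bit as having been clear. A stand-alone user-space demonstration of the truncation (64-bit/LP64 host assumed, names invented):

	#include <stdio.h>

	static int old_style(unsigned long *a, unsigned int bit)
	{
		unsigned long mask = 1UL << bit;
		unsigned long res = mask & *a;	/* e.g. 0x200000000 for bit 33 */
		*a |= mask;
		return res;			/* low 32 bits only: becomes 0 */
	}

	static int new_style(unsigned long *a, unsigned int bit)
	{
		unsigned long mask = 1UL << bit;
		int res = (mask & *a) != 0;	/* normalised to 0/1 before returning */
		*a |= mask;
		return res;
	}

	int main(void)
	{
		unsigned long word = 1UL << 33;	/* bit 33 already set */

		printf("old: %d, new: %d\n",
		       old_style(&word, 33), new_style(&word, 33));
		return 0;	/* prints "old: 0, new: 1" on an LP64 host */
	}
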
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 507147aebd41..a6adffbb4e5f 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -270,7 +270,7 @@ LEAF(csum_partial)
270#endif 270#endif
271 271
272 /* odd buffer alignment? */ 272 /* odd buffer alignment? */
273#ifdef CPU_MIPSR2 273#ifdef CONFIG_CPU_MIPSR2
274 wsbh v1, sum 274 wsbh v1, sum
275 movn sum, v1, t7 275 movn sum, v1, t7
276#else 276#else
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
670 addu sum, v1 670 addu sum, v1
671#endif 671#endif
672 672
673#ifdef CPU_MIPSR2 673#ifdef CONFIG_CPU_MIPSR2
674 wsbh v1, sum 674 wsbh v1, sum
675 movn sum, v1, odd 675 movn sum, v1, odd
676#else 676#else
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index ecca559b8d7b..2078915eacb9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)
1247 return; 1247 return;
1248 1248
1249 default: 1249 default:
1250 if (c->isa_level == MIPS_CPU_ISA_M32R1 || 1250 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1251 c->isa_level == MIPS_CPU_ISA_M32R2 || 1251 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1252 c->isa_level == MIPS_CPU_ISA_M64R1 ||
1253 c->isa_level == MIPS_CPU_ISA_M64R2) {
1254#ifdef CONFIG_MIPS_CPU_SCACHE 1252#ifdef CONFIG_MIPS_CPU_SCACHE
1255 if (mips_sc_init ()) { 1253 if (mips_sc_init ()) {
1256 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; 1254 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 67929251286c..3d0346dbccf4 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -77,10 +77,9 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
77/* 77/*
78 * Not static inline because used by IP27 special magic initialization code 78 * Not static inline because used by IP27 special magic initialization code
79 */ 79 */
80unsigned long setup_zero_pages(void) 80void setup_zero_pages(void)
81{ 81{
82 unsigned int order; 82 unsigned int order, i;
83 unsigned long size;
84 struct page *page; 83 struct page *page;
85 84
86 if (cpu_has_vce) 85 if (cpu_has_vce)
@@ -94,15 +93,10 @@ unsigned long setup_zero_pages(void)
94 93
95 page = virt_to_page((void *)empty_zero_page); 94 page = virt_to_page((void *)empty_zero_page);
96 split_page(page, order); 95 split_page(page, order);
97 while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) { 96 for (i = 0; i < (1 << order); i++, page++)
98 SetPageReserved(page); 97 mark_page_reserved(page);
99 page++;
100 }
101
102 size = PAGE_SIZE << order;
103 zero_page_mask = (size - 1) & PAGE_MASK;
104 98
105 return 1UL << order; 99 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
106} 100}
107 101
108#ifdef CONFIG_MIPS_MT_SMTC 102#ifdef CONFIG_MIPS_MT_SMTC
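
Annotation (not part of the patch): the replacement line computes zero_page_mask straight from the order instead of going through the removed size local, and the old "1UL << order" page count disappears because the caller no longer subtracts it (see the mem_init() hunk below). For a concrete feel of the value, assuming 4 KiB pages and order 3 (the cpu_has_vce case, where the zero page is replicated eight times):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned int order = 3;		/* eight replicated zero pages */
		unsigned long zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;

		printf("zero_page_mask = %#lx\n", zero_page_mask);	/* 0x7000 */
		return 0;
	}
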
@@ -380,7 +374,7 @@ void __init mem_init(void)
380 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 374 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
381 375
382 totalram_pages += free_all_bootmem(); 376 totalram_pages += free_all_bootmem();
383 totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ 377 setup_zero_pages(); /* Setup zeroed pages. */
384 378
385 reservedpages = ram = 0; 379 reservedpages = ram = 0;
386 for (tmp = 0; tmp < max_low_pfn; tmp++) 380 for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -399,12 +393,8 @@ void __init mem_init(void)
399 SetPageReserved(page); 393 SetPageReserved(page);
400 continue; 394 continue;
401 } 395 }
402 ClearPageReserved(page); 396 free_highmem_page(page);
403 init_page_count(page);
404 __free_page(page);
405 totalhigh_pages++;
406 } 397 }
407 totalram_pages += totalhigh_pages;
408 num_physpages += totalhigh_pages; 398 num_physpages += totalhigh_pages;
409#endif 399#endif
410 400
@@ -440,11 +430,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
440 struct page *page = pfn_to_page(pfn); 430 struct page *page = pfn_to_page(pfn);
441 void *addr = phys_to_virt(PFN_PHYS(pfn)); 431 void *addr = phys_to_virt(PFN_PHYS(pfn));
442 432
443 ClearPageReserved(page);
444 init_page_count(page);
445 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); 433 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
446 __free_page(page); 434 free_reserved_page(page);
447 totalram_pages++;
448 } 435 }
449 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); 436 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
450} 437}
@@ -452,18 +439,14 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
452#ifdef CONFIG_BLK_DEV_INITRD 439#ifdef CONFIG_BLK_DEV_INITRD
453void free_initrd_mem(unsigned long start, unsigned long end) 440void free_initrd_mem(unsigned long start, unsigned long end)
454{ 441{
455 free_init_pages("initrd memory", 442 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
456 virt_to_phys((void *)start),
457 virt_to_phys((void *)end));
458} 443}
459#endif 444#endif
460 445
461void __init_refok free_initmem(void) 446void __init_refok free_initmem(void)
462{ 447{
463 prom_free_prom_memory(); 448 prom_free_prom_memory();
464 free_init_pages("unused kernel memory", 449 free_initmem_default(POISON_FREE_INITMEM);
465 __pa_symbol(&__init_begin),
466 __pa_symbol(&__init_end));
467} 450}
468 451
469#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 452#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
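
Annotation (not part of the patch): the mm/init.c conversions above, and the matching mn10300, openrisc and parisc hunks later in this diff, replace open-coded ClearPageReserved/init_page_count/__free_page loops with the shared helpers (free_reserved_page, free_reserved_area, free_initmem_default, free_highmem_page). A rough sketch of what the area helper does, reconstructed from the loops being deleted rather than from the mm/ implementation (kernel context, needs <linux/mm.h>):

	/* hypothetical sketch, mirroring the removed per-arch loops */
	static unsigned long free_reserved_area_sketch(unsigned long start,
						       unsigned long end,
						       int poison, const char *what)
	{
		unsigned long pages = 0;

		for (; start < end; start += PAGE_SIZE, pages++) {
			struct page *page = virt_to_page((void *)start);

			if (poison)
				memset((void *)start, poison, PAGE_SIZE);
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);	/* hand the page to the buddy allocator */
		}
		totalram_pages += pages;
		if (what)
			printk(KERN_INFO "Freeing %s memory: %luK freed\n",
			       what, pages << (PAGE_SHIFT - 10));
		return pages;
	}
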
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 93d937b4b1ba..df96da7e939b 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
98 c->scache.flags |= MIPS_CACHE_NOT_PRESENT; 98 c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
99 99
100 /* Ignore anything but MIPSxx processors */ 100 /* Ignore anything but MIPSxx processors */
101 if (c->isa_level != MIPS_CPU_ISA_M32R1 && 101 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
102 c->isa_level != MIPS_CPU_ISA_M32R2 && 102 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
103 c->isa_level != MIPS_CPU_ISA_M64R1 &&
104 c->isa_level != MIPS_CPU_ISA_M64R2)
105 return 0; 103 return 0;
106 104
107 /* Does this MIPS32/MIPS64 CPU have a config2 register? */ 105 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 38a80c83fd67..d1faece21b6a 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -19,7 +19,7 @@
19#include <asm/mach-au1x00/au1000.h> 19#include <asm/mach-au1x00/au1000.h>
20#include <asm/tlbmisc.h> 20#include <asm/tlbmisc.h>
21 21
22#ifdef CONFIG_DEBUG_PCI 22#ifdef CONFIG_PCI_DEBUG
23#define DBG(x...) printk(KERN_DEBUG x) 23#define DBG(x...) printk(KERN_DEBUG x)
24#else 24#else
25#define DBG(x...) do {} while (0) 25#define DBG(x...) do {} while (0)
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
162 if (status & (1 << 29)) { 162 if (status & (1 << 29)) {
163 *data = 0xffffffff; 163 *data = 0xffffffff;
164 error = -1; 164 error = -1;
165 DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", 165 DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
166 access_type, bus->number, device); 166 access_type, bus->number, device);
167 } else if ((status >> 28) & 0xf) { 167 } else if ((status >> 28) & 0xf) {
168 DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", 168 DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0872f12f268d..594e60d6a43b 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -115,7 +115,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
115 pci_bus_assign_resources(bus); 115 pci_bus_assign_resources(bus);
116 pci_enable_bridges(bus); 116 pci_enable_bridges(bus);
117 } 117 }
118 bus->dev.of_node = hose->of_node;
119 } 118 }
120} 119}
121 120
@@ -169,6 +168,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
169 } 168 }
170 } 169 }
171} 170}
171
172struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
173{
174 struct pci_controller *hose = bus->sysdata;
175
176 return of_node_get(hose->of_node);
177}
172#endif 178#endif
173 179
174static DEFINE_MUTEX(pci_scan_mutex); 180static DEFINE_MUTEX(pci_scan_mutex);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 3505d08ff2fd..5f2bddb1860e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -457,7 +457,7 @@ void __init prom_free_prom_memory(void)
457 /* We got nothing to free here ... */ 457 /* We got nothing to free here ... */
458} 458}
459 459
460extern unsigned long setup_zero_pages(void); 460extern void setup_zero_pages(void);
461 461
462void __init paging_init(void) 462void __init paging_init(void)
463{ 463{
@@ -492,7 +492,7 @@ void __init mem_init(void)
492 totalram_pages += free_all_bootmem_node(NODE_DATA(node)); 492 totalram_pages += free_all_bootmem_node(NODE_DATA(node));
493 } 493 }
494 494
495 totalram_pages -= setup_zero_pages(); /* This comes from node 0 */ 495 setup_zero_pages(); /* This comes from node 0 */
496 496
497 codesize = (unsigned long) &_etext - (unsigned long) &_text; 497 codesize = (unsigned long) &_etext - (unsigned long) &_text;
498 datasize = (unsigned long) &_edata - (unsigned long) &_etext; 498 datasize = (unsigned long) &_edata - (unsigned long) &_etext;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index b06c7360b1c6..428da175d073 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,7 @@ config MN10300
8 select HAVE_ARCH_KGDB 8 select HAVE_ARCH_KGDB
9 select GENERIC_ATOMIC64 9 select GENERIC_ATOMIC64
10 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER 10 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
11 select HAVE_VIRT_TO_BUS 11 select VIRT_TO_BUS
12 select GENERIC_CLOCKEVENTS 12 select GENERIC_CLOCKEVENTS
13 select MODULES_USE_ELF_RELA 13 select MODULES_USE_ELF_RELA
14 select OLD_SIGSUSPEND3 14 select OLD_SIGSUSPEND3
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index f90062b0622d..224b4262486d 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,8 +165,6 @@ void arch_release_thread_info(struct thread_info *ti);
165#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 165#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
166#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 166#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
167 167
168#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
169
170#endif /* __KERNEL__ */ 168#endif /* __KERNEL__ */
171 169
172#endif /* _ASM_THREAD_INFO_H */ 170#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 84f4e97e3074..2da39fb8b3b2 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -50,77 +50,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
50void (*pm_power_off)(void); 50void (*pm_power_off)(void);
51EXPORT_SYMBOL(pm_power_off); 51EXPORT_SYMBOL(pm_power_off);
52 52
53#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
54/*
55 * we use this if we don't have any better idle routine
56 */
57static void default_idle(void)
58{
59 local_irq_disable();
60 if (!need_resched())
61 safe_halt();
62 else
63 local_irq_enable();
64}
65
66#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
67/* 53/*
68 * On SMP it's slightly faster (but much more power-consuming!) 54 * On SMP it's slightly faster (but much more power-consuming!)
69 * to poll the ->work.need_resched flag instead of waiting for the 55 * to poll the ->work.need_resched flag instead of waiting for the
70 * cross-CPU IPI to arrive. Use this option with caution. 56 * cross-CPU IPI to arrive. Use this option with caution.
57 *
58 * tglx: No idea why this depends on HOTPLUG_CPU !?!
71 */ 59 */
72static inline void poll_idle(void) 60#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
73{ 61void arch_cpu_idle(void)
74 int oldval;
75
76 local_irq_enable();
77
78 /*
79 * Deal with another CPU just having chosen a thread to
80 * run here:
81 */
82 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
83
84 if (!oldval) {
85 set_thread_flag(TIF_POLLING_NRFLAG);
86 while (!need_resched())
87 cpu_relax();
88 clear_thread_flag(TIF_POLLING_NRFLAG);
89 } else {
90 set_need_resched();
91 }
92}
93#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
94
95/*
96 * the idle thread
97 * - there's no useful work to be done, so just try to conserve power and have
98 * a low exit latency (ie sit in a loop waiting for somebody to say that
99 * they'd like to reschedule)
100 */
101void cpu_idle(void)
102{ 62{
103 /* endless idle loop with no priority at all */ 63 safe_halt();
104 for (;;) {
105 rcu_idle_enter();
106 while (!need_resched()) {
107 void (*idle)(void);
108
109 smp_rmb();
110 if (!idle) {
111#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
112 idle = poll_idle;
113#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
114 idle = default_idle;
115#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
116 }
117 idle();
118 }
119 rcu_idle_exit();
120
121 schedule_preempt_disabled();
122 }
123} 64}
65#endif
124 66
125void release_segments(struct mm_struct *mm) 67void release_segments(struct mm_struct *mm)
126{ 68{
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 5d7e152a23b7..a17f9c9c14c9 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -675,7 +675,7 @@ int __init start_secondary(void *unused)
675#ifdef CONFIG_GENERIC_CLOCKEVENTS 675#ifdef CONFIG_GENERIC_CLOCKEVENTS
676 init_clockevents(); 676 init_clockevents();
677#endif 677#endif
678 cpu_idle(); 678 cpu_startup_entry(CPUHP_ONLINE);
679 return 0; 679 return 0;
680} 680}
681 681
@@ -935,8 +935,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
935 int timeout; 935 int timeout;
936 936
937#ifdef CONFIG_HOTPLUG_CPU 937#ifdef CONFIG_HOTPLUG_CPU
938 if (num_online_cpus() == 1)
939 disable_hlt();
940 if (sleep_mode[cpu]) 938 if (sleep_mode[cpu])
941 run_wakeup_cpu(cpu); 939 run_wakeup_cpu(cpu);
942#endif /* CONFIG_HOTPLUG_CPU */ 940#endif /* CONFIG_HOTPLUG_CPU */
@@ -1003,9 +1001,6 @@ int __cpu_disable(void)
1003void __cpu_die(unsigned int cpu) 1001void __cpu_die(unsigned int cpu)
1004{ 1002{
1005 run_sleep_cpu(cpu); 1003 run_sleep_cpu(cpu);
1006
1007 if (num_online_cpus() == 1)
1008 enable_hlt();
1009} 1004}
1010 1005
1011#ifdef CONFIG_MN10300_CACHE_ENABLED 1006#ifdef CONFIG_MN10300_CACHE_ENABLED
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index e57e5bc23562..5a8ace63a6b4 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -139,30 +139,11 @@ void __init mem_init(void)
139} 139}
140 140
141/* 141/*
142 *
143 */
144void free_init_pages(char *what, unsigned long begin, unsigned long end)
145{
146 unsigned long addr;
147
148 for (addr = begin; addr < end; addr += PAGE_SIZE) {
149 ClearPageReserved(virt_to_page(addr));
150 init_page_count(virt_to_page(addr));
151 memset((void *) addr, 0xcc, PAGE_SIZE);
152 free_page(addr);
153 totalram_pages++;
154 }
155 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
156}
157
158/*
159 * recycle memory containing stuff only required for initialisation 142 * recycle memory containing stuff only required for initialisation
160 */ 143 */
161void free_initmem(void) 144void free_initmem(void)
162{ 145{
163 free_init_pages("unused kernel memory", 146 free_initmem_default(POISON_FREE_INITMEM);
164 (unsigned long) &__init_begin,
165 (unsigned long) &__init_end);
166} 147}
167 148
168/* 149/*
@@ -171,6 +152,6 @@ void free_initmem(void)
171#ifdef CONFIG_BLK_DEV_INITRD 152#ifdef CONFIG_BLK_DEV_INITRD
172void free_initrd_mem(unsigned long start, unsigned long end) 153void free_initrd_mem(unsigned long start, unsigned long end)
173{ 154{
174 free_init_pages("initrd memory", start, end); 155 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
175} 156}
176#endif 157#endif
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 014a6482ed4c..9ab3bf2eca8d 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -9,10 +9,9 @@ config OPENRISC
9 select OF_EARLY_FLATTREE 9 select OF_EARLY_FLATTREE
10 select IRQ_DOMAIN 10 select IRQ_DOMAIN
11 select HAVE_MEMBLOCK 11 select HAVE_MEMBLOCK
12 select ARCH_WANT_OPTIONAL_GPIOLIB 12 select ARCH_REQUIRE_GPIOLIB
13 select HAVE_ARCH_TRACEHOOK 13 select HAVE_ARCH_TRACEHOOK
14 select HAVE_GENERIC_HARDIRQS 14 select HAVE_GENERIC_HARDIRQS
15 select HAVE_VIRT_TO_BUS
16 select GENERIC_IRQ_CHIP 15 select GENERIC_IRQ_CHIP
17 select GENERIC_IRQ_PROBE 16 select GENERIC_IRQ_PROBE
18 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index 07f3212422ad..d797acc901e4 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -128,8 +128,6 @@ register struct thread_info *current_thread_info_reg asm("r10");
128/* For OpenRISC, this is anything in the LSW other than syscall trace */ 128/* For OpenRISC, this is anything in the LSW other than syscall trace */
129#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP)) 129#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
130 130
131#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
132
133#endif /* __KERNEL__ */ 131#endif /* __KERNEL__ */
134 132
135#endif /* _ASM_THREAD_INFO_H */ 133#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index 35f92ce51c24..ec6d9d37cefd 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -4,7 +4,7 @@
4 4
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6 6
7obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ 7obj-y := setup.o or32_ksyms.o process.o dma.o \
8 traps.o time.o irq.o entry.o ptrace.o signal.o \ 8 traps.o time.o irq.o entry.o ptrace.o signal.o \
9 sys_call_table.o 9 sys_call_table.o
10 10
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
deleted file mode 100644
index 5e8a3b6d6bc6..000000000000
--- a/arch/openrisc/kernel/idle.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * OpenRISC idle.c
3 *
4 * Linux architectural port borrowing liberally from similar works of
5 * others. All original copyrights apply as per the original source
6 * declaration.
7 *
8 * Modifications for the OpenRISC architecture:
9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * Idle daemon for or32. Idle daemon will handle any action
18 * that needs to be taken when the system becomes idle.
19 */
20
21#include <linux/errno.h>
22#include <linux/sched.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/stddef.h>
27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h>
30#include <linux/tick.h>
31
32#include <asm/pgtable.h>
33#include <asm/uaccess.h>
34#include <asm/io.h>
35#include <asm/processor.h>
36#include <asm/mmu.h>
37#include <asm/cache.h>
38#include <asm/pgalloc.h>
39
40void (*powersave) (void) = NULL;
41
42void cpu_idle(void)
43{
44 set_thread_flag(TIF_POLLING_NRFLAG);
45
46 /* endless idle loop with no priority at all */
47 while (1) {
48 tick_nohz_idle_enter();
49 rcu_idle_enter();
50
51 while (!need_resched()) {
52 check_pgt_cache();
53 rmb();
54
55 clear_thread_flag(TIF_POLLING_NRFLAG);
56
57 local_irq_disable();
58 /* Don't trace irqs off for idle */
59 stop_critical_timings();
60 if (!need_resched() && powersave != NULL)
61 powersave();
62 start_critical_timings();
63 local_irq_enable();
64 set_thread_flag(TIF_POLLING_NRFLAG);
65 }
66
67 rcu_idle_exit();
68 tick_nohz_idle_exit();
69 preempt_enable_no_resched();
70 schedule();
71 preempt_disable();
72 }
73}
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index e7fdc50c4bf0..b3cbc6703837 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -43,6 +43,7 @@
43#include <asm/kmap_types.h> 43#include <asm/kmap_types.h>
44#include <asm/fixmap.h> 44#include <asm/fixmap.h>
45#include <asm/tlbflush.h> 45#include <asm/tlbflush.h>
46#include <asm/sections.h>
46 47
47int mem_init_done; 48int mem_init_done;
48 49
@@ -201,9 +202,6 @@ void __init paging_init(void)
201 202
202/* References to section boundaries */ 203/* References to section boundaries */
203 204
204extern char _stext, _etext, _edata, __bss_start, _end;
205extern char __init_begin, __init_end;
206
207static int __init free_pages_init(void) 205static int __init free_pages_init(void)
208{ 206{
209 int reservedpages, pfn; 207 int reservedpages, pfn;
@@ -263,30 +261,11 @@ void __init mem_init(void)
263#ifdef CONFIG_BLK_DEV_INITRD 261#ifdef CONFIG_BLK_DEV_INITRD
264void free_initrd_mem(unsigned long start, unsigned long end) 262void free_initrd_mem(unsigned long start, unsigned long end)
265{ 263{
266 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", 264 free_reserved_area(start, end, 0, "initrd");
267 (end - start) >> 10);
268
269 for (; start < end; start += PAGE_SIZE) {
270 ClearPageReserved(virt_to_page(start));
271 init_page_count(virt_to_page(start));
272 free_page(start);
273 totalram_pages++;
274 }
275} 265}
276#endif 266#endif
277 267
278void free_initmem(void) 268void free_initmem(void)
279{ 269{
280 unsigned long addr; 270 free_initmem_default(0);
281
282 addr = (unsigned long)(&__init_begin);
283 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
284 ClearPageReserved(virt_to_page(addr));
285 init_page_count(virt_to_page(addr));
286 free_page(addr);
287 totalram_pages++;
288 }
289 printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
290 ((unsigned long)&__init_end -
291 (unsigned long)&__init_begin) >> 10);
292} 271}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a9ff712a2864..0339181bf3ac 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -21,7 +21,7 @@ config PARISC
21 select GENERIC_STRNCPY_FROM_USER 21 select GENERIC_STRNCPY_FROM_USER
22 select SYSCTL_ARCH_UNALIGN_ALLOW 22 select SYSCTL_ARCH_UNALIGN_ALLOW
23 select HAVE_MOD_ARCH_SPECIFIC 23 select HAVE_MOD_ARCH_SPECIFIC
24 select HAVE_VIRT_TO_BUS 24 select VIRT_TO_BUS
25 select MODULES_USE_ELF_RELA 25 select MODULES_USE_ELF_RELA
26 select CLONE_BACKWARDS 26 select CLONE_BACKWARDS
27 select TTY # Needed for pdc_cons.c 27 select TTY # Needed for pdc_cons.c
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 01d95e2f0581..113e28206503 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER
65endif 65endif
66 66
67# Use long jumps instead of long branches (needed if your linker fails to 67# Use long jumps instead of long branches (needed if your linker fails to
68# link a too big vmlinux executable) 68# link a too big vmlinux executable). Not enabled for building modules.
69cflags-$(CONFIG_MLONGCALLS) += -mlong-calls 69ifdef CONFIG_MLONGCALLS
70KBUILD_CFLAGS_KERNEL += -mlong-calls
71endif
70 72
71# select which processor to optimise for 73# select which processor to optimise for
72cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 74cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 79f694f3ad9b..f0e2784e7cca 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
140 return page_address(page); 140 return page_address(page);
141} 141}
142 142
143#define kunmap(page) kunmap_parisc(page_address(page)) 143static inline void kunmap(struct page *page)
144{
145 kunmap_parisc(page_address(page));
146}
144 147
145static inline void *kmap_atomic(struct page *page) 148static inline void *kmap_atomic(struct page *page)
146{ 149{
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 7df49fad29f9..1e40d7f86be3 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/cache.h> 17#include <asm/cache.h>
18 18
19extern spinlock_t pa_dbit_lock;
20
19/* 21/*
20 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel 22 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
21 * memory. For the return value to be meaningful, ADDR must be >= 23 * memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
44 46
45#define set_pte_at(mm, addr, ptep, pteval) \ 47#define set_pte_at(mm, addr, ptep, pteval) \
46 do { \ 48 do { \
49 unsigned long flags; \
50 spin_lock_irqsave(&pa_dbit_lock, flags); \
47 set_pte(ptep, pteval); \ 51 set_pte(ptep, pteval); \
48 purge_tlb_entries(mm, addr); \ 52 purge_tlb_entries(mm, addr); \
53 spin_unlock_irqrestore(&pa_dbit_lock, flags); \
49 } while (0) 54 } while (0)
50 55
51#endif /* !__ASSEMBLY__ */ 56#endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
435 440
436static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 441static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
437{ 442{
438#ifdef CONFIG_SMP 443 pte_t pte;
444 unsigned long flags;
445
439 if (!pte_young(*ptep)) 446 if (!pte_young(*ptep))
440 return 0; 447 return 0;
441 return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); 448
442#else 449 spin_lock_irqsave(&pa_dbit_lock, flags);
443 pte_t pte = *ptep; 450 pte = *ptep;
444 if (!pte_young(pte)) 451 if (!pte_young(pte)) {
452 spin_unlock_irqrestore(&pa_dbit_lock, flags);
445 return 0; 453 return 0;
446 set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); 454 }
455 set_pte(ptep, pte_mkold(pte));
456 purge_tlb_entries(vma->vm_mm, addr);
457 spin_unlock_irqrestore(&pa_dbit_lock, flags);
447 return 1; 458 return 1;
448#endif
449} 459}
450 460
451extern spinlock_t pa_dbit_lock;
452
453struct mm_struct; 461struct mm_struct;
454static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 462static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
455{ 463{
456 pte_t old_pte; 464 pte_t old_pte;
465 unsigned long flags;
457 466
458 spin_lock(&pa_dbit_lock); 467 spin_lock_irqsave(&pa_dbit_lock, flags);
459 old_pte = *ptep; 468 old_pte = *ptep;
460 pte_clear(mm,addr,ptep); 469 pte_clear(mm,addr,ptep);
461 spin_unlock(&pa_dbit_lock); 470 purge_tlb_entries(mm, addr);
471 spin_unlock_irqrestore(&pa_dbit_lock, flags);
462 472
463 return old_pte; 473 return old_pte;
464} 474}
465 475
466static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 476static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
467{ 477{
468#ifdef CONFIG_SMP 478 unsigned long flags;
469 unsigned long new, old; 479 spin_lock_irqsave(&pa_dbit_lock, flags);
470 480 set_pte(ptep, pte_wrprotect(*ptep));
471 do {
472 old = pte_val(*ptep);
473 new = pte_val(pte_wrprotect(__pte (old)));
474 } while (cmpxchg((unsigned long *) ptep, old, new) != old);
475 purge_tlb_entries(mm, addr); 481 purge_tlb_entries(mm, addr);
476#else 482 spin_unlock_irqrestore(&pa_dbit_lock, flags);
477 pte_t old_pte = *ptep;
478 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
479#endif
480} 483}
481 484
482#define pte_same(A,B) (pte_val(A) == pte_val(B)) 485#define pte_same(A,B) (pte_val(A) == pte_val(B))
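
Annotation (not part of the patch): the pgtable.h hunks above converge on one pattern: every PTE modification and its TLB purge now happen under pa_dbit_lock taken with irqsave, so the write and the purge form a single step with respect to local interrupts and to other CPUs, and the separate SMP-only cmpxchg/test_and_clear_bit variants can go away. The common shape, written out as a sketch rather than the exact header code:

	/* illustrative helper, not in the patch */
	static inline void pte_update_locked(struct mm_struct *mm, unsigned long addr,
					     pte_t *ptep, pte_t newval)
	{
		unsigned long flags;

		spin_lock_irqsave(&pa_dbit_lock, flags);
		set_pte(ptep, newval);		/* update the PTE ...              */
		purge_tlb_entries(mm, addr);	/* ... then drop stale TLB entries */
		spin_unlock_irqrestore(&pa_dbit_lock, flags);
	}
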
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index d1fb79a36f3d..6182832e5b6c 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -77,8 +77,6 @@ struct thread_info {
77#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ 77#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
78 _TIF_BLOCKSTEP) 78 _TIF_BLOCKSTEP)
79 79
80#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
81
82#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
83 81
84#endif /* _ASM_PARISC_THREAD_INFO_H */ 82#endif /* _ASM_PARISC_THREAD_INFO_H */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4ba2c93770f1..e0a82358517e 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -181,30 +181,24 @@ struct exception_data {
181#if !defined(CONFIG_64BIT) 181#if !defined(CONFIG_64BIT)
182 182
183#define __put_kernel_asm64(__val,ptr) do { \ 183#define __put_kernel_asm64(__val,ptr) do { \
184 u64 __val64 = (u64)(__val); \
185 u32 hi = (__val64) >> 32; \
186 u32 lo = (__val64) & 0xffffffff; \
187 __asm__ __volatile__ ( \ 184 __asm__ __volatile__ ( \
188 "\n1:\tstw %2,0(%1)" \ 185 "\n1:\tstw %2,0(%1)" \
189 "\n2:\tstw %3,4(%1)\n\t" \ 186 "\n2:\tstw %R2,4(%1)\n\t" \
190 ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ 187 ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
191 ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ 188 ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
192 : "=r"(__pu_err) \ 189 : "=r"(__pu_err) \
193 : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ 190 : "r"(ptr), "r"(__val), "0"(__pu_err) \
194 : "r1"); \ 191 : "r1"); \
195} while (0) 192} while (0)
196 193
197#define __put_user_asm64(__val,ptr) do { \ 194#define __put_user_asm64(__val,ptr) do { \
198 u64 __val64 = (u64)(__val); \
199 u32 hi = (__val64) >> 32; \
200 u32 lo = (__val64) & 0xffffffff; \
201 __asm__ __volatile__ ( \ 195 __asm__ __volatile__ ( \
202 "\n1:\tstw %2,0(%%sr3,%1)" \ 196 "\n1:\tstw %2,0(%%sr3,%1)" \
203 "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ 197 "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
204 ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ 198 ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
205 ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ 199 ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
206 : "=r"(__pu_err) \ 200 : "=r"(__pu_err) \
207 : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ 201 : "r"(ptr), "r"(__val), "0"(__pu_err) \
208 : "r1"); \ 202 : "r1"); \
209} while (0) 203} while (0)
210 204
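
Annotation (not part of the patch): the uaccess.h hunks drop the manual hi/lo split of the 64-bit value and pass it as one "r" operand, using GCC's %N / %RN operand-modifier pair to name the two halves of the register pair (the rewritten templates store %2 and then %R2). A minimal illustration of the same idiom, assuming a 32-bit big-endian parisc target where a u64 "r" operand occupies a register pair; the helper name is invented:

	static inline void store_u64_pair(unsigned long long val, unsigned int *p)
	{
		__asm__ __volatile__(
			"stw %1, 0(%0)\n\t"	/* first register of the pair  */
			"stw %R1, 4(%0)"	/* second register of the pair */
			: : "r" (p), "r" (val) : "memory");
	}
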
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 4b12890642eb..83ded26cad06 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
421 /* Note: purge_tlb_entries can be called at startup with 421 /* Note: purge_tlb_entries can be called at startup with
422 no context. */ 422 no context. */
423 423
424 /* Disable preemption while we play with %sr1. */
425 preempt_disable();
426 mtsp(mm->context, 1);
427 purge_tlb_start(flags); 424 purge_tlb_start(flags);
425 mtsp(mm->context, 1);
428 pdtlb(addr); 426 pdtlb(addr);
429 pitlb(addr); 427 pitlb(addr);
430 purge_tlb_end(flags); 428 purge_tlb_end(flags);
431 preempt_enable();
432} 429}
433EXPORT_SYMBOL(purge_tlb_entries); 430EXPORT_SYMBOL(purge_tlb_entries);
434 431
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6795dc6c995f..568b2c61ea02 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -120,11 +120,13 @@ extern void __ashrdi3(void);
120extern void __ashldi3(void); 120extern void __ashldi3(void);
121extern void __lshrdi3(void); 121extern void __lshrdi3(void);
122extern void __muldi3(void); 122extern void __muldi3(void);
123extern void __ucmpdi2(void);
123 124
124EXPORT_SYMBOL(__ashrdi3); 125EXPORT_SYMBOL(__ashrdi3);
125EXPORT_SYMBOL(__ashldi3); 126EXPORT_SYMBOL(__ashldi3);
126EXPORT_SYMBOL(__lshrdi3); 127EXPORT_SYMBOL(__lshrdi3);
127EXPORT_SYMBOL(__muldi3); 128EXPORT_SYMBOL(__muldi3);
129EXPORT_SYMBOL(__ucmpdi2);
128 130
129asmlinkage void * __canonicalize_funcptr_for_compare(void *); 131asmlinkage void * __canonicalize_funcptr_for_compare(void *);
130EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); 132EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d13507246c5d..55f92b614182 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -59,28 +59,6 @@
59#include <asm/unwind.h> 59#include <asm/unwind.h>
60#include <asm/sections.h> 60#include <asm/sections.h>
61 61
62/*
63 * The idle thread. There's no useful work to be
64 * done, so just try to conserve power and have a
65 * low exit latency (ie sit in a loop waiting for
66 * somebody to say that they'd like to reschedule)
67 */
68void cpu_idle(void)
69{
70 set_thread_flag(TIF_POLLING_NRFLAG);
71
72 /* endless idle loop with no priority at all */
73 while (1) {
74 rcu_idle_enter();
75 while (!need_resched())
76 barrier();
77 rcu_idle_exit();
78 schedule_preempt_disabled();
79 check_pgt_cache();
80 }
81}
82
83
84#define COMMAND_GLOBAL F_EXTEND(0xfffe0030) 62#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
85#define CMD_RESET 5 /* reset any module */ 63#define CMD_RESET 5 /* reset any module */
86 64
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 6266730efd61..fd1bb1519c2b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -329,7 +329,7 @@ void __init smp_callin(void)
329 329
330 local_irq_enable(); /* Interrupts have been off until now */ 330 local_irq_enable(); /* Interrupts have been off until now */
331 331
332 cpu_idle(); /* Wait for timer to schedule some work */ 332 cpu_startup_entry(CPUHP_ONLINE);
333 333
334 /* NOTREACHED */ 334 /* NOTREACHED */
335 panic("smp_callin() AAAAaaaaahhhh....\n"); 335 panic("smp_callin() AAAAaaaaahhhh....\n");
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5f2e6904d14a..5651536ac733 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,6 +2,7 @@
2# Makefile for parisc-specific library files 2# Makefile for parisc-specific library files
3# 3#
4 4
5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o 5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
6 ucmpdi2.o
6 7
7obj-y := iomap.o 8obj-y := iomap.o
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 000000000000..149c016f32c5
--- /dev/null
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,25 @@
1#include <linux/module.h>
2
3union ull_union {
4 unsigned long long ull;
5 struct {
6 unsigned int high;
7 unsigned int low;
8 } ui;
9};
10
11int __ucmpdi2(unsigned long long a, unsigned long long b)
12{
13 union ull_union au = {.ull = a};
14 union ull_union bu = {.ull = b};
15
16 if (au.ui.high < bu.ui.high)
17 return 0;
18 else if (au.ui.high > bu.ui.high)
19 return 2;
20 if (au.ui.low < bu.ui.low)
21 return 0;
22 else if (au.ui.low > bu.ui.low)
23 return 2;
24 return 1;
25}
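
Annotation (not part of the patch): __ucmpdi2() is the libgcc-style helper the compiler emits for unsigned 64-bit comparisons on a 32-bit target; as the new file shows, it returns 0, 1 or 2 for less-than, equal and greater-than. Providing and exporting it (see the parisc_ksyms.c hunk above) keeps code and modules containing such comparisons linkable. A sketch of the call the compiler effectively generates; the wrapper name is invented:

	extern int __ucmpdi2(unsigned long long a, unsigned long long b);

	static int u64_less_than(unsigned long long a, unsigned long long b)
	{
		return __ucmpdi2(a, b) == 0;	/* 0 => a < b, 1 => a == b, 2 => a > b */
	}
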
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3ac462de53a4..157b931e7b09 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -505,7 +505,6 @@ static void __init map_pages(unsigned long start_vaddr,
505 505
506void free_initmem(void) 506void free_initmem(void)
507{ 507{
508 unsigned long addr;
509 unsigned long init_begin = (unsigned long)__init_begin; 508 unsigned long init_begin = (unsigned long)__init_begin;
510 unsigned long init_end = (unsigned long)__init_end; 509 unsigned long init_end = (unsigned long)__init_end;
511 510
@@ -533,19 +532,10 @@ void free_initmem(void)
533 * pages are no-longer executable */ 532 * pages are no-longer executable */
534 flush_icache_range(init_begin, init_end); 533 flush_icache_range(init_begin, init_end);
535 534
536 for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { 535 num_physpages += free_initmem_default(0);
537 ClearPageReserved(virt_to_page(addr));
538 init_page_count(virt_to_page(addr));
539 free_page(addr);
540 num_physpages++;
541 totalram_pages++;
542 }
543 536
544 /* set up a new led state on systems shipped LED State panel */ 537 /* set up a new led state on systems shipped LED State panel */
545 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 538 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
546
547 printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
548 (init_end - init_begin) >> 10);
549} 539}
550 540
551 541
@@ -697,6 +687,8 @@ void show_mem(unsigned int filter)
697 687
698 printk(KERN_INFO "Mem-info:\n"); 688 printk(KERN_INFO "Mem-info:\n");
699 show_free_areas(filter); 689 show_free_areas(filter);
690 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
691 return;
700#ifndef CONFIG_DISCONTIGMEM 692#ifndef CONFIG_DISCONTIGMEM
701 i = max_mapnr; 693 i = max_mapnr;
702 while (i-- > 0) { 694 while (i-- > 0) {
@@ -1107,15 +1099,6 @@ void flush_tlb_all(void)
1107#ifdef CONFIG_BLK_DEV_INITRD 1099#ifdef CONFIG_BLK_DEV_INITRD
1108void free_initrd_mem(unsigned long start, unsigned long end) 1100void free_initrd_mem(unsigned long start, unsigned long end)
1109{ 1101{
1110 if (start >= end) 1102 num_physpages += free_reserved_area(start, end, 0, "initrd");
1111 return;
1112 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
1113 for (; start < end; start += PAGE_SIZE) {
1114 ClearPageReserved(virt_to_page(start));
1115 init_page_count(virt_to_page(start));
1116 free_page(start);
1117 num_physpages++;
1118 totalram_pages++;
1119 }
1120} 1103}
1121#endif 1104#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b89d7eb730a2..ea5bb045983a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config GENERIC_GPIO
90config PPC 90config PPC
91 bool 91 bool
92 default y 92 default y
93 select BINFMT_ELF
93 select OF 94 select OF
94 select OF_EARLY_FLATTREE 95 select OF_EARLY_FLATTREE
95 select HAVE_FTRACE_MCOUNT_RECORD 96 select HAVE_FTRACE_MCOUNT_RECORD
@@ -98,7 +99,7 @@ config PPC
98 select HAVE_FUNCTION_GRAPH_TRACER 99 select HAVE_FUNCTION_GRAPH_TRACER
99 select SYSCTL_EXCEPTION_TRACE 100 select SYSCTL_EXCEPTION_TRACE
100 select ARCH_WANT_OPTIONAL_GPIOLIB 101 select ARCH_WANT_OPTIONAL_GPIOLIB
101 select HAVE_VIRT_TO_BUS if !PPC64 102 select VIRT_TO_BUS if !PPC64
102 select HAVE_IDE 103 select HAVE_IDE
103 select HAVE_IOREMAP_PROT 104 select HAVE_IOREMAP_PROT
104 select HAVE_EFFICIENT_UNALIGNED_ACCESS 105 select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a32c4c2..4fcbd6b14a3a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
3 3
4#ifdef CONFIG_HUGETLB_PAGE 4#ifdef CONFIG_HUGETLB_PAGE
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm-generic/hugetlb.h>
6 7
7extern struct kmem_cache *hugepte_cache; 8extern struct kmem_cache *hugepte_cache;
8 9
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a19efd..b59e06f507ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
343/* 343/*
344 * VSID allocation (256MB segment) 344 * VSID allocation (256MB segment)
345 * 345 *
346 * We first generate a 38-bit "proto-VSID". For kernel addresses this 346 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
347 * is equal to the ESID | 1 << 37, for user addresses it is: 347 * from mmu context id and effective segment id of the address.
348 * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
349 * 348 *
350 * This splits the proto-VSID into the below range 349 * For user processes max context id is limited to ((1ul << 19) - 5)
351 * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range 350 * for kernel space, we use the top 4 context ids to map address as below
352 * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range 351 * NOTE: each context only support 64TB now.
353 * 352 * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
354 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 353 * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
355 * That is, we assign half of the space to user processes and half 354 * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
356 * to the kernel. 355 * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
357 * 356 *
358 * The proto-VSIDs are then scrambled into real VSIDs with the 357 * The proto-VSIDs are then scrambled into real VSIDs with the
359 * multiplicative hash: 358 * multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
363 * VSID_MULTIPLIER is prime, so in particular it is 362 * VSID_MULTIPLIER is prime, so in particular it is
364 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. 363 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
365 * Because the modulus is 2^n-1 we can compute it efficiently without 364 * Because the modulus is 2^n-1 we can compute it efficiently without
366 * a divide or extra multiply (see below). 365 * a divide or extra multiply (see below). The scramble function gives
367 * 366 * robust scattering in the hash table (at least based on some initial
368 * This scheme has several advantages over older methods: 367 * results).
369 *
370 * - We have VSIDs allocated for every kernel address
371 * (i.e. everything above 0xC000000000000000), except the very top
372 * segment, which simplifies several things.
373 * 368 *
374 * - We allow for USER_ESID_BITS significant bits of ESID and 369 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
375 * CONTEXT_BITS bits of context for user addresses. 370 * bad address. This enables us to consolidate bad address handling in
376 * i.e. 64T (46 bits) of address space for up to half a million contexts. 371 * hash_page.
377 * 372 *
378 * - The scramble function gives robust scattering in the hash 373 * We also need to avoid the last segment of the last context, because that
379 * table (at least based on some initial results). The previous 374 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
380 * method was more susceptible to pathological cases giving excessive 375 * because of the modulo operation in vsid scramble. But the vmemmap
381 * hash collisions. 376 * (which is what uses region 0xf) will never be close to 64TB in size
377 * (it's 56 bytes per page of system memory).
382 */ 378 */
383 379
380#define CONTEXT_BITS 19
381#define ESID_BITS 18
382#define ESID_BITS_1T 6
383
384/*
385 * 256MB segment
386 * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
387 * available for user + kernel mapping. The top 4 contexts are used for
388 * kernel mapping. Each segment contains 2^28 bytes. Each
389 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
390 * (19 == 37 + 28 - 46).
391 */
392#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
393
384/* 394/*
385 * This should be computed such that protovosid * vsid_mulitplier 395 * This should be computed such that protovosid * vsid_mulitplier
386 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus 396 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
387 */ 397 */
388#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ 398#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
389#define VSID_BITS_256M 38 399#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
390#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) 400#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
391 401
392#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ 402#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
393#define VSID_BITS_1T 26 403#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
394#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) 404#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
395 405
396#define CONTEXT_BITS 19
397#define USER_ESID_BITS 18
398#define USER_ESID_BITS_1T 6
399 406
400#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 407#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
401 408
402/* 409/*
403 * This macro generates asm code to compute the VSID scramble 410 * This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
421 srdi rx,rt,VSID_BITS_##size; \ 428 srdi rx,rt,VSID_BITS_##size; \
422 clrldi rt,rt,(64-VSID_BITS_##size); \ 429 clrldi rt,rt,(64-VSID_BITS_##size); \
423 add rt,rt,rx; /* add high and low bits */ \ 430 add rt,rt,rx; /* add high and low bits */ \
424 /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ 431 /* NOTE: explanation based on VSID_BITS_##size = 36 \
432 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
425 * 2^36-1+2^28-1. That in particular means that if r3 >= \ 433 * 2^36-1+2^28-1. That in particular means that if r3 >= \
426 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ 434 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
427 * the bit clear, r3 already has the answer we want, if it \ 435 * the bit clear, r3 already has the answer we want, if it \
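
Annotation (not part of the patch): the added NOTE pins the worked numbers in this comment to VSID_BITS = 36, since VSID_BITS_256M is now CONTEXT_BITS + ESID_BITS = 37 rather than the old constant 38, so the figures are illustrative. The trick the whole block relies on is that reduction modulo 2^k - 1 needs no division: fold the high k bits onto the low k bits and apply at most one correction. A small user-space check of that identity for k = 36, where one correction is always enough for a 64-bit operand:

	#include <stdio.h>

	static unsigned long mod_mersenne(unsigned long x, unsigned int k)
	{
		unsigned long m = (1UL << k) - 1;

		x = (x >> k) + (x & m);	/* fold the high bits back in */
		if (x >= m)
			x -= m;		/* single correction suffices here */
		return x;
	}

	int main(void)
	{
		unsigned long x = 0x123456789abUL;

		printf("%lu %lu\n", mod_mersenne(x, 36), x % ((1UL << 36) - 1));
		return 0;	/* both columns print the same value */
	}
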
@@ -513,34 +521,6 @@ typedef struct {
513 }) 521 })
514#endif /* 1 */ 522#endif /* 1 */
515 523
516/*
517 * This is only valid for addresses >= PAGE_OFFSET
518 * The proto-VSID space is divided into two class
519 * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
520 * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
521 *
522 * With KERNEL_START at 0xc000000000000000, the proto vsid for
523 * the kernel ends up with 0xc00000000 (36 bits). With 64TB
524 * support we need to have kernel proto-VSID in the
525 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
526 */
527static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
528{
529 unsigned long proto_vsid;
530 /*
531 * We need to make sure proto_vsid for the kernel is
532 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
533 */
534 if (ssize == MMU_SEGSIZE_256M) {
535 proto_vsid = ea >> SID_SHIFT;
536 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
537 return vsid_scramble(proto_vsid, 256M);
538 }
539 proto_vsid = ea >> SID_SHIFT_1T;
540 proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
541 return vsid_scramble(proto_vsid, 1T);
542}
543
544/* Returns the segment size indicator for a user address */ 524/* Returns the segment size indicator for a user address */
545static inline int user_segment_size(unsigned long addr) 525static inline int user_segment_size(unsigned long addr)
546{ 526{
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
550 return MMU_SEGSIZE_256M; 530 return MMU_SEGSIZE_256M;
551} 531}
552 532
553/* This is only valid for user addresses (which are below 2^44) */
554static inline unsigned long get_vsid(unsigned long context, unsigned long ea, 533static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
555 int ssize) 534 int ssize)
556{ 535{
536 /*
537 * Bad address. We return VSID 0 for that
538 */
539 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
540 return 0;
541
557 if (ssize == MMU_SEGSIZE_256M) 542 if (ssize == MMU_SEGSIZE_256M)
558 return vsid_scramble((context << USER_ESID_BITS) 543 return vsid_scramble((context << ESID_BITS)
559 | (ea >> SID_SHIFT), 256M); 544 | (ea >> SID_SHIFT), 256M);
560 return vsid_scramble((context << USER_ESID_BITS_1T) 545 return vsid_scramble((context << ESID_BITS_1T)
561 | (ea >> SID_SHIFT_1T), 1T); 546 | (ea >> SID_SHIFT_1T), 1T);
562} 547}
563 548
549/*
550 * This is only valid for addresses >= PAGE_OFFSET
551 *
552 * For kernel space, we use the top 4 context ids to map address as below
553 * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
554 * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
555 * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
556 * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
557 */
558static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
559{
560 unsigned long context;
561
562 /*
563 * kernel take the top 4 context from the available range
564 */
565 context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
566 return get_vsid(context, ea, ssize);
567}
564#endif /* __ASSEMBLY__ */ 568#endif /* __ASSEMBLY__ */
565 569
566#endif /* _ASM_POWERPC_MMU_HASH64_H_ */ 570#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
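
Annotation (not part of the patch): with the rewrite above, kernel VSIDs are no longer formed by setting a high proto-VSID bit; get_kernel_vsid() simply picks one of the top four context ids and reuses get_vsid(). A stand-alone check (64-bit host assumed) that the formula lands on the context ids listed in the new comment, with MAX_USER_CONTEXT = 2^19 - 5:

	#include <stdio.h>

	#define CONTEXT_BITS		19
	#define MAX_USER_CONTEXT	((1UL << CONTEXT_BITS) - 5)

	int main(void)
	{
		static const unsigned long ea[] = {
			0xc000000000000000UL, 0xd000000000000000UL,
			0xe000000000000000UL, 0xf000000000000000UL,
		};
		int i;

		for (i = 0; i < 4; i++) {
			unsigned long context = MAX_USER_CONTEXT + ((ea[i] >> 60) - 0xc) + 1;

			printf("ea %#018lx -> context %#lx\n", ea[i], context);
		}
		return 0;	/* contexts 0x7fffc, 0x7fffd, 0x7fffe, 0x7ffff */
	}
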
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 406b7b9a1341..8ceea14d6fe4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags)
182#define is_32bit_task() (1) 182#define is_32bit_task() (1)
183#endif 183#endif
184 184
185#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
186
187#endif /* !__ASSEMBLY__ */ 185#endif /* !__ASSEMBLY__ */
188 186
189#endif /* __KERNEL__ */ 187#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index b532060d0916..23016020915e 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
51extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); 51extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
52extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 52extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
53extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 53extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
54extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
54#endif /* _ASM_UPROBES_H */ 55#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71b895d..19599ef352bc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
275 .cpu_features = CPU_FTRS_PPC970, 275 .cpu_features = CPU_FTRS_PPC970,
276 .cpu_user_features = COMMON_USER_POWER4 | 276 .cpu_user_features = COMMON_USER_POWER4 |
277 PPC_FEATURE_HAS_ALTIVEC_COMP, 277 PPC_FEATURE_HAS_ALTIVEC_COMP,
278 .mmu_features = MMU_FTR_HPTE_TABLE, 278 .mmu_features = MMU_FTRS_PPC970,
279 .icache_bsize = 128, 279 .icache_bsize = 128,
280 .dcache_bsize = 128, 280 .dcache_bsize = 128,
281 .num_pmcs = 8, 281 .num_pmcs = 8,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index b3ba5163eae2..9ec3fe174cba 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
150 if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) 150 if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
151 continue; 151 continue;
152 152
153 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); 153 free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
154 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
155 free_page((unsigned long)__va(addr));
156 totalram_pages++;
157 } 154 }
158} 155}
159#endif 156#endif
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 256c5bf0adb7..04d69c4a5ac2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
304 subi r12,r12,TI_FLAGS 304 subi r12,r12,TI_FLAGS
305 305
3064: /* Anything else left to do? */ 3064: /* Anything else left to do? */
307 SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ 307 SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
308 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) 308 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
309 beq .ret_from_except_lite 309 beq .ret_from_except_lite
310 310
@@ -657,7 +657,7 @@ resume_kernel:
657 /* Clear _TIF_EMULATE_STACK_STORE flag */ 657 /* Clear _TIF_EMULATE_STACK_STORE flag */
658 lis r11,_TIF_EMULATE_STACK_STORE@h 658 lis r11,_TIF_EMULATE_STACK_STORE@h
659 addi r5,r9,TI_FLAGS 659 addi r5,r9,TI_FLAGS
660 ldarx r4,0,r5 6600: ldarx r4,0,r5
661 andc r4,r4,r11 661 andc r4,r4,r11
662 stdcx. r4,0,r5 662 stdcx. r4,0,r5
663 bne- 0b 663 bne- 0b
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab8594d9f..d44a571e45a7 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
23#include <asm/code-patching.h> 23#include <asm/code-patching.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25 25
26#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
26extern void epapr_ev_idle(void); 27extern void epapr_ev_idle(void);
27extern u32 epapr_ev_idle_start[]; 28extern u32 epapr_ev_idle_start[];
29#endif
28 30
29bool epapr_paravirt_enabled; 31bool epapr_paravirt_enabled;
30 32
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
47 49
48 for (i = 0; i < (len / 4); i++) { 50 for (i = 0; i < (len / 4); i++) {
49 patch_instruction(epapr_hypercall_start + i, insts[i]); 51 patch_instruction(epapr_hypercall_start + i, insts[i]);
52#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
50 patch_instruction(epapr_ev_idle_start + i, insts[i]); 53 patch_instruction(epapr_ev_idle_start + i, insts[i]);
54#endif
51 } 55 }
52 56
57#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
53 if (of_get_property(hyper_node, "has-idle", NULL)) 58 if (of_get_property(hyper_node, "has-idle", NULL))
54 ppc_md.power_save = epapr_ev_idle; 59 ppc_md.power_save = epapr_ev_idle;
60#endif
55 61
56 epapr_paravirt_enabled = true; 62 epapr_paravirt_enabled = true;
57 63
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 87ef8f5ee5bc..56bd92362ce1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
1066#endif /* __DISABLED__ */ 1066#endif /* __DISABLED__ */
1067 1067
1068 1068
1069/*
1070 * r13 points to the PACA, r9 contains the saved CR,
1071 * r12 contain the saved SRR1, SRR0 is still ready for return
1072 * r3 has the faulting address
1073 * r9 - r13 are saved in paca->exslb.
1074 * r3 is saved in paca->slb_r3
1075 * We assume we aren't going to take any exceptions during this procedure.
1076 */
1077_GLOBAL(slb_miss_realmode)
1078 mflr r10
1079#ifdef CONFIG_RELOCATABLE
1080 mtctr r11
1081#endif
1082
1083 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1084 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1085
1086 bl .slb_allocate_realmode
1087
1088 /* All done -- return from exception. */
1089
1090 ld r10,PACA_EXSLB+EX_LR(r13)
1091 ld r3,PACA_EXSLB+EX_R3(r13)
1092 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1093
1094 mtlr r10
1095
1096 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1097 beq- 2f
1098
1099.machine push
1100.machine "power4"
1101 mtcrf 0x80,r9
1102 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1103.machine pop
1104
1105 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1106 ld r9,PACA_EXSLB+EX_R9(r13)
1107 ld r10,PACA_EXSLB+EX_R10(r13)
1108 ld r11,PACA_EXSLB+EX_R11(r13)
1109 ld r12,PACA_EXSLB+EX_R12(r13)
1110 ld r13,PACA_EXSLB+EX_R13(r13)
1111 rfid
1112 b . /* prevent speculative execution */
1113
11142: mfspr r11,SPRN_SRR0
1115 ld r10,PACAKBASE(r13)
1116 LOAD_HANDLER(r10,unrecov_slb)
1117 mtspr SPRN_SRR0,r10
1118 ld r10,PACAKMSR(r13)
1119 mtspr SPRN_SRR1,r10
1120 rfid
1121 b .
1122
1123unrecov_slb:
1124 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1125 DISABLE_INTS
1126 bl .save_nvgprs
11271: addi r3,r1,STACK_FRAME_OVERHEAD
1128 bl .unrecoverable_exception
1129 b 1b
1130
1131
1132#ifdef CONFIG_PPC_970_NAP
1133power4_fixup_nap:
1134 andc r9,r9,r10
1135 std r9,TI_LOCAL_FLAGS(r11)
1136 ld r10,_LINK(r1) /* make idle task do the */
1137 std r10,_NIP(r1) /* equivalent of a blr */
1138 blr
1139#endif
1140
1141 .align 7 1069 .align 7
1142 .globl alignment_common 1070 .globl alignment_common
1143alignment_common: 1071alignment_common:
@@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler)
1336 1264
1337 1265
1338/* 1266/*
1267 * r13 points to the PACA, r9 contains the saved CR,
1268 * r12 contain the saved SRR1, SRR0 is still ready for return
1269 * r3 has the faulting address
1270 * r9 - r13 are saved in paca->exslb.
1271 * r3 is saved in paca->slb_r3
1272 * We assume we aren't going to take any exceptions during this procedure.
1273 */
1274_GLOBAL(slb_miss_realmode)
1275 mflr r10
1276#ifdef CONFIG_RELOCATABLE
1277 mtctr r11
1278#endif
1279
1280 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1281 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1282
1283 bl .slb_allocate_realmode
1284
1285 /* All done -- return from exception. */
1286
1287 ld r10,PACA_EXSLB+EX_LR(r13)
1288 ld r3,PACA_EXSLB+EX_R3(r13)
1289 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1290
1291 mtlr r10
1292
1293 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1294 beq- 2f
1295
1296.machine push
1297.machine "power4"
1298 mtcrf 0x80,r9
1299 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1300.machine pop
1301
1302 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1303 ld r9,PACA_EXSLB+EX_R9(r13)
1304 ld r10,PACA_EXSLB+EX_R10(r13)
1305 ld r11,PACA_EXSLB+EX_R11(r13)
1306 ld r12,PACA_EXSLB+EX_R12(r13)
1307 ld r13,PACA_EXSLB+EX_R13(r13)
1308 rfid
1309 b . /* prevent speculative execution */
1310
13112: mfspr r11,SPRN_SRR0
1312 ld r10,PACAKBASE(r13)
1313 LOAD_HANDLER(r10,unrecov_slb)
1314 mtspr SPRN_SRR0,r10
1315 ld r10,PACAKMSR(r13)
1316 mtspr SPRN_SRR1,r10
1317 rfid
1318 b .
1319
1320unrecov_slb:
1321 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1322 DISABLE_INTS
1323 bl .save_nvgprs
13241: addi r3,r1,STACK_FRAME_OVERHEAD
1325 bl .unrecoverable_exception
1326 b 1b
1327
1328
1329#ifdef CONFIG_PPC_970_NAP
1330power4_fixup_nap:
1331 andc r9,r9,r10
1332 std r9,TI_LOCAL_FLAGS(r11)
1333 ld r10,_LINK(r1) /* make idle task do the */
1334 std r10,_NIP(r1) /* equivalent of a blr */
1335 blr
1336#endif
1337
1338/*
1339 * Hash table stuff 1339 * Hash table stuff
1340 */ 1340 */
1341 .align 7 1341 .align 7
@@ -1452,20 +1452,36 @@ do_ste_alloc:
1452_GLOBAL(do_stab_bolted) 1452_GLOBAL(do_stab_bolted)
1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1455 mfspr r11,SPRN_DAR /* ea */
1455 1456
1457 /*
1458 * check for bad kernel/user address
1459 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1460 */
1461 rldicr. r9,r11,4,(63 - 46 - 4)
1462 li r9,0 /* VSID = 0 for bad address */
1463 bne- 0f
1464
1465 /*
1466 * Calculate VSID:
1467 * This is the kernel vsid, we take the top for context from
1468 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1469 * Here we know that (ea >> 60) == 0xc
1470 */
1471 lis r9,(MAX_USER_CONTEXT + 1)@ha
1472 addi r9,r9,(MAX_USER_CONTEXT + 1)@l
1473
1474 srdi r10,r11,SID_SHIFT
1475 rldimi r10,r9,ESID_BITS,0 /* proto vsid */
1476 ASM_VSID_SCRAMBLE(r10, r9, 256M)
1477 rldic r9,r10,12,16 /* r9 = vsid << 12 */
1478
14790:
1456 /* Hash to the primary group */ 1480 /* Hash to the primary group */
1457 ld r10,PACASTABVIRT(r13) 1481 ld r10,PACASTABVIRT(r13)
1458 mfspr r11,SPRN_DAR 1482 srdi r11,r11,SID_SHIFT
1459 srdi r11,r11,28
1460 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1483 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1461 1484
1462 /* Calculate VSID */
1463 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1464 li r9,0x1
1465 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1466 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1467 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1468
1469 /* Search the primary group for a free entry */ 1485 /* Search the primary group for a free entry */
14701: ld r11,0(r10) /* Test valid bit of the current ste */ 14861: ld r11,0(r10) /* Test valid bit of the current ste */
1471 andi. r11,r11,0x80 1487 andi. r11,r11,0x80
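The reworked do_stab_bolted, together with the slb_low.S changes later in this diff, derives the kernel VSID from a context number placed just above the user contexts, per the in-line comment context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1. A rough C rendering of that proto-VSID calculation, assuming the MAX_USER_CONTEXT, ESID_BITS and SID_SHIFT constants used elsewhere in this series; the scrambling step (ASM_VSID_SCRAMBLE) is applied afterwards and is omitted here:

/* Sketch of the proto-VSID the assembly above builds for a kernel effective address. */
static unsigned long kernel_proto_vsid(unsigned long ea)
{
        unsigned long context, esid;

        /* kernel regions 0xc..0xf map to contexts MAX_USER_CONTEXT+1 onwards */
        context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;

        /* 256MB segment number within the region */
        esid = (ea >> SID_SHIFT) & ((1UL << ESID_BITS) - 1);

        /* equivalent of: rldimi r10,r9,ESID_BITS,0 */
        return (context << ESID_BITS) | esid;
}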
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 06c8202a69cf..2230fd0ca3e4 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end)
1045 if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start)) 1045 if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
1046 continue; 1046 continue;
1047 1047
1048 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); 1048 free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
1049 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1050 free_page((unsigned long)__va(addr));
1051 totalram_pages++;
1052 } 1049 }
1053} 1050}
1054 1051
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index ea78761aa169..939ea7ef0dc8 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -33,11 +33,6 @@
33#include <asm/runlatch.h> 33#include <asm/runlatch.h>
34#include <asm/smp.h> 34#include <asm/smp.h>
35 35
36#ifdef CONFIG_HOTPLUG_CPU
37#define cpu_should_die() cpu_is_offline(smp_processor_id())
38#else
39#define cpu_should_die() 0
40#endif
41 36
42unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; 37unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
43EXPORT_SYMBOL(cpuidle_disable); 38EXPORT_SYMBOL(cpuidle_disable);
@@ -50,64 +45,38 @@ static int __init powersave_off(char *arg)
50} 45}
51__setup("powersave=off", powersave_off); 46__setup("powersave=off", powersave_off);
52 47
53/* 48#ifdef CONFIG_HOTPLUG_CPU
54 * The body of the idle task. 49void arch_cpu_idle_dead(void)
55 */
56void cpu_idle(void)
57{ 50{
58 set_thread_flag(TIF_POLLING_NRFLAG); 51 sched_preempt_enable_no_resched();
59 while (1) { 52 cpu_die();
60 tick_nohz_idle_enter(); 53}
61 rcu_idle_enter(); 54#endif
62
63 while (!need_resched() && !cpu_should_die()) {
64 ppc64_runlatch_off();
65
66 if (ppc_md.power_save) {
67 clear_thread_flag(TIF_POLLING_NRFLAG);
68 /*
69 * smp_mb is so clearing of TIF_POLLING_NRFLAG
70 * is ordered w.r.t. need_resched() test.
71 */
72 smp_mb();
73 local_irq_disable();
74
75 /* Don't trace irqs off for idle */
76 stop_critical_timings();
77
78 /* check again after disabling irqs */
79 if (!need_resched() && !cpu_should_die())
80 ppc_md.power_save();
81
82 start_critical_timings();
83
84 /* Some power_save functions return with
85 * interrupts enabled, some don't.
86 */
87 if (irqs_disabled())
88 local_irq_enable();
89 set_thread_flag(TIF_POLLING_NRFLAG);
90
91 } else {
92 /*
93 * Go into low thread priority and possibly
94 * low power mode.
95 */
96 HMT_low();
97 HMT_very_low();
98 }
99 }
100 55
101 HMT_medium(); 56void arch_cpu_idle(void)
102 ppc64_runlatch_on(); 57{
103 rcu_idle_exit(); 58 ppc64_runlatch_off();
104 tick_nohz_idle_exit(); 59
105 if (cpu_should_die()) { 60 if (ppc_md.power_save) {
106 sched_preempt_enable_no_resched(); 61 ppc_md.power_save();
107 cpu_die(); 62 /*
108 } 63 * Some power_save functions return with
109 schedule_preempt_disabled(); 64 * interrupts enabled, some don't.
65 */
66 if (irqs_disabled())
67 local_irq_enable();
68 } else {
69 local_irq_enable();
70 /*
71 * Go into low thread priority and possibly
72 * low power mode.
73 */
74 HMT_low();
75 HMT_very_low();
110 } 76 }
77
78 HMT_medium();
79 ppc64_runlatch_on();
111} 80}
112 81
113int powersave_nap; 82int powersave_nap;
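With this hunk powerpc stops providing its own cpu_idle() loop and instead supplies hooks to the generic idle loop: arch_cpu_idle() for one idle period and, under CONFIG_HOTPLUG_CPU, arch_cpu_idle_dead() for an offlined CPU; start_secondary() enters that loop via cpu_startup_entry(CPUHP_ONLINE) in the smp.c hunk below. A very rough sketch of the contract the generic loop imposes on these hooks (not the actual core implementation):

/*
 * Sketch of the generic idle loop's contract with the arch hooks above.
 * arch_cpu_idle() is entered with interrupts disabled and must leave them
 * enabled; arch_cpu_idle_dead() never returns.
 */
static void idle_loop_sketch(void)
{
        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched()) {
                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();
                        local_irq_disable();
                        rcu_idle_enter();
                        arch_cpu_idle();        /* returns with IRQs on */
                        rcu_idle_exit();
                }
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
}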
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index a61b133c4f99..6782221d49bd 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void)
756 end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; 756 end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
757 757
758 /* Free the tmp space we don't need */ 758 /* Free the tmp space we don't need */
759 for (; start < end; start += PAGE_SIZE) { 759 free_reserved_area(start, end, 0, NULL);
760 ClearPageReserved(virt_to_page(start));
761 init_page_count(virt_to_page(start));
762 free_page(start);
763 totalram_pages++;
764 }
765} 760}
766 761
767static int __init kvm_guest_init(void) 762static int __init kvm_guest_init(void)
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bec1e930ed73..48fbc2b97e95 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -511,8 +511,7 @@ int __init nvram_scan_partitions(void)
511 "detected: 0-length partition\n"); 511 "detected: 0-length partition\n");
512 goto out; 512 goto out;
513 } 513 }
514 tmp_part = (struct nvram_partition *) 514 tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
515 kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
516 err = -ENOMEM; 515 err = -ENOMEM;
517 if (!tmp_part) { 516 if (!tmp_part) {
518 printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n"); 517 printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n");
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 59dd545fdde1..16e77a81ab4f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
555 new->thread.regs->msr |= 555 new->thread.regs->msr |=
556 (MSR_FP | new->thread.fpexc_mode); 556 (MSR_FP | new->thread.fpexc_mode);
557 } 557 }
558#ifdef CONFIG_ALTIVEC
558 if (msr & MSR_VEC) { 559 if (msr & MSR_VEC) {
559 do_load_up_transact_altivec(&new->thread); 560 do_load_up_transact_altivec(&new->thread);
560 new->thread.regs->msr |= MSR_VEC; 561 new->thread.regs->msr |= MSR_VEC;
561 } 562 }
563#endif
562 /* We may as well turn on VSX too since all the state is restored now */ 564 /* We may as well turn on VSX too since all the state is restored now */
563 if (msr & MSR_VSX) 565 if (msr & MSR_VSX)
564 new->thread.regs->msr |= MSR_VSX; 566 new->thread.regs->msr |= MSR_VSX;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7fd991b..13f8d168b3f1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
2832{ 2832{
2833} 2833}
2834#else 2834#else
2835static void __reloc_toc(void *tocstart, unsigned long offset, 2835static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2836 unsigned long nr_entries)
2837{ 2836{
2838 unsigned long i; 2837 unsigned long i;
2839 unsigned long *toc_entry = (unsigned long *)tocstart; 2838 unsigned long *toc_entry;
2839
2840 /* Get the start of the TOC by using r2 directly. */
2841 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2840 2842
2841 for (i = 0; i < nr_entries; i++) { 2843 for (i = 0; i < nr_entries; i++) {
2842 *toc_entry = *toc_entry + offset; 2844 *toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
2850 unsigned long nr_entries = 2852 unsigned long nr_entries =
2851 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 2853 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2852 2854
2853 /* Need to add offset to get at __prom_init_toc_start */ 2855 __reloc_toc(offset, nr_entries);
2854 __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
2855 2856
2856 mb(); 2857 mb();
2857} 2858}
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
2864 2865
2865 mb(); 2866 mb();
2866 2867
2867 /* __prom_init_toc_start has been relocated, no need to add offset */ 2868 __reloc_toc(-offset, nr_entries);
2868 __reloc_toc(__prom_init_toc_start, -offset, nr_entries);
2869} 2869}
2870#endif 2870#endif
2871#endif 2871#endif
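__reloc_toc() no longer receives the TOC start as an argument: before the TOC has been fixed up, a pointer constant such as __prom_init_toc_start would itself still hold the unrelocated value. Instead the start is derived from r2, which the 64-bit ABI keeps pointing 0x8000 past the beginning of the TOC, so the result is correct both before and after relocation. A hedged stand-alone sketch of the same trick:

/* Sketch: the TOC base is always r2 - 0x8000, mirroring the "addi %0,2,-0x8000" above. */
static unsigned long *toc_start_from_r2(void)
{
        unsigned long *toc;

        asm volatile("addi %0,2,-0x8000" : "=b" (toc));
        return toc;
}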
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6a0858..f9b30c68ba47 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
1428 1428
1429 brk.address = bp_info->addr & ~7UL; 1429 brk.address = bp_info->addr & ~7UL;
1430 brk.type = HW_BRK_TYPE_TRANSLATE; 1430 brk.type = HW_BRK_TYPE_TRANSLATE;
1431 brk.len = 8;
1431 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) 1432 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1432 brk.type |= HW_BRK_TYPE_READ; 1433 brk.type |= HW_BRK_TYPE_READ;
1433 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) 1434 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3acb28e245b4..95068bf569ad 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
866 do_load_up_transact_fpu(&current->thread); 866 do_load_up_transact_fpu(&current->thread);
867 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 867 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
868 } 868 }
869#ifdef CONFIG_ALTIVEC
869 if (msr & MSR_VEC) { 870 if (msr & MSR_VEC) {
870 do_load_up_transact_altivec(&current->thread); 871 do_load_up_transact_altivec(&current->thread);
871 regs->msr |= MSR_VEC; 872 regs->msr |= MSR_VEC;
872 } 873 }
874#endif
873 875
874 return 0; 876 return 0;
875} 877}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 995f8543cb57..c1794286098c 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
522 do_load_up_transact_fpu(&current->thread); 522 do_load_up_transact_fpu(&current->thread);
523 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 523 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
524 } 524 }
525#ifdef CONFIG_ALTIVEC
525 if (msr & MSR_VEC) { 526 if (msr & MSR_VEC) {
526 do_load_up_transact_altivec(&current->thread); 527 do_load_up_transact_altivec(&current->thread);
527 regs->msr |= MSR_VEC; 528 regs->msr |= MSR_VEC;
528 } 529 }
530#endif
529 531
530 return err; 532 return err;
531} 533}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 76bd9da8cb71..ee7ac5e6e28a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -669,7 +669,7 @@ __cpuinit void start_secondary(void *unused)
669 669
670 local_irq_enable(); 670 local_irq_enable();
671 671
672 cpu_idle(); 672 cpu_startup_entry(CPUHP_ONLINE);
673 673
674 BUG(); 674 BUG();
675} 675}
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 84dbace657ce..2da67e7a16d5 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
309 or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ 309 or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
310 mtmsr r5 310 mtmsr r5
311 311
312#ifdef CONFIG_ALTIVEC
312 /* FP and VEC registers: These are recheckpointed from thread.fpr[] 313 /* FP and VEC registers: These are recheckpointed from thread.fpr[]
313 * and thread.vr[] respectively. The thread.transact_fpr[] version 314 * and thread.vr[] respectively. The thread.transact_fpr[] version
314 * is more modern, and will be loaded subsequently by any FPUnavailable 315 * is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
323 REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ 324 REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
324 ld r5, THREAD_VRSAVE(r3) 325 ld r5, THREAD_VRSAVE(r3)
325 mtspr SPRN_VRSAVE, r5 326 mtspr SPRN_VRSAVE, r5
327#endif
326 328
327dont_restore_vec: 329dont_restore_vec:
328 andi. r0, r4, MSR_FP 330 andi. r0, r4, MSR_FP
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index f9748498fe58..13b867093499 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -156,15 +156,13 @@ static struct console udbg_console = {
156 .index = 0, 156 .index = 0,
157}; 157};
158 158
159static int early_console_initialized;
160
161/* 159/*
162 * Called by setup_system after ppc_md->probe and ppc_md->early_init. 160 * Called by setup_system after ppc_md->probe and ppc_md->early_init.
163 * Call it again after setting udbg_putc in ppc_md->setup_arch. 161 * Call it again after setting udbg_putc in ppc_md->setup_arch.
164 */ 162 */
165void __init register_early_udbg_console(void) 163void __init register_early_udbg_console(void)
166{ 164{
167 if (early_console_initialized) 165 if (early_console)
168 return; 166 return;
169 167
170 if (!udbg_putc) 168 if (!udbg_putc)
@@ -174,7 +172,7 @@ void __init register_early_udbg_console(void)
174 printk(KERN_INFO "early console immortal !\n"); 172 printk(KERN_INFO "early console immortal !\n");
175 udbg_console.flags &= ~CON_BOOT; 173 udbg_console.flags &= ~CON_BOOT;
176 } 174 }
177 early_console_initialized = 1; 175 early_console = &udbg_console;
178 register_console(&udbg_console); 176 register_console(&udbg_console);
179} 177}
180 178
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index bc77834dbf43..59f419b935f2 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -31,6 +31,16 @@
31#define UPROBE_TRAP_NR UINT_MAX 31#define UPROBE_TRAP_NR UINT_MAX
32 32
33/** 33/**
34 * is_trap_insn - check if the instruction is a trap variant
35 * @insn: instruction to be checked.
36 * Returns true if @insn is a trap variant.
37 */
38bool is_trap_insn(uprobe_opcode_t *insn)
39{
40 return (is_trap(*insn));
41}
42
43/**
34 * arch_uprobe_analyze_insn 44 * arch_uprobe_analyze_insn
35 * @mm: the probed address space. 45 * @mm: the probed address space.
36 * @arch_uprobe: the probepoint information. 46 * @arch_uprobe: the probepoint information.
@@ -43,12 +53,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
43 if (addr & 0x03) 53 if (addr & 0x03)
44 return -EINVAL; 54 return -EINVAL;
45 55
46 /*
47 * We currently don't support a uprobe on an already
48 * existing breakpoint instruction underneath
49 */
50 if (is_trap(auprobe->ainsn))
51 return -ENOTSUPP;
52 return 0; 56 return 0;
53} 57}
54 58
@@ -188,3 +192,16 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
188 192
189 return false; 193 return false;
190} 194}
195
196unsigned long
197arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
198{
199 unsigned long orig_ret_vaddr;
200
201 orig_ret_vaddr = regs->link;
202
203 /* Replace the return addr with trampoline addr */
204 regs->link = trampoline_vaddr;
205
206 return orig_ret_vaddr;
207}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e317294..5d7d29a313eb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
326 vcpu3s->context_id[0] = err; 326 vcpu3s->context_id[0] = err;
327 327
328 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) 328 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
329 << USER_ESID_BITS) - 1; 329 << ESID_BITS) - 1;
330 vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; 330 vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
331 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; 331 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
332 332
333 kvmppc_mmu_hpte_init(vcpu); 333 kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5e93438afb06..dbdc15aa8127 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1039,7 +1039,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1039 if (!vcpu_book3s) 1039 if (!vcpu_book3s)
1040 goto out; 1040 goto out;
1041 1041
1042 vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) 1042 vcpu_book3s->shadow_vcpu =
1043 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); 1043 kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
1044 if (!vcpu_book3s->shadow_vcpu) 1044 if (!vcpu_book3s->shadow_vcpu)
1045 goto free_vcpu; 1045 goto free_vcpu;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 41cefd43655f..33db48a8ce24 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -26,17 +26,20 @@
26#define E500_PID_NUM 3 26#define E500_PID_NUM 3
27#define E500_TLB_NUM 2 27#define E500_TLB_NUM 2
28 28
29#define E500_TLB_VALID 1 29/* entry is mapped somewhere in host TLB */
30#define E500_TLB_BITMAP 2 30#define E500_TLB_VALID (1 << 0)
31/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
32#define E500_TLB_BITMAP (1 << 1)
33/* TLB1 entry is mapped by host TLB0 */
31#define E500_TLB_TLB0 (1 << 2) 34#define E500_TLB_TLB0 (1 << 2)
32 35
33struct tlbe_ref { 36struct tlbe_ref {
34 pfn_t pfn; 37 pfn_t pfn; /* valid only for TLB0, except briefly */
35 unsigned int flags; /* E500_TLB_* */ 38 unsigned int flags; /* E500_TLB_* */
36}; 39};
37 40
38struct tlbe_priv { 41struct tlbe_priv {
39 struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ 42 struct tlbe_ref ref;
40}; 43};
41 44
42#ifdef CONFIG_KVM_E500V2 45#ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
63 66
64 unsigned int gtlb_nv[E500_TLB_NUM]; 67 unsigned int gtlb_nv[E500_TLB_NUM];
65 68
66 /*
67 * information associated with each host TLB entry --
68 * TLB1 only for now. If/when guest TLB1 entries can be
69 * mapped with host TLB0, this will be used for that too.
70 *
71 * We don't want to use this for guest TLB0 because then we'd
72 * have the overhead of doing the translation again even if
73 * the entry is still in the guest TLB (e.g. we swapped out
74 * and back, and our host TLB entries got evicted).
75 */
76 struct tlbe_ref *tlb_refs[E500_TLB_NUM];
77 unsigned int host_tlb1_nv; 69 unsigned int host_tlb1_nv;
78 70
79 u32 svr; 71 u32 svr;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9b..1c6a9d729df4 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
193 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; 193 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
194 194
195 /* Don't bother with unmapped entries */ 195 /* Don't bother with unmapped entries */
196 if (!(ref->flags & E500_TLB_VALID)) 196 if (!(ref->flags & E500_TLB_VALID)) {
197 return; 197 WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
198 "%s: flags %x\n", __func__, ref->flags);
199 WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
200 }
198 201
199 if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { 202 if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
200 u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; 203 u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
248 pfn_t pfn) 251 pfn_t pfn)
249{ 252{
250 ref->pfn = pfn; 253 ref->pfn = pfn;
251 ref->flags = E500_TLB_VALID; 254 ref->flags |= E500_TLB_VALID;
252 255
253 if (tlbe_is_writable(gtlbe)) 256 if (tlbe_is_writable(gtlbe))
254 kvm_set_pfn_dirty(pfn); 257 kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
257static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) 260static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
258{ 261{
259 if (ref->flags & E500_TLB_VALID) { 262 if (ref->flags & E500_TLB_VALID) {
263 /* FIXME: don't log bogus pfn for TLB1 */
260 trace_kvm_booke206_ref_release(ref->pfn, ref->flags); 264 trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
261 ref->flags = 0; 265 ref->flags = 0;
262 } 266 }
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
274 278
275static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) 279static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
276{ 280{
277 int tlbsel = 0; 281 int tlbsel;
278 int i;
279
280 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
281 struct tlbe_ref *ref =
282 &vcpu_e500->gtlb_priv[tlbsel][i].ref;
283 kvmppc_e500_ref_release(ref);
284 }
285}
286
287static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
288{
289 int stlbsel = 1;
290 int i; 282 int i;
291 283
292 kvmppc_e500_tlbil_all(vcpu_e500); 284 for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
293 285 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
294 for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { 286 struct tlbe_ref *ref =
295 struct tlbe_ref *ref = 287 &vcpu_e500->gtlb_priv[tlbsel][i].ref;
296 &vcpu_e500->tlb_refs[stlbsel][i]; 288 kvmppc_e500_ref_release(ref);
297 kvmppc_e500_ref_release(ref); 289 }
298 } 290 }
299
300 clear_tlb_privs(vcpu_e500);
301} 291}
302 292
303void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) 293void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
304{ 294{
305 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 295 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
306 clear_tlb_refs(vcpu_e500); 296 kvmppc_e500_tlbil_all(vcpu_e500);
297 clear_tlb_privs(vcpu_e500);
307 clear_tlb1_bitmap(vcpu_e500); 298 clear_tlb1_bitmap(vcpu_e500);
308} 299}
309 300
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
458 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); 449 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
459 } 450 }
460 451
461 /* Drop old ref and setup new one. */
462 kvmppc_e500_ref_release(ref);
463 kvmppc_e500_ref_setup(ref, gtlbe, pfn); 452 kvmppc_e500_ref_setup(ref, gtlbe, pfn);
464 453
465 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 454 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
507 if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) 496 if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
508 vcpu_e500->host_tlb1_nv = 0; 497 vcpu_e500->host_tlb1_nv = 0;
509 498
510 vcpu_e500->tlb_refs[1][sesel] = *ref;
511 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
512 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
513 if (vcpu_e500->h2g_tlb1_rmap[sesel]) { 499 if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
514 unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; 500 unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
515 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); 501 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
516 } 502 }
517 vcpu_e500->h2g_tlb1_rmap[sesel] = esel; 503
504 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
505 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
506 vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
507 WARN_ON(!(ref->flags & E500_TLB_VALID));
518 508
519 return sesel; 509 return sesel;
520} 510}
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
526 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 516 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
527 struct kvm_book3e_206_tlb_entry *stlbe, int esel) 517 struct kvm_book3e_206_tlb_entry *stlbe, int esel)
528{ 518{
529 struct tlbe_ref ref; 519 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
530 int sesel; 520 int sesel;
531 int r; 521 int r;
532 522
533 ref.flags = 0;
534 r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, 523 r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
535 &ref); 524 ref);
536 if (r) 525 if (r)
537 return r; 526 return r;
538 527
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
544 } 533 }
545 534
546 /* Otherwise map into TLB1 */ 535 /* Otherwise map into TLB1 */
547 sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); 536 sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
548 write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); 537 write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
549 538
550 return 0; 539 return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
565 case 0: 554 case 0:
566 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 555 priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
567 556
568 /* Triggers after clear_tlb_refs or on initial mapping */ 557 /* Triggers after clear_tlb_privs or on initial mapping */
569 if (!(priv->ref.flags & E500_TLB_VALID)) { 558 if (!(priv->ref.flags & E500_TLB_VALID)) {
570 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); 559 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
571 } else { 560 } else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
665 host_tlb_params[0].entries / host_tlb_params[0].ways; 654 host_tlb_params[0].entries / host_tlb_params[0].ways;
666 host_tlb_params[1].sets = 1; 655 host_tlb_params[1].sets = 1;
667 656
668 vcpu_e500->tlb_refs[0] =
669 kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
670 GFP_KERNEL);
671 if (!vcpu_e500->tlb_refs[0])
672 goto err;
673
674 vcpu_e500->tlb_refs[1] =
675 kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
676 GFP_KERNEL);
677 if (!vcpu_e500->tlb_refs[1])
678 goto err;
679
680 vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * 657 vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
681 host_tlb_params[1].entries, 658 host_tlb_params[1].entries,
682 GFP_KERNEL); 659 GFP_KERNEL);
683 if (!vcpu_e500->h2g_tlb1_rmap) 660 if (!vcpu_e500->h2g_tlb1_rmap)
684 goto err; 661 return -EINVAL;
685 662
686 return 0; 663 return 0;
687
688err:
689 kfree(vcpu_e500->tlb_refs[0]);
690 kfree(vcpu_e500->tlb_refs[1]);
691 return -EINVAL;
692} 664}
693 665
694void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) 666void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
695{ 667{
696 kfree(vcpu_e500->h2g_tlb1_rmap); 668 kfree(vcpu_e500->h2g_tlb1_rmap);
697 kfree(vcpu_e500->tlb_refs[0]);
698 kfree(vcpu_e500->tlb_refs[1]);
699} 669}
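Besides dropping the tlb_refs[] arrays, the e500_mmu_host.c rework changes h2g_tlb1_rmap[] so that a stored value of 0 means "no guest entry maps this host slot", and a real mapping is stored as esel + 1 (and read back with - 1). A minimal sketch of that off-by-one sentinel convention, with the array and function names here being illustrative only:

/* Sketch of the rmap convention: 0 is "empty", otherwise the value is guest index + 1. */
static unsigned int rmap[64];           /* host slot -> guest index + 1 */

static void rmap_set(unsigned int host_slot, unsigned int guest_idx)
{
        rmap[host_slot] = guest_idx + 1;
}

static bool rmap_get(unsigned int host_slot, unsigned int *guest_idx)
{
        if (!rmap[host_slot])
                return false;           /* nothing mapped here */
        *guest_idx = rmap[host_slot] - 1;
        return true;
}

This lets a kzalloc()ed array start out in the "all empty" state without a separate validity flag, which is why the old code's bare esel assignment needed the adjustment above.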
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 1f89d26e65fb..2f4baa074b2e 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
108{ 108{
109} 109}
110 110
111static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
112
111void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 113void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
112{ 114{
113 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 115 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
136 mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); 138 mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
137 mtspr(SPRN_GESR, vcpu->arch.shared->esr); 139 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
138 140
139 if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) 141 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
142 __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
140 kvmppc_e500_tlbil_all(vcpu_e500); 143 kvmppc_e500_tlbil_all(vcpu_e500);
144 __get_cpu_var(last_vcpu_on_cpu) = vcpu;
145 }
141 146
142 kvmppc_load_guest_fp(vcpu); 147 kvmppc_load_guest_fp(vcpu);
143} 148}
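The e500mc change adds a per-CPU record of the last vCPU loaded on each physical CPU, so the shadow TLB is also invalidated when a different vCPU is scheduled onto the core, not only when the vCPU itself has migrated (the oldpir check). The underlying pattern, as a hedged generic sketch in which everything except DEFINE_PER_CPU/__get_cpu_var is an illustrative name:

/* Sketch: remember per-CPU which context last ran, and only flush when it changes. */
static DEFINE_PER_CPU(void *, last_owner);

static void load_context_sketch(void *ctx)
{
        if (__get_cpu_var(last_owner) != ctx) {
                expensive_invalidate(ctx);      /* hypothetical flush routine */
                __get_cpu_var(last_owner) = ctx;
        }
}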
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e1271719f..f410c3e12c1e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
195 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); 195 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
196 unsigned long tprot = prot; 196 unsigned long tprot = prot;
197 197
198 /*
199 * If we hit a bad address return error.
200 */
201 if (!vsid)
202 return -1;
198 /* Make kernel text executable */ 203 /* Make kernel text executable */
199 if (overlaps_kernel_text(vaddr, vaddr + step)) 204 if (overlaps_kernel_text(vaddr, vaddr + step))
200 tprot &= ~HPTE_R_N; 205 tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
759 /* Initialize stab / SLB management */ 764 /* Initialize stab / SLB management */
760 if (mmu_has_feature(MMU_FTR_SLB)) 765 if (mmu_has_feature(MMU_FTR_SLB))
761 slb_initialize(); 766 slb_initialize();
767 else
768 stab_initialize(get_paca()->stab_real);
762} 769}
763 770
764#ifdef CONFIG_SMP 771#ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
922 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", 929 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
923 ea, access, trap); 930 ea, access, trap);
924 931
925 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
926 DBG_LOW(" out of pgtable range !\n");
927 return 1;
928 }
929
930 /* Get region & vsid */ 932 /* Get region & vsid */
931 switch (REGION_ID(ea)) { 933 switch (REGION_ID(ea)) {
932 case USER_REGION_ID: 934 case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
957 } 959 }
958 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); 960 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
959 961
962 /* Bad address. */
963 if (!vsid) {
964 DBG_LOW("Bad address!\n");
965 return 1;
966 }
960 /* Get pgdir */ 967 /* Get pgdir */
961 pgdir = mm->pgd; 968 pgdir = mm->pgd;
962 if (pgdir == NULL) 969 if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1126 /* Get VSID */ 1133 /* Get VSID */
1127 ssize = user_segment_size(ea); 1134 ssize = user_segment_size(ea);
1128 vsid = get_vsid(mm->context.id, ea, ssize); 1135 vsid = get_vsid(mm->context.id, ea, ssize);
1136 if (!vsid)
1137 return;
1129 1138
1130 /* Hash doesn't like irqs */ 1139 /* Hash doesn't like irqs */
1131 local_irq_save(flags); 1140 local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1233 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1242 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1234 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1243 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
1235 1244
1245 /* Don't create HPTE entries for bad address */
1246 if (!vsid)
1247 return;
1236 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), 1248 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
1237 mode, HPTE_V_BOLTED, 1249 mode, HPTE_V_BOLTED,
1238 mmu_linear_psize, mmu_kernel_ssize); 1250 mmu_linear_psize, mmu_kernel_ssize);
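Several hash_utils_64.c paths now treat a VSID of 0 as "bad address" and bail out rather than building HPTEs for it; slb_allocate_realmode (later in this diff) produces that 0 for effective addresses outside the supported range. A tiny sketch of the calling convention this establishes, where get_vsid() and user_segment_size() are the real helpers and the caller shape and return value are illustrative:

/* Sketch: vsid == 0 is the "bad address" sentinel the hunks above check for. */
static int map_one_page_sketch(struct mm_struct *mm, unsigned long ea)
{
        unsigned long vsid = get_vsid(mm->context.id, ea, user_segment_size(ea));

        if (!vsid)
                return -EFAULT;         /* out-of-range EA: do not build an HPTE */

        /* ... proceed to hash the VA and insert the HPTE ... */
        return 0;
}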
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7e2246fb2f31..5a535b73ea18 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
263 vmemmap_list = vmem_back; 263 vmemmap_list = vmem_back;
264} 264}
265 265
266int __meminit vmemmap_populate(struct page *start_page, 266int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
267 unsigned long nr_pages, int node)
268{ 267{
269 unsigned long start = (unsigned long)start_page;
270 unsigned long end = (unsigned long)(start_page + nr_pages);
271 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; 268 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
272 269
273 /* Align to the page size of the linear mapping. */ 270 /* Align to the page size of the linear mapping. */
274 start = _ALIGN_DOWN(start, page_size); 271 start = _ALIGN_DOWN(start, page_size);
275 272
276 pr_debug("vmemmap_populate page %p, %ld pages, node %d\n", 273 pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
277 start_page, nr_pages, node);
278 pr_debug(" -> map %lx..%lx\n", start, end);
279 274
280 for (; start < end; start += page_size) { 275 for (; start < end; start += page_size) {
281 void *p; 276 void *p;
@@ -298,7 +293,7 @@ int __meminit vmemmap_populate(struct page *start_page,
298 return 0; 293 return 0;
299} 294}
300 295
301void vmemmap_free(struct page *memmap, unsigned long nr_pages) 296void vmemmap_free(unsigned long start, unsigned long end)
302{ 297{
303} 298}
304 299
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f1f7409a4183..cd76c454942f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -352,13 +352,9 @@ void __init mem_init(void)
352 struct page *page = pfn_to_page(pfn); 352 struct page *page = pfn_to_page(pfn);
353 if (memblock_is_reserved(paddr)) 353 if (memblock_is_reserved(paddr))
354 continue; 354 continue;
355 ClearPageReserved(page); 355 free_highmem_page(page);
356 init_page_count(page);
357 __free_page(page);
358 totalhigh_pages++;
359 reservedpages--; 356 reservedpages--;
360 } 357 }
361 totalram_pages += totalhigh_pages;
362 printk(KERN_DEBUG "High memory: %luk\n", 358 printk(KERN_DEBUG "High memory: %luk\n",
363 totalhigh_pages << (PAGE_SHIFT-10)); 359 totalhigh_pages << (PAGE_SHIFT-10));
364 } 360 }
@@ -405,39 +401,14 @@ void __init mem_init(void)
405 401
406void free_initmem(void) 402void free_initmem(void)
407{ 403{
408 unsigned long addr;
409
410 ppc_md.progress = ppc_printk_progress; 404 ppc_md.progress = ppc_printk_progress;
411 405 free_initmem_default(POISON_FREE_INITMEM);
412 addr = (unsigned long)__init_begin;
413 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
414 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
415 ClearPageReserved(virt_to_page(addr));
416 init_page_count(virt_to_page(addr));
417 free_page(addr);
418 totalram_pages++;
419 }
420 pr_info("Freeing unused kernel memory: %luk freed\n",
421 ((unsigned long)__init_end -
422 (unsigned long)__init_begin) >> 10);
423} 406}
424 407
425#ifdef CONFIG_BLK_DEV_INITRD 408#ifdef CONFIG_BLK_DEV_INITRD
426void __init free_initrd_mem(unsigned long start, unsigned long end) 409void __init free_initrd_mem(unsigned long start, unsigned long end)
427{ 410{
428 if (start >= end) 411 free_reserved_area(start, end, 0, "initrd");
429 return;
430
431 start = _ALIGN_DOWN(start, PAGE_SIZE);
432 end = _ALIGN_UP(end, PAGE_SIZE);
433 pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
434
435 for (; start < end; start += PAGE_SIZE) {
436 ClearPageReserved(virt_to_page(start));
437 init_page_count(virt_to_page(start));
438 free_page(start);
439 totalram_pages++;
440 }
441} 412}
442#endif 413#endif
443 414
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace54..d1d1b92c5b99 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
29static DEFINE_SPINLOCK(mmu_context_lock); 29static DEFINE_SPINLOCK(mmu_context_lock);
30static DEFINE_IDA(mmu_context_ida); 30static DEFINE_IDA(mmu_context_ida);
31 31
32/*
33 * 256MB segment
34 * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
35 * available for user mappings. Each segment contains 2^28 bytes. Each
36 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
37 * (19 == 37 + 28 - 46).
38 */
39#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
40
41int __init_new_context(void) 32int __init_new_context(void)
42{ 33{
43 int index; 34 int index;
@@ -56,7 +47,7 @@ again:
56 else if (err) 47 else if (err)
57 return err; 48 return err;
58 49
59 if (index > MAX_CONTEXT) { 50 if (index > MAX_USER_CONTEXT) {
60 spin_lock(&mmu_context_lock); 51 spin_lock(&mmu_context_lock);
61 ida_remove(&mmu_context_ida, index); 52 ida_remove(&mmu_context_ida, index);
62 spin_unlock(&mmu_context_lock); 53 spin_unlock(&mmu_context_lock);
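The mmu_context_hash64.c hunk drops the local MAX_CONTEXT definition and bounds the allocator by MAX_USER_CONTEXT instead, since the context values above it are now reserved for the kernel regions (see the slb_low.S and exceptions-64s.S changes). A hedged sketch of the surrounding IDA allocation loop, which the hunk leaves otherwise unchanged:

/* Sketch of the bounded IDA allocation; only the upper bound changed in this hunk. */
static int alloc_context_id_sketch(struct ida *ida, spinlock_t *lock)
{
        int index, err;

again:
        if (!ida_pre_get(ida, GFP_KERNEL))
                return -ENOMEM;

        spin_lock(lock);
        err = ida_get_new_above(ida, 1, &index);
        spin_unlock(lock);

        if (err == -EAGAIN)
                goto again;
        else if (err)
                return err;

        if (index > MAX_USER_CONTEXT) {         /* ran past the usable range */
                spin_lock(lock);
                ida_remove(ida, index);
                spin_unlock(lock);
                return -ENOMEM;
        }
        return index;
}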
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca2b4d7..fa33c546e778 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -22,6 +22,7 @@
22#include <linux/pfn.h> 22#include <linux/pfn.h>
23#include <linux/cpuset.h> 23#include <linux/cpuset.h>
24#include <linux/node.h> 24#include <linux/node.h>
25#include <linux/slab.h>
25#include <asm/sparsemem.h> 26#include <asm/sparsemem.h>
26#include <asm/prom.h> 27#include <asm/prom.h>
27#include <asm/smp.h> 28#include <asm/smp.h>
@@ -62,14 +63,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
62 */ 63 */
63static void __init setup_node_to_cpumask_map(void) 64static void __init setup_node_to_cpumask_map(void)
64{ 65{
65 unsigned int node, num = 0; 66 unsigned int node;
66 67
67 /* setup nr_node_ids if not done yet */ 68 /* setup nr_node_ids if not done yet */
68 if (nr_node_ids == MAX_NUMNODES) { 69 if (nr_node_ids == MAX_NUMNODES)
69 for_each_node_mask(node, node_possible_map) 70 setup_nr_node_ids();
70 num = node;
71 nr_node_ids = num + 1;
72 }
73 71
74 /* allocate the map */ 72 /* allocate the map */
75 for (node = 0; node < nr_node_ids; node++) 73 for (node = 0; node < nr_node_ids; node++)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a4..654258f165ae 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
61#endif 61#endif
62 62
63#ifdef CONFIG_PPC_STD_MMU_64 63#ifdef CONFIG_PPC_STD_MMU_64
64#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) 64#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
65#error TASK_SIZE_USER64 exceeds user VSID range 65#error TASK_SIZE_USER64 exceeds user VSID range
66#endif 66#endif
67#endif 67#endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca227757..17aa6dfceb34 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
31 * No other registers are examined or changed. 31 * No other registers are examined or changed.
32 */ 32 */
33_GLOBAL(slb_allocate_realmode) 33_GLOBAL(slb_allocate_realmode)
34 /* r3 = faulting address */ 34 /*
35 * check for bad kernel/user address
36 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
37 */
38 rldicr. r9,r3,4,(63 - 46 - 4)
39 bne- 8f
35 40
36 srdi r9,r3,60 /* get region */ 41 srdi r9,r3,60 /* get region */
37 srdi r10,r3,28 /* get esid */ 42 srdi r10,r3,SID_SHIFT /* get esid */
38 cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ 43 cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
39 44
40 /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ 45 /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
56 */ 61 */
57_GLOBAL(slb_miss_kernel_load_linear) 62_GLOBAL(slb_miss_kernel_load_linear)
58 li r11,0 63 li r11,0
59 li r9,0x1
60 /* 64 /*
61 * for 1T we shift 12 bits more. slb_finish_load_1T will do 65 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
62 * the necessary adjustment 66 * r9 = region id.
63 */ 67 */
64 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 68 addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
69 addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
70
71
65BEGIN_FTR_SECTION 72BEGIN_FTR_SECTION
66 b slb_finish_load 73 b slb_finish_load
67END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 74END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
91 _GLOBAL(slb_miss_kernel_load_io) 98 _GLOBAL(slb_miss_kernel_load_io)
92 li r11,0 99 li r11,0
936: 1006:
94 li r9,0x1
95 /* 101 /*
96 * for 1T we shift 12 bits more. slb_finish_load_1T will do 102 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
97 * the necessary adjustment 103 * r9 = region id.
98 */ 104 */
99 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 105 addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
106 addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
107
100BEGIN_FTR_SECTION 108BEGIN_FTR_SECTION
101 b slb_finish_load 109 b slb_finish_load
102END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 110END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
103 b slb_finish_load_1T 111 b slb_finish_load_1T
104 112
1050: /* user address: proto-VSID = context << 15 | ESID. First check 1130:
106 * if the address is within the boundaries of the user region
107 */
108 srdi. r9,r10,USER_ESID_BITS
109 bne- 8f /* invalid ea bits set */
110
111
112 /* when using slices, we extract the psize off the slice bitmaps 114 /* when using slices, we extract the psize off the slice bitmaps
113 * and then we need to get the sllp encoding off the mmu_psize_defs 115 * and then we need to get the sllp encoding off the mmu_psize_defs
114 * array. 116 * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
164 ld r9,PACACONTEXTID(r13) 166 ld r9,PACACONTEXTID(r13)
165BEGIN_FTR_SECTION 167BEGIN_FTR_SECTION
166 cmpldi r10,0x1000 168 cmpldi r10,0x1000
167END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
168 rldimi r10,r9,USER_ESID_BITS,0
169BEGIN_FTR_SECTION
170 bge slb_finish_load_1T 169 bge slb_finish_load_1T
171END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 170END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
172 b slb_finish_load 171 b slb_finish_load
173 172
1748: /* invalid EA */ 1738: /* invalid EA */
175 li r10,0 /* BAD_VSID */ 174 li r10,0 /* BAD_VSID */
175 li r9,0 /* BAD_VSID */
176 li r11,SLB_VSID_USER /* flags don't much matter */ 176 li r11,SLB_VSID_USER /* flags don't much matter */
177 b slb_finish_load 177 b slb_finish_load
178 178
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
221 221
222 /* get context to calculate proto-VSID */ 222 /* get context to calculate proto-VSID */
223 ld r9,PACACONTEXTID(r13) 223 ld r9,PACACONTEXTID(r13)
224 rldimi r10,r9,USER_ESID_BITS,0
225
226 /* fall through slb_finish_load */ 224 /* fall through slb_finish_load */
227 225
228#endif /* __DISABLED__ */ 226#endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
231/* 229/*
232 * Finish loading of an SLB entry and return 230 * Finish loading of an SLB entry and return
233 * 231 *
234 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET 232 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
235 */ 233 */
236slb_finish_load: 234slb_finish_load:
235 rldimi r10,r9,ESID_BITS,0
237 ASM_VSID_SCRAMBLE(r10,r9,256M) 236 ASM_VSID_SCRAMBLE(r10,r9,256M)
238 /* 237 /*
239 * bits above VSID_BITS_256M need to be ignored from r10 238 * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
298/* 297/*
299 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. 298 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
300 * 299 *
301 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 300 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
302 */ 301 */
303slb_finish_load_1T: 302slb_finish_load_1T:
304 srdi r10,r10,40-28 /* get 1T ESID */ 303 srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
304 rldimi r10,r9,ESID_BITS_1T,0
305 ASM_VSID_SCRAMBLE(r10,r9,1T) 305 ASM_VSID_SCRAMBLE(r10,r9,1T)
306 /* 306 /*
307 * bits above VSID_BITS_1T need to be ignored from r10 307 * bits above VSID_BITS_1T need to be ignored from r10
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3f..023ec8a13f38 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
82 if (!is_kernel_addr(addr)) { 82 if (!is_kernel_addr(addr)) {
83 ssize = user_segment_size(addr); 83 ssize = user_segment_size(addr);
84 vsid = get_vsid(mm->context.id, addr, ssize); 84 vsid = get_vsid(mm->context.id, addr, ssize);
85 WARN_ON(vsid == 0);
86 } else { 85 } else {
87 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
88 ssize = mmu_kernel_ssize; 87 ssize = mmu_kernel_ssize;
89 } 88 }
89 WARN_ON(vsid == 0);
90 vpn = hpt_vpn(addr, vsid, ssize); 90 vpn = hpt_vpn(addr, vsid, ssize);
91 rpte = __real_pte(__pte(pte), ptep); 91 rpte = __real_pte(__pte(pte), ptep);
92 92
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index b554879bd31e..3c475d6267c7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
420 .attrs = power7_events_attr, 420 .attrs = power7_events_attr,
421}; 421};
422 422
423PMU_FORMAT_ATTR(event, "config:0-19");
424
425static struct attribute *power7_pmu_format_attr[] = {
426 &format_attr_event.attr,
427 NULL,
428};
429
430struct attribute_group power7_pmu_format_group = {
431 .name = "format",
432 .attrs = power7_pmu_format_attr,
433};
434
423static const struct attribute_group *power7_pmu_attr_groups[] = { 435static const struct attribute_group *power7_pmu_attr_groups[] = {
436 &power7_pmu_format_group,
424 &power7_pmu_events_group, 437 &power7_pmu_events_group,
425 NULL, 438 NULL,
426}; 439};
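The power7-pmu.c addition exposes the event encoding through sysfs by attaching a "format" attribute group alongside the existing events group; PMU_FORMAT_ATTR(event, "config:0-19") declares that the raw event number lives in bits 0-19 of perf_event_attr.config. The same wiring for a hypothetical PMU, as a hedged sketch:

/* Sketch: advertise the config layout of a hypothetical PMU via sysfs. */
PMU_FORMAT_ATTR(event, "config:0-19");          /* raw event code bits */

static struct attribute *example_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group example_pmu_format_group = {
        .name = "format",
        .attrs = example_pmu_format_attrs,
};

static const struct attribute_group *example_pmu_attr_groups[] = {
        &example_pmu_format_group,
        NULL,
};

/* later: struct pmu .attr_groups = example_pmu_attr_groups; tools read
 * /sys/bus/event_source/devices/<pmu>/format/event to build raw events. */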
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0effe9f5a1ea..7be93367d92f 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -274,6 +274,8 @@ config 440EPX
274 select IBM_EMAC_EMAC4 274 select IBM_EMAC_EMAC4
275 select IBM_EMAC_RGMII 275 select IBM_EMAC_RGMII
276 select IBM_EMAC_ZMII 276 select IBM_EMAC_ZMII
277 select USB_EHCI_BIG_ENDIAN_MMIO
278 select USB_EHCI_BIG_ENDIAN_DESC
277 279
278config 440GRX 280config 440GRX
279 bool 281 bool
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ecf..381a592826a2 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,6 +7,8 @@ config PPC_MPC512x
7 select PPC_PCI_CHOICE 7 select PPC_PCI_CHOICE
8 select FSL_PCI if PCI 8 select FSL_PCI if PCI
9 select ARCH_WANT_OPTIONAL_GPIOLIB 9 select ARCH_WANT_OPTIONAL_GPIOLIB
10 select USB_EHCI_BIG_ENDIAN_MMIO
11 select USB_EHCI_BIG_ENDIAN_DESC
10 12
11config MPC5121_ADS 13config MPC5121_ADS
12 bool "Freescale MPC5121E ADS" 14 bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index d30235b7e3f7..db6ac389ef8c 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
172 172
173static inline void mpc512x_free_bootmem(struct page *page) 173static inline void mpc512x_free_bootmem(struct page *page)
174{ 174{
175 __ClearPageReserved(page);
176 BUG_ON(PageTail(page)); 175 BUG_ON(PageTail(page));
177 BUG_ON(atomic_read(&page->_count) > 1); 176 BUG_ON(atomic_read(&page->_count) > 1);
178 atomic_set(&page->_count, 1); 177 free_reserved_page(page);
179 __free_page(page);
180 totalram_pages++;
181} 178}
182 179
183void mpc512x_release_bootmem(void) 180void mpc512x_release_bootmem(void)
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 611e92f291c4..7179726ba5c5 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
69 return IRQ_HANDLED; 69 return IRQ_HANDLED;
70}; 70};
71 71
72static int __devinit gpio_halt_probe(struct platform_device *pdev) 72static int gpio_halt_probe(struct platform_device *pdev)
73{ 73{
74 enum of_gpio_flags flags; 74 enum of_gpio_flags flags;
75 struct device_node *node = pdev->dev.of_node; 75 struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
128 return 0; 128 return 0;
129} 129}
130 130
131static int __devexit gpio_halt_remove(struct platform_device *pdev) 131static int gpio_halt_remove(struct platform_device *pdev)
132{ 132{
133 if (halt_node) { 133 if (halt_node) {
134 int gpio = of_get_gpio(halt_node, 0); 134 int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
165 .of_match_table = gpio_halt_match, 165 .of_match_table = gpio_halt_match,
166 }, 166 },
167 .probe = gpio_halt_probe, 167 .probe = gpio_halt_probe,
168 .remove = __devexit_p(gpio_halt_remove), 168 .remove = gpio_halt_remove,
169}; 169};
170 170
171module_platform_driver(gpio_halt_driver); 171module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index cea2f09c4241..18e3b76c78d7 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -124,9 +124,8 @@ config 6xx
124 select PPC_HAVE_PMU_SUPPORT 124 select PPC_HAVE_PMU_SUPPORT
125 125
126config POWER3 126config POWER3
127 bool
128 depends on PPC64 && PPC_BOOK3S 127 depends on PPC64 && PPC_BOOK3S
129 default y if !POWER4_ONLY 128 def_bool y
130 129
131config POWER4 130config POWER4
132 depends on PPC64 && PPC_BOOK3S 131 depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@ config TUNE_CELL
145 but somewhat slower on other machines. This option only changes 144 but somewhat slower on other machines. This option only changes
146 the scheduling of instructions, not the selection of instructions 145 the scheduling of instructions, not the selection of instructions
147 itself, so the resulting kernel will keep running on all other 146 itself, so the resulting kernel will keep running on all other
148 machines. When building a kernel that is supposed to run only 147 machines.
149 on Cell, you should also select the POWER4_ONLY option.
150 148
151# this is temp to handle compat with arch=ppc 149# this is temp to handle compat with arch=ppc
152config 8xx 150config 8xx
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2372c609fa2b..9a432de363b8 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void)
72 return get_memblock_size(); 72 return get_memblock_size();
73} 73}
74 74
75#ifdef CONFIG_MEMORY_HOTREMOVE
75static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) 76static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
76{ 77{
77 unsigned long start, start_pfn; 78 unsigned long start, start_pfn;
@@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np)
153 ret = pseries_remove_memblock(base, lmb_size); 154 ret = pseries_remove_memblock(base, lmb_size);
154 return ret; 155 return ret;
155} 156}
157#else
158static inline int pseries_remove_memblock(unsigned long base,
159 unsigned int memblock_size)
160{
161 return -EOPNOTSUPP;
162}
163static inline int pseries_remove_memory(struct device_node *np)
164{
165 return -EOPNOTSUPP;
166}
167#endif /* CONFIG_MEMORY_HOTREMOVE */
156 168
157static int pseries_add_memory(struct device_node *np) 169static int pseries_add_memory(struct device_node *np)
158{ 170{
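The hotplug-memory.c change wraps the removal paths in CONFIG_MEMORY_HOTREMOVE and supplies inline stubs that return -EOPNOTSUPP when the option is off, so callers need no #ifdefs of their own. The pattern, as a hedged generic sketch:

/* Sketch of the "real function or stub" pattern used above. */
#ifdef CONFIG_SOME_FEATURE
int feature_do_thing(unsigned long arg);        /* real implementation elsewhere */
#else
static inline int feature_do_thing(unsigned long arg)
{
        return -EOPNOTSUPP;                     /* feature compiled out */
}
#endif

Callers can then invoke feature_do_thing() unconditionally and simply propagate the error.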
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355a..299731e9036b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
186 (0x1UL << 4), &dummy1, &dummy2); 186 (0x1UL << 4), &dummy1, &dummy2);
187 if (lpar_rc == H_SUCCESS) 187 if (lpar_rc == H_SUCCESS)
188 return i; 188 return i;
189 BUG_ON(lpar_rc != H_NOT_FOUND); 189
190 /*
191 * The test for adjunct partition is performed before the
192 * ANDCOND test. H_RESOURCE may be returned, so we need to
193 * check for that as well.
194 */
195 BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
190 196
191 slot_offset++; 197 slot_offset++;
192 slot_offset &= 0x7; 198 slot_offset &= 0x7;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4b505370a1d5..bda6ba6f3cf5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,7 +134,7 @@ config S390
134 select HAVE_SYSCALL_WRAPPERS 134 select HAVE_SYSCALL_WRAPPERS
135 select HAVE_UID16 if 32BIT 135 select HAVE_UID16 if 32BIT
136 select HAVE_VIRT_CPU_ACCOUNTING 136 select HAVE_VIRT_CPU_ACCOUNTING
137 select HAVE_VIRT_TO_BUS 137 select VIRT_TO_BUS
138 select INIT_ALL_POSSIBLE 138 select INIT_ALL_POSSIBLE
139 select KTIME_SCALAR if 32BIT 139 select KTIME_SCALAR if 32BIT
140 select MODULES_USE_ELF_RELA 140 select MODULES_USE_ELF_RELA
@@ -375,19 +375,6 @@ config PACK_STACK
375 375
376 Say Y if you are unsure. 376 Say Y if you are unsure.
377 377
378config SMALL_STACK
379 def_bool n
380 prompt "Use 8kb for kernel stack instead of 16kb"
381 depends on PACK_STACK && 64BIT && !LOCKDEP
382 help
383 If you say Y here and the compiler supports the -mkernel-backchain
384 option the kernel will use a smaller kernel stack size. The reduced
385 size is 8kb instead of 16kb. This allows to run more threads on a
386 system and reduces the pressure on the memory management for higher
387 order page allocations.
388
389 Say N if you are unsure.
390
391config CHECK_STACK 378config CHECK_STACK
392 def_bool y 379 def_bool y
393 prompt "Detect kernel stack overflow" 380 prompt "Detect kernel stack overflow"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7e3ce78d4290..a7d68a467ce8 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
55ifeq ($(call cc-option-yn,-mkernel-backchain),y) 55ifeq ($(call cc-option-yn,-mkernel-backchain),y)
56cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK 56cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
57aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK 57aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
58cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
59aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
60ifdef CONFIG_SMALL_STACK
61STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
62endif
63endif 58endif
64 59
65# new style option for packed stacks 60# new style option for packed stacks
66ifeq ($(call cc-option-yn,-mpacked-stack),y) 61ifeq ($(call cc-option-yn,-mpacked-stack),y)
67cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK 62cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
68aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK 63aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
69cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
70aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
71ifdef CONFIG_SMALL_STACK
72STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
73endif
74endif 64endif
75 65
76ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) 66ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40c6752..bb5dd496614f 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
105int hypfs_dbfs_init(void) 105int hypfs_dbfs_init(void)
106{ 106{
107 dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); 107 dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
108 if (IS_ERR(dbfs_dir)) 108 return PTR_RET(dbfs_dir);
109 return PTR_ERR(dbfs_dir);
110 return 0;
111} 109}
112 110
113void hypfs_dbfs_exit(void) 111void hypfs_dbfs_exit(void)
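In the hypfs change above, the open-coded IS_ERR()/PTR_ERR()/return 0 sequence is collapsed into PTR_RET(). A sketch of what that helper amounts to (PTR_RET lives in linux/err.h; later kernels know it as PTR_ERR_OR_ZERO; the name below is illustrative only):

    #include <linux/err.h>

    /* roughly equivalent open-coded form, for illustration */
    static inline int ptr_ret_sketch(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }

hypfs_dbfs_init() therefore still returns 0 on success and the PTR_ERR() value when debugfs_create_dir() hands back an error pointer.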
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15422933c60b..4d8604e311f3 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
61 61
62#ifndef CONFIG_64BIT 62#ifndef CONFIG_64BIT
63 63
64#define __BITOPS_ALIGN 3
65#define __BITOPS_WORDSIZE 32
66#define __BITOPS_OR "or" 64#define __BITOPS_OR "or"
67#define __BITOPS_AND "nr" 65#define __BITOPS_AND "nr"
68#define __BITOPS_XOR "xr" 66#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
81 79
82#else /* CONFIG_64BIT */ 80#else /* CONFIG_64BIT */
83 81
84#define __BITOPS_ALIGN 7
85#define __BITOPS_WORDSIZE 64
86#define __BITOPS_OR "ogr" 82#define __BITOPS_OR "ogr"
87#define __BITOPS_AND "ngr" 83#define __BITOPS_AND "ngr"
88#define __BITOPS_XOR "xgr" 84#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
101 97
102#endif /* CONFIG_64BIT */ 98#endif /* CONFIG_64BIT */
103 99
104#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 100#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
105#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
106 101
107#ifdef CONFIG_SMP 102#ifdef CONFIG_SMP
108/* 103/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
114 109
115 addr = (unsigned long) ptr; 110 addr = (unsigned long) ptr;
116 /* calculate address for CS */ 111 /* calculate address for CS */
117 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 112 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
118 /* make OR mask */ 113 /* make OR mask */
119 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 114 mask = 1UL << (nr & (BITS_PER_LONG - 1));
120 /* Do the atomic update. */ 115 /* Do the atomic update. */
121 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); 116 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
122} 117}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
130 125
131 addr = (unsigned long) ptr; 126 addr = (unsigned long) ptr;
132 /* calculate address for CS */ 127 /* calculate address for CS */
133 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 128 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
134 /* make AND mask */ 129 /* make AND mask */
135 mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); 130 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
136 /* Do the atomic update. */ 131 /* Do the atomic update. */
137 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); 132 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
138} 133}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
146 141
147 addr = (unsigned long) ptr; 142 addr = (unsigned long) ptr;
148 /* calculate address for CS */ 143 /* calculate address for CS */
149 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 144 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
150 /* make XOR mask */ 145 /* make XOR mask */
151 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 146 mask = 1UL << (nr & (BITS_PER_LONG - 1));
152 /* Do the atomic update. */ 147 /* Do the atomic update. */
153 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); 148 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
154} 149}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
163 158
164 addr = (unsigned long) ptr; 159 addr = (unsigned long) ptr;
165 /* calculate address for CS */ 160 /* calculate address for CS */
166 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 161 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
167 /* make OR/test mask */ 162 /* make OR/test mask */
168 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 163 mask = 1UL << (nr & (BITS_PER_LONG - 1));
169 /* Do the atomic update. */ 164 /* Do the atomic update. */
170 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR); 165 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
171 __BITOPS_BARRIER(); 166 barrier();
172 return (old & mask) != 0; 167 return (old & mask) != 0;
173} 168}
174 169
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
182 177
183 addr = (unsigned long) ptr; 178 addr = (unsigned long) ptr;
184 /* calculate address for CS */ 179 /* calculate address for CS */
185 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 180 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
186 /* make AND/test mask */ 181 /* make AND/test mask */
187 mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1))); 182 mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
188 /* Do the atomic update. */ 183 /* Do the atomic update. */
189 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND); 184 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
190 __BITOPS_BARRIER(); 185 barrier();
191 return (old ^ new) != 0; 186 return (old ^ new) != 0;
192} 187}
193 188
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
201 196
202 addr = (unsigned long) ptr; 197 addr = (unsigned long) ptr;
203 /* calculate address for CS */ 198 /* calculate address for CS */
204 addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3; 199 addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
205 /* make XOR/test mask */ 200 /* make XOR/test mask */
206 mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1)); 201 mask = 1UL << (nr & (BITS_PER_LONG - 1));
207 /* Do the atomic update. */ 202 /* Do the atomic update. */
208 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR); 203 __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
209 __BITOPS_BARRIER(); 204 barrier();
210 return (old & mask) != 0; 205 return (old & mask) != 0;
211} 206}
212#endif /* CONFIG_SMP */ 207#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
218{ 213{
219 unsigned long addr; 214 unsigned long addr;
220 215
221 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 216 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
222 asm volatile( 217 asm volatile(
223 " oc %O0(1,%R0),%1" 218 " oc %O0(1,%R0),%1"
224 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 219 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
229{ 224{
230 unsigned long addr; 225 unsigned long addr;
231 226
232 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 227 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
233 *(unsigned char *) addr |= 1 << (nr & 7); 228 *(unsigned char *) addr |= 1 << (nr & 7);
234} 229}
235 230
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
246{ 241{
247 unsigned long addr; 242 unsigned long addr;
248 243
249 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 244 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
250 asm volatile( 245 asm volatile(
251 " nc %O0(1,%R0),%1" 246 " nc %O0(1,%R0),%1"
252 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); 247 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
257{ 252{
258 unsigned long addr; 253 unsigned long addr;
259 254
260 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 255 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
261 *(unsigned char *) addr &= ~(1 << (nr & 7)); 256 *(unsigned char *) addr &= ~(1 << (nr & 7));
262} 257}
263 258
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
273{ 268{
274 unsigned long addr; 269 unsigned long addr;
275 270
276 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 271 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
277 asm volatile( 272 asm volatile(
278 " xc %O0(1,%R0),%1" 273 " xc %O0(1,%R0),%1"
279 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 274 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
284{ 279{
285 unsigned long addr; 280 unsigned long addr;
286 281
287 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 282 addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
288 *(unsigned char *) addr ^= 1 << (nr & 7); 283 *(unsigned char *) addr ^= 1 << (nr & 7);
289} 284}
290 285
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
302 unsigned long addr; 297 unsigned long addr;
303 unsigned char ch; 298 unsigned char ch;
304 299
305 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 300 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
306 ch = *(unsigned char *) addr; 301 ch = *(unsigned char *) addr;
307 asm volatile( 302 asm volatile(
308 " oc %O0(1,%R0),%1" 303 " oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
321 unsigned long addr; 316 unsigned long addr;
322 unsigned char ch; 317 unsigned char ch;
323 318
324 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 319 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
325 ch = *(unsigned char *) addr; 320 ch = *(unsigned char *) addr;
326 asm volatile( 321 asm volatile(
327 " nc %O0(1,%R0),%1" 322 " nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
340 unsigned long addr; 335 unsigned long addr;
341 unsigned char ch; 336 unsigned char ch;
342 337
343 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 338 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
344 ch = *(unsigned char *) addr; 339 ch = *(unsigned char *) addr;
345 asm volatile( 340 asm volatile(
346 " xc %O0(1,%R0),%1" 341 " xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
376 unsigned long addr; 371 unsigned long addr;
377 unsigned char ch; 372 unsigned char ch;
378 373
379 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 374 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
380 ch = *(volatile unsigned char *) addr; 375 ch = *(volatile unsigned char *) addr;
381 return (ch >> (nr & 7)) & 1; 376 return (ch >> (nr & 7)) & 1;
382} 377}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
384static inline int 379static inline int
385__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) { 380__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
386 return (((volatile char *) addr) 381 return (((volatile char *) addr)
387 [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0; 382 [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
388} 383}
389 384
390#define test_bit(nr,addr) \ 385#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
693 688
694 if (offset >= size) 689 if (offset >= size)
695 return size; 690 return size;
696 bit = offset & (__BITOPS_WORDSIZE - 1); 691 bit = offset & (BITS_PER_LONG - 1);
697 offset -= bit; 692 offset -= bit;
698 size -= offset; 693 size -= offset;
699 p = addr + offset / __BITOPS_WORDSIZE; 694 p = addr + offset / BITS_PER_LONG;
700 if (bit) { 695 if (bit) {
701 set = __flo_word(0, *p & (~0UL << bit)); 696 set = __flo_word(0, *p & (~0UL << bit));
702 if (set >= size) 697 if (set >= size)
703 return size + offset; 698 return size + offset;
704 if (set < __BITOPS_WORDSIZE) 699 if (set < BITS_PER_LONG)
705 return set + offset; 700 return set + offset;
706 offset += __BITOPS_WORDSIZE; 701 offset += BITS_PER_LONG;
707 size -= __BITOPS_WORDSIZE; 702 size -= BITS_PER_LONG;
708 p++; 703 p++;
709 } 704 }
710 return offset + find_first_bit_left(p, size); 705 return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
736 731
737 if (offset >= size) 732 if (offset >= size)
738 return size; 733 return size;
739 bit = offset & (__BITOPS_WORDSIZE - 1); 734 bit = offset & (BITS_PER_LONG - 1);
740 offset -= bit; 735 offset -= bit;
741 size -= offset; 736 size -= offset;
742 p = addr + offset / __BITOPS_WORDSIZE; 737 p = addr + offset / BITS_PER_LONG;
743 if (bit) { 738 if (bit) {
744 /* 739 /*
745 * __ffz_word returns __BITOPS_WORDSIZE 740 * __ffz_word returns BITS_PER_LONG
746 * if no zero bit is present in the word. 741 * if no zero bit is present in the word.
747 */ 742 */
748 set = __ffz_word(bit, *p >> bit); 743 set = __ffz_word(bit, *p >> bit);
749 if (set >= size) 744 if (set >= size)
750 return size + offset; 745 return size + offset;
751 if (set < __BITOPS_WORDSIZE) 746 if (set < BITS_PER_LONG)
752 return set + offset; 747 return set + offset;
753 offset += __BITOPS_WORDSIZE; 748 offset += BITS_PER_LONG;
754 size -= __BITOPS_WORDSIZE; 749 size -= BITS_PER_LONG;
755 p++; 750 p++;
756 } 751 }
757 return offset + find_first_zero_bit(p, size); 752 return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
773 768
774 if (offset >= size) 769 if (offset >= size)
775 return size; 770 return size;
776 bit = offset & (__BITOPS_WORDSIZE - 1); 771 bit = offset & (BITS_PER_LONG - 1);
777 offset -= bit; 772 offset -= bit;
778 size -= offset; 773 size -= offset;
779 p = addr + offset / __BITOPS_WORDSIZE; 774 p = addr + offset / BITS_PER_LONG;
780 if (bit) { 775 if (bit) {
781 /* 776 /*
782 * __ffs_word returns __BITOPS_WORDSIZE 777 * __ffs_word returns BITS_PER_LONG
783 * if no one bit is present in the word. 778 * if no one bit is present in the word.
784 */ 779 */
785 set = __ffs_word(0, *p & (~0UL << bit)); 780 set = __ffs_word(0, *p & (~0UL << bit));
786 if (set >= size) 781 if (set >= size)
787 return size + offset; 782 return size + offset;
788 if (set < __BITOPS_WORDSIZE) 783 if (set < BITS_PER_LONG)
789 return set + offset; 784 return set + offset;
790 offset += __BITOPS_WORDSIZE; 785 offset += BITS_PER_LONG;
791 size -= __BITOPS_WORDSIZE; 786 size -= BITS_PER_LONG;
792 p++; 787 p++;
793 } 788 }
794 return offset + find_first_bit(p, size); 789 return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
843 838
844 if (offset >= size) 839 if (offset >= size)
845 return size; 840 return size;
846 bit = offset & (__BITOPS_WORDSIZE - 1); 841 bit = offset & (BITS_PER_LONG - 1);
847 offset -= bit; 842 offset -= bit;
848 size -= offset; 843 size -= offset;
849 p = addr + offset / __BITOPS_WORDSIZE; 844 p = addr + offset / BITS_PER_LONG;
850 if (bit) { 845 if (bit) {
851 /* 846 /*
852 * s390 version of ffz returns __BITOPS_WORDSIZE 847 * s390 version of ffz returns BITS_PER_LONG
853 * if no zero bit is present in the word. 848 * if no zero bit is present in the word.
854 */ 849 */
855 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit); 850 set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
856 if (set >= size) 851 if (set >= size)
857 return size + offset; 852 return size + offset;
858 if (set < __BITOPS_WORDSIZE) 853 if (set < BITS_PER_LONG)
859 return set + offset; 854 return set + offset;
860 offset += __BITOPS_WORDSIZE; 855 offset += BITS_PER_LONG;
861 size -= __BITOPS_WORDSIZE; 856 size -= BITS_PER_LONG;
862 p++; 857 p++;
863 } 858 }
864 return offset + find_first_zero_bit_le(p, size); 859 return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
885 880
886 if (offset >= size) 881 if (offset >= size)
887 return size; 882 return size;
888 bit = offset & (__BITOPS_WORDSIZE - 1); 883 bit = offset & (BITS_PER_LONG - 1);
889 offset -= bit; 884 offset -= bit;
890 size -= offset; 885 size -= offset;
891 p = addr + offset / __BITOPS_WORDSIZE; 886 p = addr + offset / BITS_PER_LONG;
892 if (bit) { 887 if (bit) {
893 /* 888 /*
894 * s390 version of ffz returns __BITOPS_WORDSIZE 889 * s390 version of ffz returns BITS_PER_LONG
895 * if no zero bit is present in the word. 890 * if no zero bit is present in the word.
896 */ 891 */
897 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit)); 892 set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
898 if (set >= size) 893 if (set >= size)
899 return size + offset; 894 return size + offset;
900 if (set < __BITOPS_WORDSIZE) 895 if (set < BITS_PER_LONG)
901 return set + offset; 896 return set + offset;
902 offset += __BITOPS_WORDSIZE; 897 offset += BITS_PER_LONG;
903 size -= __BITOPS_WORDSIZE; 898 size -= BITS_PER_LONG;
904 p++; 899 p++;
905 } 900 }
906 return offset + find_first_bit_le(p, size); 901 return offset + find_first_bit_le(p, size);
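The bitops conversion above drops the private __BITOPS_WORDSIZE/__BITOPS_ALIGN macros in favour of the generic BITS_PER_LONG and replaces __BITOPS_BARRIER() with the common barrier(). The addressing arithmetic all of these helpers share is easier to see in isolation; a stand-alone sketch (function names are hypothetical, BITS_PER_LONG comes from the kernel headers):

    /* mask for bit 'nr' within its word */
    static inline unsigned long example_bit_mask(unsigned long nr)
    {
            return 1UL << (nr & (BITS_PER_LONG - 1));
    }

    /* byte address of the word that contains bit 'nr':
     * nr ^ (nr & (BITS_PER_LONG - 1)) clears the in-word bit index,
     * and >> 3 converts the remaining bit count into bytes.
     */
    static inline unsigned long example_word_addr(const unsigned long *ptr,
                                                  unsigned long nr)
    {
            return (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
    }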
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e6061617a50b..f201af8be580 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
220#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) 220#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
221 221
222extern struct ccw_device *ccw_device_probe_console(void); 222extern struct ccw_device *ccw_device_probe_console(void);
223extern int ccw_device_force_console(void); 223extern void ccw_device_wait_idle(struct ccw_device *);
224extern int ccw_device_force_console(struct ccw_device *);
224 225
225int ccw_device_siosl(struct ccw_device *); 226int ccw_device_siosl(struct ccw_device *);
226 227
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924167d7..ffb898961c8d 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
296 return 0; 296 return 0;
297} 297}
298 298
299extern void wait_cons_dev(void);
300
301extern void css_schedule_reprobe(void); 299extern void css_schedule_reprobe(void);
302 300
303extern void reipl_ccw_dev(struct ccw_dev_id *id); 301extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6cd1f0..c1e7c646727c 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
70typedef u64 compat_u64; 70typedef u64 compat_u64;
71typedef u32 compat_uptr_t; 71typedef u32 compat_uptr_t;
72 72
73typedef struct {
74 u32 mask;
75 u32 addr;
76} __aligned(8) psw_compat_t;
77
78typedef struct {
79 psw_compat_t psw;
80 u32 gprs[NUM_GPRS];
81 u32 acrs[NUM_ACRS];
82 u32 orig_gpr2;
83} s390_compat_regs;
84
85typedef struct {
86 u32 gprs_high[NUM_GPRS];
87} s390_compat_regs_high;
88
73struct compat_timespec { 89struct compat_timespec {
74 compat_time_t tv_sec; 90 compat_time_t tv_sec;
75 s32 tv_nsec; 91 s32 tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
124}; 140};
125 141
126struct compat_statfs { 142struct compat_statfs {
127 s32 f_type; 143 u32 f_type;
128 s32 f_bsize; 144 u32 f_bsize;
129 s32 f_blocks; 145 u32 f_blocks;
130 s32 f_bfree; 146 u32 f_bfree;
131 s32 f_bavail; 147 u32 f_bavail;
132 s32 f_files; 148 u32 f_files;
133 s32 f_ffree; 149 u32 f_ffree;
150 compat_fsid_t f_fsid;
151 u32 f_namelen;
152 u32 f_frsize;
153 u32 f_flags;
154 u32 f_spare[4];
155};
156
157struct compat_statfs64 {
158 u32 f_type;
159 u32 f_bsize;
160 u64 f_blocks;
161 u64 f_bfree;
162 u64 f_bavail;
163 u64 f_files;
164 u64 f_ffree;
134 compat_fsid_t f_fsid; 165 compat_fsid_t f_fsid;
135 s32 f_namelen; 166 u32 f_namelen;
136 s32 f_frsize; 167 u32 f_frsize;
137 s32 f_flags; 168 u32 f_flags;
138 s32 f_spare[5]; 169 u32 f_spare[4];
139}; 170};
140 171
141#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff 172#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
248 return is_32bit_task(); 279 return is_32bit_task();
249} 280}
250 281
251#endif
252
253static inline void __user *arch_compat_alloc_user_space(long len) 282static inline void __user *arch_compat_alloc_user_space(long len)
254{ 283{
255 unsigned long stack; 284 unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
260 return (void __user *) (stack - len); 289 return (void __user *) (stack - len);
261} 290}
262 291
292#endif
293
263struct compat_ipc64_perm { 294struct compat_ipc64_perm {
264 compat_key_t key; 295 compat_key_t key;
265 __compat_uid32_t uid; 296 __compat_uid32_t uid;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index f1eddd150dd7..c879fad404c8 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,7 @@
12#ifndef _ASM_S390_CPU_MF_H 12#ifndef _ASM_S390_CPU_MF_H
13#define _ASM_S390_CPU_MF_H 13#define _ASM_S390_CPU_MF_H
14 14
15#include <linux/errno.h>
15#include <asm/facility.h> 16#include <asm/facility.h>
16 17
17#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ 18#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 8d4847191ecc..dc9200ca32ed 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -34,6 +34,8 @@ struct arsb {
34 u32 reserved[4]; 34 u32 reserved[4];
35} __packed; 35} __packed;
36 36
37#define EQC_WR_PROHIBIT 22
38
37struct msb { 39struct msb {
38 u8 fmt:4; 40 u8 fmt:4;
39 u8 oc:4; 41 u8 oc:4;
@@ -96,11 +98,13 @@ struct scm_device {
96#define OP_STATE_TEMP_ERR 2 98#define OP_STATE_TEMP_ERR 2
97#define OP_STATE_PERM_ERR 3 99#define OP_STATE_PERM_ERR 3
98 100
101enum scm_event {SCM_CHANGE, SCM_AVAIL};
102
99struct scm_driver { 103struct scm_driver {
100 struct device_driver drv; 104 struct device_driver drv;
101 int (*probe) (struct scm_device *scmdev); 105 int (*probe) (struct scm_device *scmdev);
102 int (*remove) (struct scm_device *scmdev); 106 int (*remove) (struct scm_device *scmdev);
103 void (*notify) (struct scm_device *scmdev); 107 void (*notify) (struct scm_device *scmdev, enum scm_event event);
104 void (*handler) (struct scm_device *scmdev, void *data, int error); 108 void (*handler) (struct scm_device *scmdev, void *data, int error);
105}; 109};
106 110
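The eadm change above introduces enum scm_event and passes it to the driver's notify() callback, so a driver can tell a state change apart from the device becoming available again. A hedged sketch of a driver filling in the new callback; apart from the types and constants from asm/eadm.h, every name here is invented:

    #include <asm/eadm.h>

    static void example_scm_notify(struct scm_device *scmdev, enum scm_event event)
    {
            switch (event) {
            case SCM_CHANGE:
                    /* react to a change of the device's operational state */
                    break;
            case SCM_AVAIL:
                    /* handle renewed availability of the device */
                    break;
            }
    }

    static struct scm_driver example_scm_driver = {
            .drv    = { .name = "example_scm" },
            .notify = example_scm_notify,
    };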
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24b85a2..78f4f8711d58 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
119 */ 119 */
120 120
121#include <asm/ptrace.h> 121#include <asm/ptrace.h>
122#include <asm/compat.h>
123#include <asm/syscall.h>
122#include <asm/user.h> 124#include <asm/user.h>
123 125
124typedef s390_fp_regs elf_fpregset_t; 126typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
180extern char elf_platform[]; 182extern char elf_platform[];
181#define ELF_PLATFORM (elf_platform) 183#define ELF_PLATFORM (elf_platform)
182 184
183#ifdef CONFIG_64BIT 185#ifndef CONFIG_COMPAT
186#define SET_PERSONALITY(ex) \
187do { \
188 set_personality(PER_LINUX | \
189 (current->personality & (~PER_MASK))); \
190 current_thread_info()->sys_call_table = \
191 (unsigned long) &sys_call_table; \
192} while (0)
193#else /* CONFIG_COMPAT */
184#define SET_PERSONALITY(ex) \ 194#define SET_PERSONALITY(ex) \
185do { \ 195do { \
186 if (personality(current->personality) != PER_LINUX32) \ 196 if (personality(current->personality) != PER_LINUX32) \
187 set_personality(PER_LINUX | \ 197 set_personality(PER_LINUX | \
188 (current->personality & ~PER_MASK)); \ 198 (current->personality & ~PER_MASK)); \
189 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 199 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
190 set_thread_flag(TIF_31BIT); \ 200 set_thread_flag(TIF_31BIT); \
191 else \ 201 current_thread_info()->sys_call_table = \
202 (unsigned long) &sys_call_table_emu; \
203 } else { \
192 clear_thread_flag(TIF_31BIT); \ 204 clear_thread_flag(TIF_31BIT); \
205 current_thread_info()->sys_call_table = \
206 (unsigned long) &sys_call_table; \
207 } \
193} while (0) 208} while (0)
194#endif /* CONFIG_64BIT */ 209#endif /* CONFIG_COMPAT */
195 210
196#define STACK_RND_MASK 0x7ffUL 211#define STACK_RND_MASK 0x7ffUL
197 212
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 593753ee07f3..bd90359d6d22 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
114#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ 114#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
115({ \ 115({ \
116 pte_t __pte = huge_ptep_get(__ptep); \ 116 pte_t __pte = huge_ptep_get(__ptep); \
117 if (pte_write(__pte)) { \ 117 if (huge_pte_write(__pte)) { \
118 huge_ptep_invalidate(__mm, __addr, __ptep); \ 118 huge_ptep_invalidate(__mm, __addr, __ptep); \
119 set_huge_pte_at(__mm, __addr, __ptep, \ 119 set_huge_pte_at(__mm, __addr, __ptep, \
120 huge_pte_wrprotect(__pte)); \ 120 huge_pte_wrprotect(__pte)); \
@@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
127 huge_ptep_invalidate(vma->vm_mm, address, ptep); 127 huge_ptep_invalidate(vma->vm_mm, address, ptep);
128} 128}
129 129
130static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
131{
132 pte_t pte;
133 pmd_t pmd;
134
135 pmd = mk_pmd_phys(page_to_phys(page), pgprot);
136 pte_val(pte) = pmd_val(pmd);
137 return pte;
138}
139
140static inline int huge_pte_write(pte_t pte)
141{
142 pmd_t pmd;
143
144 pmd_val(pmd) = pte_val(pte);
145 return pmd_write(pmd);
146}
147
148static inline int huge_pte_dirty(pte_t pte)
149{
150 /* No dirty bit in the segment table entry. */
151 return 0;
152}
153
154static inline pte_t huge_pte_mkwrite(pte_t pte)
155{
156 pmd_t pmd;
157
158 pmd_val(pmd) = pte_val(pte);
159 pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
160 return pte;
161}
162
163static inline pte_t huge_pte_mkdirty(pte_t pte)
164{
165 /* No dirty bit in the segment table entry. */
166 return pte;
167}
168
169static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
170{
171 pmd_t pmd;
172
173 pmd_val(pmd) = pte_val(pte);
174 pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
175 return pte;
176}
177
178static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
179 pte_t *ptep)
180{
181 pmd_clear((pmd_t *) ptep);
182}
183
130#endif /* _ASM_S390_HUGETLB_H */ 184#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27cb32185ce1..379d96e2105e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
50#define ioremap_nocache(addr, size) ioremap(addr, size) 50#define ioremap_nocache(addr, size) ioremap(addr, size)
51#define ioremap_wc ioremap_nocache 51#define ioremap_wc ioremap_nocache
52 52
53/* TODO: s390 cannot support io_remap_pfn_range... */
54#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
55 remap_pfn_range(vma, vaddr, pfn, size, prot)
56
57static inline void __iomem *ioremap(unsigned long offset, unsigned long size) 53static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
58{ 54{
59 return (void __iomem *) offset; 55 return (void __iomem *) offset;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7f0469..6c1801235db9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
140struct zpci_dev *zpci_alloc_device(void); 140struct zpci_dev *zpci_alloc_device(void);
141int zpci_create_device(struct zpci_dev *); 141int zpci_create_device(struct zpci_dev *);
142int zpci_enable_device(struct zpci_dev *); 142int zpci_enable_device(struct zpci_dev *);
143int zpci_disable_device(struct zpci_dev *);
143void zpci_stop_device(struct zpci_dev *); 144void zpci_stop_device(struct zpci_dev *);
144void zpci_free_device(struct zpci_dev *); 145void zpci_free_device(struct zpci_dev *);
145int zpci_scan_device(struct zpci_dev *); 146int zpci_scan_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6e..1ca5d1047c71 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
7extern debug_info_t *pci_debug_err_id; 7extern debug_info_t *pci_debug_err_id;
8 8
9#ifdef CONFIG_PCI_DEBUG 9#ifdef CONFIG_PCI_DEBUG
10#define zpci_dbg(fmt, args...) \ 10#define zpci_dbg(imp, fmt, args...) \
11 do { \ 11 debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
12 if (pci_debug_msg_id->level >= 2) \
13 debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
14 } while (0)
15 12
16#else /* !CONFIG_PCI_DEBUG */ 13#else /* !CONFIG_PCI_DEBUG */
17#define zpci_dbg(fmt, args...) do { } while (0) 14#define zpci_dbg(imp, fmt, args...) do { } while (0)
18#endif 15#endif
19 16
20#define zpci_err(text...) \ 17#define zpci_err(text...) \
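With the pci_debug change above, zpci_dbg() takes the s390 debug-feature importance level as its first argument instead of hard-coding level 2. A hypothetical call site after the change (the fid variable and the message text are assumptions, not taken from the patch):

    zpci_dbg(3, "add fid:%x\n", fid);

Messages are then filtered by debug_sprintf_event() against the debug facility's current level rather than by the macro itself.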
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98d5dad..e6a2bdd4d705 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_S390_PCI_INSN_H 1#ifndef _ASM_S390_PCI_INSN_H
2#define _ASM_S390_PCI_INSN_H 2#define _ASM_S390_PCI_INSN_H
3 3
4#include <linux/delay.h>
5
6#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
7
8/* Load/Store status codes */ 4/* Load/Store status codes */
9#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4 5#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
10#define ZPCI_PCI_ST_FUNC_IN_ERR 8 6#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
82 u64 reserved7; 78 u64 reserved7;
83} __packed; 79} __packed;
84 80
85/* Modify PCI Function Controls */
86static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
87{
88 u8 cc;
89
90 asm volatile (
91 " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
92 " ipm %[cc]\n"
93 " srl %[cc],28\n"
94 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
95 : : "cc");
96 *status = req >> 24 & 0xff;
97 return cc;
98}
99
100static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
101{
102 u8 cc, status;
103
104 do {
105 cc = __mpcifc(req, fib, &status);
106 if (cc == 2)
107 msleep(ZPCI_INSN_BUSY_DELAY);
108 } while (cc == 2);
109
110 if (cc)
111 printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
112 __func__, cc, status);
113 return (cc) ? -EIO : 0;
114}
115
116/* Refresh PCI Translations */
117static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
118{
119 register u64 __addr asm("2") = addr;
120 register u64 __range asm("3") = range;
121 u8 cc;
122
123 asm volatile (
124 " .insn rre,0xb9d30000,%[fn],%[addr]\n"
125 " ipm %[cc]\n"
126 " srl %[cc],28\n"
127 : [cc] "=d" (cc), [fn] "+d" (fn)
128 : [addr] "d" (__addr), "d" (__range)
129 : "cc");
130 *status = fn >> 24 & 0xff;
131 return cc;
132}
133
134static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
135{
136 u8 cc, status;
137
138 do {
139 cc = __rpcit(fn, addr, range, &status);
140 if (cc == 2)
141 udelay(ZPCI_INSN_BUSY_DELAY);
142 } while (cc == 2);
143
144 if (cc)
145 printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
146 __func__, cc, status, addr, range);
147 return (cc) ? -EIO : 0;
148}
149
150/* Store PCI function controls */
151static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
152{
153 u64 fn = (u64) handle << 32 | space << 16;
154 u8 cc;
155
156 asm volatile (
157 " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
158 " ipm %[cc]\n"
159 " srl %[cc],28\n"
160 : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
161 : : "cc");
162 *status = fn >> 24 & 0xff;
163 return cc;
164}
165
166/* Set Interruption Controls */
167static inline void sic_instr(u16 ctl, char *unused, u8 isc)
168{
169 asm volatile (
170 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
171 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
172}
173
174/* PCI Load */
175static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
176{
177 register u64 __req asm("2") = req;
178 register u64 __offset asm("3") = offset;
179 u64 __data;
180 u8 cc;
181
182 asm volatile (
183 " .insn rre,0xb9d20000,%[data],%[req]\n"
184 " ipm %[cc]\n"
185 " srl %[cc],28\n"
186 : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
187 : "d" (__offset)
188 : "cc");
189 *status = __req >> 24 & 0xff;
190 *data = __data;
191 return cc;
192}
193
194static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
195{
196 u8 cc, status;
197
198 do {
199 cc = __pcilg(data, req, offset, &status);
200 if (cc == 2)
201 udelay(ZPCI_INSN_BUSY_DELAY);
202 } while (cc == 2);
203
204 if (cc) {
205 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
206 __func__, cc, status, req, offset);
207 /* TODO: on IO errors set data to 0xff...
208 * here or in users of pcilg (le conversion)?
209 */
210 }
211 return (cc) ? -EIO : 0;
212}
213
214/* PCI Store */
215static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
216{
217 register u64 __req asm("2") = req;
218 register u64 __offset asm("3") = offset;
219 u8 cc;
220
221 asm volatile (
222 " .insn rre,0xb9d00000,%[data],%[req]\n"
223 " ipm %[cc]\n"
224 " srl %[cc],28\n"
225 : [cc] "=d" (cc), [req] "+d" (__req)
226 : "d" (__offset), [data] "d" (data)
227 : "cc");
228 *status = __req >> 24 & 0xff;
229 return cc;
230}
231
232static inline int pcistg_instr(u64 data, u64 req, u64 offset)
233{
234 u8 cc, status;
235
236 do {
237 cc = __pcistg(data, req, offset, &status);
238 if (cc == 2)
239 udelay(ZPCI_INSN_BUSY_DELAY);
240 } while (cc == 2);
241
242 if (cc)
243 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
244 __func__, cc, status, req, offset);
245 return (cc) ? -EIO : 0;
246}
247
248/* PCI Store Block */
249static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
250{
251 u8 cc;
252
253 asm volatile (
254 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
255 " ipm %[cc]\n"
256 " srl %[cc],28\n"
257 : [cc] "=d" (cc), [req] "+d" (req)
258 : [offset] "d" (offset), [data] "Q" (*data)
259 : "cc");
260 *status = req >> 24 & 0xff;
261 return cc;
262}
263
264static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
265{
266 u8 cc, status;
267
268 do {
269 cc = __pcistb(data, req, offset, &status);
270 if (cc == 2)
271 udelay(ZPCI_INSN_BUSY_DELAY);
272 } while (cc == 2);
273 81
274 if (cc) 82int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
275 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", 83int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
276 __func__, cc, status, req, offset); 84int s390pci_load(u64 *data, u64 req, u64 offset);
277 return (cc) ? -EIO : 0; 85int s390pci_store(u64 data, u64 req, u64 offset);
278} 86int s390pci_store_block(const u64 *data, u64 req, u64 offset);
87void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
279 88
280#endif 89#endif
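The pci_insn.h hunk above removes the large inline wrappers and declares out-of-line s390pci_* helpers instead; the removed wrappers all followed the same busy-retry scheme around the raw instruction. A sketch of that scheme, with a hypothetical primitive standing in for the removed __mpcifc()/__pcilg()/... inlines:

    #include <linux/delay.h>
    #include <linux/errno.h>

    static u8 __example_insn(u64 req, struct zpci_fib *fib, u8 *status);  /* hypothetical */

    static int example_issue_with_retry(u64 req, struct zpci_fib *fib)
    {
            u8 cc, status;

            do {
                    cc = __example_insn(req, fib, &status);
                    if (cc == 2)            /* condition code 2: busy */
                            msleep(1);      /* back off, then retry */
            } while (cc == 2);

            return cc ? -EIO : 0;
    }

The out-of-line s390pci_* functions presumably keep equivalent behaviour while taking the delay and printk machinery, and its header dependencies, out of the header.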
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c7..83a9caa6ae53 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
36 u64 data; \ 36 u64 data; \
37 int rc; \ 37 int rc; \
38 \ 38 \
39 rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \ 39 rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
40 if (rc) \ 40 if (rc) \
41 data = -1ULL; \ 41 data = -1ULL; \
42 return (RETTYPE) data; \ 42 return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
50 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ 50 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
51 u64 data = (VALTYPE) val; \ 51 u64 data = (VALTYPE) val; \
52 \ 52 \
53 pcistg_instr(data, req, ZPCI_OFFSET(addr)); \ 53 s390pci_store(data, req, ZPCI_OFFSET(addr)); \
54} 54}
55 55
56zpci_read(8, u64) 56zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
83 val = 0; /* let FW report error */ 83 val = 0; /* let FW report error */
84 break; 84 break;
85 } 85 }
86 return pcistg_instr(val, req, offset); 86 return s390pci_store(val, req, offset);
87} 87}
88 88
89static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) 89static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
90{ 90{
91 u64 data; 91 u64 data;
92 u8 cc; 92 int cc;
93
94 cc = s390pci_load(&data, req, offset);
95 if (cc)
96 goto out;
93 97
94 cc = pcilg_instr(&data, req, offset);
95 switch (len) { 98 switch (len) {
96 case 1: 99 case 1:
97 *((u8 *) dst) = (u8) data; 100 *((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
106 *((u64 *) dst) = (u64) data; 109 *((u64 *) dst) = (u64) data;
107 break; 110 break;
108 } 111 }
112out:
109 return cc; 113 return cc;
110} 114}
111 115
112static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) 116static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
113{ 117{
114 return pcistb_instr(data, req, offset); 118 return s390pci_store_block(data, req, offset);
115} 119}
116 120
117static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) 121static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a2930844d43..b4622915bd15 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
57 (((unsigned long)(vaddr)) &zero_page_mask)))) 57 (((unsigned long)(vaddr)) &zero_page_mask))))
58#define __HAVE_COLOR_ZERO_PAGE 58#define __HAVE_COLOR_ZERO_PAGE
59 59
60/* TODO: s390 cannot support io_remap_pfn_range... */
61#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
62 remap_pfn_range(vma, vaddr, pfn, size, prot)
63
60#endif /* !__ASSEMBLY__ */ 64#endif /* !__ASSEMBLY__ */
61 65
62/* 66/*
@@ -344,6 +348,7 @@ extern unsigned long MODULES_END;
344#define _REGION3_ENTRY_CO 0x100 /* change-recording override */ 348#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
345 349
346/* Bits in the segment table entry */ 350/* Bits in the segment table entry */
351#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
347#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ 352#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
348#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ 353#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
349#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ 354#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
@@ -419,6 +424,13 @@ extern unsigned long MODULES_END;
419#define __S110 PAGE_RW 424#define __S110 PAGE_RW
420#define __S111 PAGE_RW 425#define __S111 PAGE_RW
421 426
427/*
428 * Segment entry (large page) protection definitions.
429 */
430#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
431#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
432#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
433
422static inline int mm_exclusive(struct mm_struct *mm) 434static inline int mm_exclusive(struct mm_struct *mm)
423{ 435{
424 return likely(mm == current->active_mm && 436 return likely(mm == current->active_mm &&
@@ -759,6 +771,8 @@ void gmap_disable(struct gmap *gmap);
759int gmap_map_segment(struct gmap *gmap, unsigned long from, 771int gmap_map_segment(struct gmap *gmap, unsigned long from,
760 unsigned long to, unsigned long length); 772 unsigned long to, unsigned long length);
761int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); 773int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
774unsigned long __gmap_translate(unsigned long address, struct gmap *);
775unsigned long gmap_translate(unsigned long address, struct gmap *);
762unsigned long __gmap_fault(unsigned long address, struct gmap *); 776unsigned long __gmap_fault(unsigned long address, struct gmap *);
763unsigned long gmap_fault(unsigned long address, struct gmap *); 777unsigned long gmap_fault(unsigned long address, struct gmap *);
764void gmap_discard(unsigned long from, unsigned long to, struct gmap *); 778void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
@@ -907,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
907#ifdef CONFIG_HUGETLB_PAGE 921#ifdef CONFIG_HUGETLB_PAGE
908static inline pte_t pte_mkhuge(pte_t pte) 922static inline pte_t pte_mkhuge(pte_t pte)
909{ 923{
910 /*
911 * PROT_NONE needs to be remapped from the pte type to the ste type.
912 * The HW invalid bit is also different for pte and ste. The pte
913 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
914 * bit, so we don't have to clear it.
915 */
916 if (pte_val(pte) & _PAGE_INVALID) {
917 if (pte_val(pte) & _PAGE_SWT)
918 pte_val(pte) |= _HPAGE_TYPE_NONE;
919 pte_val(pte) |= _SEGMENT_ENTRY_INV;
920 }
921 /*
922 * Clear SW pte bits, there are no SW bits in a segment table entry.
923 */
924 pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
925 _PAGE_SWR | _PAGE_SWW);
926 /*
927 * Also set the change-override bit because we don't need dirty bit
928 * tracking for hugetlbfs pages.
929 */
930 pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); 924 pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
931 return pte; 925 return pte;
932} 926}
@@ -1271,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
1271 } 1265 }
1272} 1266}
1273 1267
1274#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1268#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1275
1276#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
1277#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
1278#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
1279
1280#define __HAVE_ARCH_PGTABLE_DEPOSIT
1281extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
1282
1283#define __HAVE_ARCH_PGTABLE_WITHDRAW
1284extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
1285
1286static inline int pmd_trans_splitting(pmd_t pmd)
1287{
1288 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
1289}
1290
1291static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1292 pmd_t *pmdp, pmd_t entry)
1293{
1294 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
1295 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1296 *pmdp = entry;
1297}
1298
1299static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) 1269static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1300{ 1270{
1301 /* 1271 /*
@@ -1316,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1316 return pmd; 1286 return pmd;
1317} 1287}
1318 1288
1319static inline pmd_t pmd_mkhuge(pmd_t pmd) 1289static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1320{ 1290{
1321 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1291 pmd_t __pmd;
1322 return pmd; 1292 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1293 return __pmd;
1323} 1294}
1324 1295
1325static inline pmd_t pmd_mkwrite(pmd_t pmd) 1296static inline pmd_t pmd_mkwrite(pmd_t pmd)
@@ -1329,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
1329 pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; 1300 pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
1330 return pmd; 1301 return pmd;
1331} 1302}
1303#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1304
1305#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1306
1307#define __HAVE_ARCH_PGTABLE_DEPOSIT
1308extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
1309
1310#define __HAVE_ARCH_PGTABLE_WITHDRAW
1311extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
1312
1313static inline int pmd_trans_splitting(pmd_t pmd)
1314{
1315 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
1316}
1317
1318static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1319 pmd_t *pmdp, pmd_t entry)
1320{
1321 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
1322 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1323 *pmdp = entry;
1324}
1325
1326static inline pmd_t pmd_mkhuge(pmd_t pmd)
1327{
1328 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1329 return pmd;
1330}
1332 1331
1333static inline pmd_t pmd_wrprotect(pmd_t pmd) 1332static inline pmd_t pmd_wrprotect(pmd_t pmd)
1334{ 1333{
@@ -1425,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1425 } 1424 }
1426} 1425}
1427 1426
1428static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1429{
1430 pmd_t __pmd;
1431 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1432 return __pmd;
1433}
1434
1435#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot)) 1427#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1436#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) 1428#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1437 1429
@@ -1531,7 +1523,8 @@ extern int s390_enable_sie(void);
1531/* 1523/*
1532 * No page table caches to initialise 1524 * No page table caches to initialise
1533 */ 1525 */
1534#define pgtable_cache_init() do { } while (0) 1526static inline void pgtable_cache_init(void) { }
1527static inline void check_pgt_cache(void) { }
1535 1528
1536#include <asm-generic/pgtable.h> 1529#include <asm-generic/pgtable.h>
1537 1530
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c90230..6b499870662f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
161 161
162extern void show_code(struct pt_regs *regs); 162extern void show_code(struct pt_regs *regs);
163extern void print_fn_code(unsigned char *code, unsigned long len); 163extern void print_fn_code(unsigned char *code, unsigned long len);
164extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]); 164extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
165 unsigned int len);
165 166
166unsigned long get_wchan(struct task_struct *p); 167unsigned long get_wchan(struct task_struct *p);
167#define task_pt_regs(tsk) ((struct pt_regs *) \ 168#define task_pt_regs(tsk) ((struct pt_regs *) \
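insn_to_mnemonic() above now takes the size of the destination buffer rather than assuming an 8-byte array. A hedged call-site sketch; the insn pointer and the 0-on-success return convention are assumptions, not stated by the patch:

    char buf[16];

    /* assumption: returns 0 when the instruction bytes were recognised */
    if (insn_to_mnemonic(insn, buf, sizeof(buf)) == 0)
            pr_info("mnemonic: %s\n", buf);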
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10c..559512a455da 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
9#include <uapi/asm/ptrace.h> 9#include <uapi/asm/ptrace.h>
10 10
11#ifndef __ASSEMBLY__ 11#ifndef __ASSEMBLY__
12#ifndef __s390x__ 12
13#else /* __s390x__ */
14#endif /* __s390x__ */
15extern long psw_kernel_bits; 13extern long psw_kernel_bits;
16extern long psw_user_bits; 14extern long psw_user_bits;
17 15
@@ -77,8 +75,6 @@ struct per_struct_kernel {
77#define PER_CONTROL_SUSPENSION 0x00400000UL 75#define PER_CONTROL_SUSPENSION 0x00400000UL
78#define PER_CONTROL_ALTERATION 0x00200000UL 76#define PER_CONTROL_ALTERATION 0x00200000UL
79 77
80#ifdef __s390x__
81#endif /* __s390x__ */
82/* 78/*
83 * These are defined as per linux/ptrace.h, which see. 79 * These are defined as per linux/ptrace.h, which see.
84 */ 80 */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b99759e12..cd29d2f4e4f3 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
23 * type here is what we want [need] for both 32 bit and 64 bit systems. 23 * type here is what we want [need] for both 32 bit and 64 bit systems.
24 */ 24 */
25extern const unsigned int sys_call_table[]; 25extern const unsigned int sys_call_table[];
26extern const unsigned int sys_call_table_emu[];
26 27
27static inline long syscall_get_nr(struct task_struct *task, 28static inline long syscall_get_nr(struct task_struct *task,
28 struct pt_regs *regs) 29 struct pt_regs *regs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c3..eb5f64d26d06 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
14#define THREAD_ORDER 1 14#define THREAD_ORDER 1
15#define ASYNC_ORDER 1 15#define ASYNC_ORDER 1
16#else /* CONFIG_64BIT */ 16#else /* CONFIG_64BIT */
17#ifndef __SMALL_STACK
18#define THREAD_ORDER 2 17#define THREAD_ORDER 2
19#define ASYNC_ORDER 2 18#define ASYNC_ORDER 2
20#else
21#define THREAD_ORDER 1
22#define ASYNC_ORDER 1
23#endif
24#endif /* CONFIG_64BIT */ 19#endif /* CONFIG_64BIT */
25 20
26#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 21#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
41 struct task_struct *task; /* main task structure */ 36 struct task_struct *task; /* main task structure */
42 struct exec_domain *exec_domain; /* execution domain */ 37 struct exec_domain *exec_domain; /* execution domain */
43 unsigned long flags; /* low level flags */ 38 unsigned long flags; /* low level flags */
39 unsigned long sys_call_table; /* System call table address */
44 unsigned int cpu; /* current CPU */ 40 unsigned int cpu; /* current CPU */
45 int preempt_count; /* 0 => preemptable, <0 => BUG */ 41 int preempt_count; /* 0 => preemptable, <0 => BUG */
46 struct restart_block restart_block; 42 struct restart_block restart_block;
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8fe2b17ef6..6b32af30878c 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
74 74
75static inline void __tlb_flush_mm(struct mm_struct * mm) 75static inline void __tlb_flush_mm(struct mm_struct * mm)
76{ 76{
77 if (unlikely(cpumask_empty(mm_cpumask(mm))))
78 return;
79 /* 77 /*
80 * If the machine has IDTE we prefer to do a per mm flush 78 * If the machine has IDTE we prefer to do a per mm flush
81 * on all cpus instead of doing a local flush if the mm 79 * on all cpus instead of doing a local flush if the mm
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a5ca214b34fd..3aa9f1ec5b29 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
215 unsigned long addr; 215 unsigned long addr;
216} __attribute__ ((aligned(8))) psw_t; 216} __attribute__ ((aligned(8))) psw_t;
217 217
218typedef struct
219{
220 __u32 mask;
221 __u32 addr;
222} __attribute__ ((aligned(8))) psw_compat_t;
223
224#ifndef __s390x__ 218#ifndef __s390x__
225 219
226#define PSW_MASK_PER 0x40000000UL 220#define PSW_MASK_PER 0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
295 unsigned long orig_gpr2; 289 unsigned long orig_gpr2;
296} s390_regs; 290} s390_regs;
297 291
298typedef struct
299{
300 psw_compat_t psw;
301 __u32 gprs[NUM_GPRS];
302 __u32 acrs[NUM_ACRS];
303 __u32 orig_gpr2;
304} s390_compat_regs;
305
306typedef struct
307{
308 __u32 gprs_high[NUM_GPRS];
309} s390_compat_regs_high;
310
311
312/* 292/*
313 * Now for the user space program event recording (trace) definitions. 293 * Now for the user space program event recording (trace) definitions.
314 * The following structures are used only for the ptrace interface, don't 294 * The following structures are used only for the ptrace interface, don't
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 5acca0a34c20..a61d538756f2 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
7#ifndef _S390_STATFS_H 7#ifndef _S390_STATFS_H
8#define _S390_STATFS_H 8#define _S390_STATFS_H
9 9
10#ifndef __s390x__
11#include <asm-generic/statfs.h>
12#else
13/* 10/*
14 * We can't use <asm-generic/statfs.h> because in 64-bit mode 11 * We can't use <asm-generic/statfs.h> because in 64-bit mode
15 * we mix ints of different sizes in our struct statfs. 12 * we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
21#endif 18#endif
22 19
23struct statfs { 20struct statfs {
24 int f_type; 21 unsigned int f_type;
25 int f_bsize; 22 unsigned int f_bsize;
26 long f_blocks; 23 unsigned long f_blocks;
27 long f_bfree; 24 unsigned long f_bfree;
28 long f_bavail; 25 unsigned long f_bavail;
29 long f_files; 26 unsigned long f_files;
30 long f_ffree; 27 unsigned long f_ffree;
31 __kernel_fsid_t f_fsid; 28 __kernel_fsid_t f_fsid;
32 int f_namelen; 29 unsigned int f_namelen;
33 int f_frsize; 30 unsigned int f_frsize;
34 int f_flags; 31 unsigned int f_flags;
35 int f_spare[4]; 32 unsigned int f_spare[4];
36}; 33};
37 34
38struct statfs64 { 35struct statfs64 {
39 int f_type; 36 unsigned int f_type;
40 int f_bsize; 37 unsigned int f_bsize;
41 long f_blocks; 38 unsigned long f_blocks;
42 long f_bfree; 39 unsigned long f_bfree;
43 long f_bavail; 40 unsigned long f_bavail;
44 long f_files; 41 unsigned long f_files;
45 long f_ffree; 42 unsigned long f_ffree;
46 __kernel_fsid_t f_fsid; 43 __kernel_fsid_t f_fsid;
47 int f_namelen; 44 unsigned int f_namelen;
48 int f_frsize; 45 unsigned int f_frsize;
49 int f_flags; 46 unsigned int f_flags;
50 int f_spare[4]; 47 unsigned int f_spare[4];
51}; 48};
52 49
53struct compat_statfs64 {
54 __u32 f_type;
55 __u32 f_bsize;
56 __u64 f_blocks;
57 __u64 f_bfree;
58 __u64 f_bavail;
59 __u64 f_files;
60 __u64 f_ffree;
61 __kernel_fsid_t f_fsid;
62 __u32 f_namelen;
63 __u32 f_frsize;
64 __u32 f_flags;
65 __u32 f_spare[4];
66};
67
68#endif /* __s390x__ */
69#endif 50#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2ac311ef5c9b..1386fcaf4ef6 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -14,16 +14,25 @@ endif
14CFLAGS_smp.o := -Wno-nonnull 14CFLAGS_smp.o := -Wno-nonnull
15 15
16# 16#
17# Disable tailcall optimizations for stack / callchain walking functions
18# since this might generate broken code when accessing register 15 and
19# passing its content to other functions.
20#
21CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
22CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
23
24#
17# Pass UTS_MACHINE for user_regset definition 25# Pass UTS_MACHINE for user_regset definition
18# 26#
19CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' 27CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
20 28
21CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
22 30
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ 31obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ 32obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ 33obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
26 sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 34obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
35obj-y += dumpstack.o
27 36
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 37obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 38obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
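
The new -fno-optimize-sibling-calls flags are needed because a tail call lets the compiler reuse the caller's stack frame, which invalidates a stack pointer the function captured earlier. A minimal sketch of the failure mode (hypothetical helper names, not code from this patch):

    /* Sketch only: why sibling-call optimization breaks stack-pointer walkers. */
    extern void walk_backchain(unsigned long sp);

    void show_my_trace(void)
    {
            unsigned long sp = (unsigned long) __builtin_frame_address(0);

            /*
             * With -foptimize-sibling-calls this call in tail position may be
             * compiled as a branch that reuses show_my_trace()'s frame, so the
             * walker would start from a frame that is no longer valid.
             */
            walk_backchain(sp);
    }
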
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fface87056eb..7a82f9f70100 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
35 DEFINE(__TI_task, offsetof(struct thread_info, task)); 35 DEFINE(__TI_task, offsetof(struct thread_info, task));
36 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); 36 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
38 DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
38 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); 39 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
39 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); 40 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
40 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); 41 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
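
Each DEFINE() in asm-offsets.c becomes an assembler-visible constant, so the new entry effectively adds something like the following to the generated include/generated/asm-offsets.h (the numeric value here is illustrative and depends on the thread_info layout):

    /* generated header excerpt, value illustrative */
    #define __TI_sysc_table 16  /* offsetof(struct thread_info, sys_call_table) */

This is what lets the entry code below fetch the per-task system call table with a single "l %r10,__TI_sysc_table(%r12)" instead of re-selecting the table on every system call.
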
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6de049fbe62d..c439ac9ced09 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
362 /* set extra registers only for synchronous signals */ 362 /* set extra registers only for synchronous signals */
363 regs->gprs[4] = regs->int_code & 127; 363 regs->gprs[4] = regs->int_code & 127;
364 regs->gprs[5] = regs->int_parm_long; 364 regs->gprs[5] = regs->int_parm_long;
365 regs->gprs[6] = task_thread_info(current)->last_break;
365 } 366 }
366 367
367 /* Place signal number on stack to allow backtrace from handler. */ 368 /* Place signal number on stack to allow backtrace from handler. */
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
421 regs->gprs[2] = map_signal(sig); 422 regs->gprs[2] = map_signal(sig);
422 regs->gprs[3] = (__force __u64) &frame->info; 423 regs->gprs[3] = (__force __u64) &frame->info;
423 regs->gprs[4] = (__force __u64) &frame->uc; 424 regs->gprs[4] = (__force __u64) &frame->uc;
425 regs->gprs[5] = task_thread_info(current)->last_break;
424 return 0; 426 return 0;
425 427
426give_sigsegv: 428give_sigsegv:
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3ad5e9540160..7f4a4a8c847c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
1696 * insn_to_mnemonic - decode an s390 instruction 1696 * insn_to_mnemonic - decode an s390 instruction
1697 * @instruction: instruction to decode 1697 * @instruction: instruction to decode
1698 * @buf: buffer to fill with mnemonic 1698 * @buf: buffer to fill with mnemonic
1699 * @len: length of buffer
1699 * 1700 *
1700 * Decode the instruction at @instruction and store the corresponding 1701 * Decode the instruction at @instruction and store the corresponding
1701 * mnemonic into @buf. 1702 * mnemonic into @buf of length @len.
1702 * @buf is left unchanged if the instruction could not be decoded. 1703 * @buf is left unchanged if the instruction could not be decoded.
1703 * Returns: 1704 * Returns:
1704 * %0 on success, %-ENOENT if the instruction was not found. 1705 * %0 on success, %-ENOENT if the instruction was not found.
1705 */ 1706 */
1706int insn_to_mnemonic(unsigned char *instruction, char buf[8]) 1707int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
1707{ 1708{
1708 struct insn *insn; 1709 struct insn *insn;
1709 1710
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
1711 if (!insn) 1712 if (!insn)
1712 return -ENOENT; 1713 return -ENOENT;
1713 if (insn->name[0] == '\0') 1714 if (insn->name[0] == '\0')
1714 snprintf(buf, 8, "%s", 1715 snprintf(buf, len, "%s",
1715 long_insn_name[(int) insn->name[1]]); 1716 long_insn_name[(int) insn->name[1]]);
1716 else 1717 else
1717 snprintf(buf, 8, "%.5s", insn->name); 1718 snprintf(buf, len, "%.5s", insn->name);
1718 return 0; 1719 return 0;
1719} 1720}
1720EXPORT_SYMBOL_GPL(insn_to_mnemonic); 1721EXPORT_SYMBOL_GPL(insn_to_mnemonic);
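
With the extra length parameter the caller now owns the buffer size, which the kvm tracepoint change further down relies on. A minimal usage sketch for the new signature (function and buffer names are illustrative):

    /* Sketch only: decode an instruction, falling back to "unknown". */
    static void print_mnemonic(unsigned char *code)
    {
            char buf[8];

            if (insn_to_mnemonic(code, buf, sizeof(buf)))
                    snprintf(buf, sizeof(buf), "unknown");
            printk("%s\n", buf);
    }
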
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
new file mode 100644
index 000000000000..03dce39d01ee
--- /dev/null
+++ b/arch/s390/kernel/dumpstack.c
@@ -0,0 +1,236 @@
1/*
2 * Stack dumping functions
3 *
4 * Copyright IBM Corp. 1999, 2013
5 */
6
7#include <linux/kallsyms.h>
8#include <linux/hardirq.h>
9#include <linux/kprobes.h>
10#include <linux/utsname.h>
11#include <linux/export.h>
12#include <linux/kdebug.h>
13#include <linux/ptrace.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <asm/processor.h>
17#include <asm/debug.h>
18#include <asm/ipl.h>
19
20#ifndef CONFIG_64BIT
21#define LONG "%08lx "
22#define FOURLONG "%08lx %08lx %08lx %08lx\n"
23static int kstack_depth_to_print = 12;
24#else /* CONFIG_64BIT */
25#define LONG "%016lx "
26#define FOURLONG "%016lx %016lx %016lx %016lx\n"
27static int kstack_depth_to_print = 20;
28#endif /* CONFIG_64BIT */
29
30/*
31 * For show_trace we have three different stacks to consider:
32 * - the panic stack which is used if the kernel stack has overflowed
33 * - the asynchronous interrupt stack (cpu related)
34 * - the synchronous kernel stack (process related)
35 * The stack trace can start at any of the three stacks and can potentially
36 * touch all of them. The order is: panic stack, async stack, sync stack.
37 */
38static unsigned long
39__show_trace(unsigned long sp, unsigned long low, unsigned long high)
40{
41 struct stack_frame *sf;
42 struct pt_regs *regs;
43
44 while (1) {
45 sp = sp & PSW_ADDR_INSN;
46 if (sp < low || sp > high - sizeof(*sf))
47 return sp;
48 sf = (struct stack_frame *) sp;
49 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
50 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
51 /* Follow the backchain. */
52 while (1) {
53 low = sp;
54 sp = sf->back_chain & PSW_ADDR_INSN;
55 if (!sp)
56 break;
57 if (sp <= low || sp > high - sizeof(*sf))
58 return sp;
59 sf = (struct stack_frame *) sp;
60 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
61 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
62 }
63 /* Zero backchain detected, check for interrupt frame. */
64 sp = (unsigned long) (sf + 1);
65 if (sp <= low || sp > high - sizeof(*regs))
66 return sp;
67 regs = (struct pt_regs *) sp;
68 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
69 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
70 low = sp;
71 sp = regs->gprs[15];
72 }
73}
74
75static void show_trace(struct task_struct *task, unsigned long *stack)
76{
77 register unsigned long __r15 asm ("15");
78 unsigned long sp;
79
80 sp = (unsigned long) stack;
81 if (!sp)
82 sp = task ? task->thread.ksp : __r15;
83 printk("Call Trace:\n");
84#ifdef CONFIG_CHECK_STACK
85 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
86 S390_lowcore.panic_stack);
87#endif
88 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
89 S390_lowcore.async_stack);
90 if (task)
91 __show_trace(sp, (unsigned long) task_stack_page(task),
92 (unsigned long) task_stack_page(task) + THREAD_SIZE);
93 else
94 __show_trace(sp, S390_lowcore.thread_info,
95 S390_lowcore.thread_info + THREAD_SIZE);
96 if (!task)
97 task = current;
98 debug_show_held_locks(task);
99}
100
101void show_stack(struct task_struct *task, unsigned long *sp)
102{
103 register unsigned long *__r15 asm ("15");
104 unsigned long *stack;
105 int i;
106
107 if (!sp)
108 stack = task ? (unsigned long *) task->thread.ksp : __r15;
109 else
110 stack = sp;
111
112 for (i = 0; i < kstack_depth_to_print; i++) {
113 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
114 break;
115 if ((i * sizeof(long) % 32) == 0)
116 printk("%s ", i == 0 ? "" : "\n");
117 printk(LONG, *stack++);
118 }
119 printk("\n");
120 show_trace(task, sp);
121}
122
123static void show_last_breaking_event(struct pt_regs *regs)
124{
125#ifdef CONFIG_64BIT
126 printk("Last Breaking-Event-Address:\n");
127 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
128 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
129#endif
130}
131
132/*
133 * The architecture-independent dump_stack generator
134 */
135void dump_stack(void)
136{
137 printk("CPU: %d %s %s %.*s\n",
138 task_thread_info(current)->cpu, print_tainted(),
139 init_utsname()->release,
140 (int)strcspn(init_utsname()->version, " "),
141 init_utsname()->version);
142 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
143 current->comm, current->pid, current,
144 (void *) current->thread.ksp);
145 show_stack(NULL, NULL);
146}
147EXPORT_SYMBOL(dump_stack);
148
149static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
150{
151 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
152}
153
154void show_registers(struct pt_regs *regs)
155{
156 char *mode;
157
158 mode = user_mode(regs) ? "User" : "Krnl";
159 printk("%s PSW : %p %p",
160 mode, (void *) regs->psw.mask,
161 (void *) regs->psw.addr);
162 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
163 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
164 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
165 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
166 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
167 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
168 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
169 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
170#ifdef CONFIG_64BIT
171 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
172#endif
173 printk("\n%s GPRS: " FOURLONG, mode,
174 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
175 printk(" " FOURLONG,
176 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
177 printk(" " FOURLONG,
178 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
179 printk(" " FOURLONG,
180 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
181 show_code(regs);
182}
183
184void show_regs(struct pt_regs *regs)
185{
186 printk("CPU: %d %s %s %.*s\n",
187 task_thread_info(current)->cpu, print_tainted(),
188 init_utsname()->release,
189 (int)strcspn(init_utsname()->version, " "),
190 init_utsname()->version);
191 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
192 current->comm, current->pid, current,
193 (void *) current->thread.ksp);
194 show_registers(regs);
195 /* Show stack backtrace if pt_regs is from kernel mode */
196 if (!user_mode(regs))
197 show_trace(NULL, (unsigned long *) regs->gprs[15]);
198 show_last_breaking_event(regs);
199}
200
201static DEFINE_SPINLOCK(die_lock);
202
203void die(struct pt_regs *regs, const char *str)
204{
205 static int die_counter;
206
207 oops_enter();
208 lgr_info_log();
209 debug_stop_all();
210 console_verbose();
211 spin_lock_irq(&die_lock);
212 bust_spinlocks(1);
213 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
214#ifdef CONFIG_PREEMPT
215 printk("PREEMPT ");
216#endif
217#ifdef CONFIG_SMP
218 printk("SMP ");
219#endif
220#ifdef CONFIG_DEBUG_PAGEALLOC
221 printk("DEBUG_PAGEALLOC");
222#endif
223 printk("\n");
224 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
225 print_modules();
226 show_regs(regs);
227 bust_spinlocks(0);
228 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
229 spin_unlock_irq(&die_lock);
230 if (in_interrupt())
231 panic("Fatal exception in interrupt");
232 if (panic_on_oops)
233 panic("Fatal exception: panic_on_oops");
234 oops_exit();
235 do_exit(SIGSEGV);
236}
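
The walker in __show_trace() relies on the standard s390 stack frame layout: the first slot of each frame is the backchain pointer to the previous frame, and gprs[8] is the saved return address (%r14) of the caller. Roughly, as a simplified view of the layout assumed here (see asm/processor.h for the authoritative definition):

    struct stack_frame {
            unsigned long back_chain;   /* 0(%r15): previous frame, 0 ends the chain */
            unsigned long empty1[5];
            unsigned long gprs[10];     /* saved %r6..%r15, gprs[8] == saved %r14 */
            unsigned int  empty2[8];
    };
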
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 550228523267..4d5e6f8a7978 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
45 45
46STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 46STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
47STACK_SIZE = 1 << STACK_SHIFT 47STACK_SIZE = 1 << STACK_SHIFT
48STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
48 49
49#define BASED(name) name-system_call(%r13) 50#define BASED(name) name-system_call(%r13)
50 51
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
97 sra %r14,\shift 98 sra %r14,\shift
98 jnz 1f 99 jnz 1f
99 CHECK_STACK 1<<\shift,\savearea 100 CHECK_STACK 1<<\shift,\savearea
101 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
100 j 2f 102 j 2f
1011: l %r15,\stack # load target stack 1031: l %r15,\stack # load target stack
1022: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 1042: la %r11,STACK_FRAME_OVERHEAD(%r15)
103 la %r11,STACK_FRAME_OVERHEAD(%r15)
104 .endm 105 .endm
105 106
106 .macro ADD64 high,low,timer 107 .macro ADD64 high,low,timer
@@ -150,7 +151,7 @@ ENTRY(__switch_to)
150 l %r4,__THREAD_info(%r2) # get thread_info of prev 151 l %r4,__THREAD_info(%r2) # get thread_info of prev
151 l %r5,__THREAD_info(%r3) # get thread_info of next 152 l %r5,__THREAD_info(%r3) # get thread_info of next
152 lr %r15,%r5 153 lr %r15,%r5
153 ahi %r15,STACK_SIZE # end of kernel stack of next 154 ahi %r15,STACK_INIT # end of kernel stack of next
154 st %r3,__LC_CURRENT # store task struct of next 155 st %r3,__LC_CURRENT # store task struct of next
155 st %r5,__LC_THREAD_INFO # store thread info of next 156 st %r5,__LC_THREAD_INFO # store thread info of next
156 st %r15,__LC_KERNEL_STACK # store end of kernel stack 157 st %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -178,7 +179,6 @@ sysc_stm:
178 l %r13,__LC_SVC_NEW_PSW+4 179 l %r13,__LC_SVC_NEW_PSW+4
179sysc_per: 180sysc_per:
180 l %r15,__LC_KERNEL_STACK 181 l %r15,__LC_KERNEL_STACK
181 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
182 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 182 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
183sysc_vtime: 183sysc_vtime:
184 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 184 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -188,6 +188,7 @@ sysc_vtime:
188 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 188 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
189sysc_do_svc: 189sysc_do_svc:
190 oi __TI_flags+3(%r12),_TIF_SYSCALL 190 oi __TI_flags+3(%r12),_TIF_SYSCALL
191 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
191 lh %r8,__PT_INT_CODE+2(%r11) 192 lh %r8,__PT_INT_CODE+2(%r11)
192 sla %r8,2 # shift and test for svc0 193 sla %r8,2 # shift and test for svc0
193 jnz sysc_nr_ok 194 jnz sysc_nr_ok
@@ -198,7 +199,6 @@ sysc_do_svc:
198 lr %r8,%r1 199 lr %r8,%r1
199 sla %r8,2 200 sla %r8,2
200sysc_nr_ok: 201sysc_nr_ok:
201 l %r10,BASED(.Lsys_call_table) # 31 bit system call table
202 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 202 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
203 st %r2,__PT_ORIG_GPR2(%r11) 203 st %r2,__PT_ORIG_GPR2(%r11)
204 st %r7,STACK_FRAME_OVERHEAD(%r15) 204 st %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
359 tm __LC_PGM_ILC+3,0x80 # check for per exception 359 tm __LC_PGM_ILC+3,0x80 # check for per exception
360 jnz pgm_svcper # -> single stepped svc 360 jnz pgm_svcper # -> single stepped svc
3610: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 3610: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
362 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
362 j 2f 363 j 2f
3631: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 3641: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
364 l %r15,__LC_KERNEL_STACK 365 l %r15,__LC_KERNEL_STACK
3652: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 3662: la %r11,STACK_FRAME_OVERHEAD(%r15)
366 la %r11,STACK_FRAME_OVERHEAD(%r15)
367 stm %r0,%r7,__PT_R0(%r11) 367 stm %r0,%r7,__PT_R0(%r11)
368 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC 368 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
369 stm %r8,%r9,__PT_PSW(%r11) 369 stm %r8,%r9,__PT_PSW(%r11)
@@ -485,7 +485,6 @@ io_work:
485# 485#
486io_work_user: 486io_work_user:
487 l %r1,__LC_KERNEL_STACK 487 l %r1,__LC_KERNEL_STACK
488 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
489 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 488 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
490 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 489 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
491 la %r11,STACK_FRAME_OVERHEAD(%r1) 490 la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -636,7 +635,8 @@ ENTRY(mcck_int_handler)
636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 635 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
637mcck_skip: 636mcck_skip:
638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 637 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
639 mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA 638 stm %r0,%r7,__PT_R0(%r11)
639 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
640 stm %r8,%r9,__PT_PSW(%r11) 640 stm %r8,%r9,__PT_PSW(%r11)
641 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 641 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
642 l %r1,BASED(.Ldo_machine_check) 642 l %r1,BASED(.Ldo_machine_check)
@@ -645,7 +645,6 @@ mcck_skip:
645 tm __PT_PSW+1(%r11),0x01 # returning to user ? 645 tm __PT_PSW+1(%r11),0x01 # returning to user ?
646 jno mcck_return 646 jno mcck_return
647 l %r1,__LC_KERNEL_STACK # switch to kernel stack 647 l %r1,__LC_KERNEL_STACK # switch to kernel stack
648 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
649 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 648 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
650 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 649 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
651 la %r11,STACK_FRAME_OVERHEAD(%r15) 650 la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -673,6 +672,7 @@ mcck_panic:
673 sra %r14,PAGE_SHIFT 672 sra %r14,PAGE_SHIFT
674 jz 0f 673 jz 0f
675 l %r15,__LC_PANIC_STACK 674 l %r15,__LC_PANIC_STACK
675 j mcck_skip
6760: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 6760: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
677 j mcck_skip 677 j mcck_skip
678 678
@@ -713,12 +713,10 @@ ENTRY(restart_int_handler)
713 */ 713 */
714stack_overflow: 714stack_overflow:
715 l %r15,__LC_PANIC_STACK # change to panic stack 715 l %r15,__LC_PANIC_STACK # change to panic stack
716 ahi %r15,-__PT_SIZE # create pt_regs 716 la %r11,STACK_FRAME_OVERHEAD(%r15)
717 stm %r0,%r7,__PT_R0(%r15) 717 stm %r0,%r7,__PT_R0(%r11)
718 stm %r8,%r9,__PT_PSW(%r15) 718 stm %r8,%r9,__PT_PSW(%r11)
719 mvc __PT_R8(32,%r11),0(%r14) 719 mvc __PT_R8(32,%r11),0(%r14)
720 lr %r15,%r11
721 ahi %r15,-STACK_FRAME_OVERHEAD
722 l %r1,BASED(1f) 720 l %r1,BASED(1f)
723 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 721 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
724 lr %r2,%r11 # pass pointer to pt_regs 722 lr %r2,%r11 # pass pointer to pt_regs
@@ -798,15 +796,14 @@ cleanup_system_call:
798 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 796 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
799 # set up saved register 11 797 # set up saved register 11
800 l %r15,__LC_KERNEL_STACK 798 l %r15,__LC_KERNEL_STACK
801 ahi %r15,-__PT_SIZE 799 la %r9,STACK_FRAME_OVERHEAD(%r15)
802 st %r15,12(%r11) # r11 pt_regs pointer 800 st %r9,12(%r11) # r11 pt_regs pointer
803 # fill pt_regs 801 # fill pt_regs
804 mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC 802 mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
805 stm %r0,%r7,__PT_R0(%r15) 803 stm %r0,%r7,__PT_R0(%r9)
806 mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW 804 mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
807 mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC 805 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
808 # setup saved register 15 806 # setup saved register 15
809 ahi %r15,-STACK_FRAME_OVERHEAD
810 st %r15,28(%r11) # r15 stack pointer 807 st %r15,28(%r11) # r15 stack pointer
811 # set new psw address and exit 808 # set new psw address and exit
812 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 809 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@@ -909,7 +906,6 @@ cleanup_idle_wait:
909.Ltrace_enter: .long do_syscall_trace_enter 906.Ltrace_enter: .long do_syscall_trace_enter
910.Ltrace_exit: .long do_syscall_trace_exit 907.Ltrace_exit: .long do_syscall_trace_exit
911.Lschedule_tail: .long schedule_tail 908.Lschedule_tail: .long schedule_tail
912.Lsys_call_table: .long sys_call_table
913.Lsysc_per: .long sysc_per + 0x80000000 909.Lsysc_per: .long sysc_per + 0x80000000
914#ifdef CONFIG_TRACE_IRQFLAGS 910#ifdef CONFIG_TRACE_IRQFLAGS
915.Lhardirqs_on: .long trace_hardirqs_on_caller 911.Lhardirqs_on: .long trace_hardirqs_on_caller
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3a736a3ed44..aa0ab02e9595 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -7,6 +7,7 @@
7#include <asm/cputime.h> 7#include <asm/cputime.h>
8 8
9extern void *restart_stack; 9extern void *restart_stack;
10extern unsigned long suspend_zero_pages;
10 11
11void system_call(void); 12void system_call(void);
12void pgm_check_handler(void); 13void pgm_check_handler(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c837c101297..4c17eece707e 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
39 39
40STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 40STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
41STACK_SIZE = 1 << STACK_SHIFT 41STACK_SIZE = 1 << STACK_SHIFT
42STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
42 43
43_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 44_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
44 _TIF_MCCK_PENDING | _TIF_PER_TRAP ) 45 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
124 srag %r14,%r14,\shift 125 srag %r14,%r14,\shift
125 jnz 1f 126 jnz 1f
126 CHECK_STACK 1<<\shift,\savearea 127 CHECK_STACK 1<<\shift,\savearea
128 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
127 j 2f 129 j 2f
1281: lg %r15,\stack # load target stack 1301: lg %r15,\stack # load target stack
1292: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 1312: la %r11,STACK_FRAME_OVERHEAD(%r15)
130 la %r11,STACK_FRAME_OVERHEAD(%r15)
131 .endm 132 .endm
132 133
133 .macro UPDATE_VTIME scratch,enter_timer 134 .macro UPDATE_VTIME scratch,enter_timer
@@ -177,7 +178,7 @@ ENTRY(__switch_to)
177 lg %r4,__THREAD_info(%r2) # get thread_info of prev 178 lg %r4,__THREAD_info(%r2) # get thread_info of prev
178 lg %r5,__THREAD_info(%r3) # get thread_info of next 179 lg %r5,__THREAD_info(%r3) # get thread_info of next
179 lgr %r15,%r5 180 lgr %r15,%r5
180 aghi %r15,STACK_SIZE # end of kernel stack of next 181 aghi %r15,STACK_INIT # end of kernel stack of next
181 stg %r3,__LC_CURRENT # store task struct of next 182 stg %r3,__LC_CURRENT # store task struct of next
182 stg %r5,__LC_THREAD_INFO # store thread info of next 183 stg %r5,__LC_THREAD_INFO # store thread info of next
183 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 184 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -203,10 +204,8 @@ sysc_stmg:
203 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 204 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
204 lg %r10,__LC_LAST_BREAK 205 lg %r10,__LC_LAST_BREAK
205 lg %r12,__LC_THREAD_INFO 206 lg %r12,__LC_THREAD_INFO
206 larl %r13,system_call
207sysc_per: 207sysc_per:
208 lg %r15,__LC_KERNEL_STACK 208 lg %r15,__LC_KERNEL_STACK
209 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 209 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
211sysc_vtime: 210sysc_vtime:
212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER 211 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@@ -217,6 +216,7 @@ sysc_vtime:
217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 216 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
218sysc_do_svc: 217sysc_do_svc:
219 oi __TI_flags+7(%r12),_TIF_SYSCALL 218 oi __TI_flags+7(%r12),_TIF_SYSCALL
219 lg %r10,__TI_sysc_table(%r12) # address of system call table
220 llgh %r8,__PT_INT_CODE+2(%r11) 220 llgh %r8,__PT_INT_CODE+2(%r11)
221 slag %r8,%r8,2 # shift and test for svc 0 221 slag %r8,%r8,2 # shift and test for svc 0
222 jnz sysc_nr_ok 222 jnz sysc_nr_ok
@@ -227,13 +227,6 @@ sysc_do_svc:
227 sth %r1,__PT_INT_CODE+2(%r11) 227 sth %r1,__PT_INT_CODE+2(%r11)
228 slag %r8,%r1,2 228 slag %r8,%r1,2
229sysc_nr_ok: 229sysc_nr_ok:
230 larl %r10,sys_call_table # 64 bit system call table
231#ifdef CONFIG_COMPAT
232 tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
233 jno sysc_noemu
234 larl %r10,sys_call_table_emu # 31 bit system call table
235sysc_noemu:
236#endif
237 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 230 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
238 stg %r2,__PT_ORIG_GPR2(%r11) 231 stg %r2,__PT_ORIG_GPR2(%r11)
239 stg %r7,STACK_FRAME_OVERHEAD(%r15) 232 stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
389 tm __LC_PGM_ILC+3,0x80 # check for per exception 382 tm __LC_PGM_ILC+3,0x80 # check for per exception
390 jnz pgm_svcper # -> single stepped svc 383 jnz pgm_svcper # -> single stepped svc
3910: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 3840: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
385 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
392 j 2f 386 j 2f
3931: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER 3871: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
394 LAST_BREAK %r14 388 LAST_BREAK %r14
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
398 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 392 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
399 jz 2f 393 jz 2f
400 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 394 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
4012: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 3952: la %r11,STACK_FRAME_OVERHEAD(%r15)
402 la %r11,STACK_FRAME_OVERHEAD(%r15)
403 stmg %r0,%r7,__PT_R0(%r11) 396 stmg %r0,%r7,__PT_R0(%r11)
404 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 397 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
405 stmg %r8,%r9,__PT_PSW(%r11) 398 stmg %r8,%r9,__PT_PSW(%r11)
@@ -526,7 +519,6 @@ io_work:
526# 519#
527io_work_user: 520io_work_user:
528 lg %r1,__LC_KERNEL_STACK 521 lg %r1,__LC_KERNEL_STACK
529 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
530 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 522 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
531 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 523 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
532 la %r11,STACK_FRAME_OVERHEAD(%r1) 524 la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -678,8 +670,9 @@ ENTRY(mcck_int_handler)
678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 670 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
679 LAST_BREAK %r14 671 LAST_BREAK %r14
680mcck_skip: 672mcck_skip:
681 lghi %r14,__LC_GPREGS_SAVE_AREA 673 lghi %r14,__LC_GPREGS_SAVE_AREA+64
682 mvc __PT_R0(128,%r11),0(%r14) 674 stmg %r0,%r7,__PT_R0(%r11)
675 mvc __PT_R8(64,%r11),0(%r14)
683 stmg %r8,%r9,__PT_PSW(%r11) 676 stmg %r8,%r9,__PT_PSW(%r11)
684 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 677 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
685 lgr %r2,%r11 # pass pointer to pt_regs 678 lgr %r2,%r11 # pass pointer to pt_regs
@@ -687,7 +680,6 @@ mcck_skip:
687 tm __PT_PSW+1(%r11),0x01 # returning to user ? 680 tm __PT_PSW+1(%r11),0x01 # returning to user ?
688 jno mcck_return 681 jno mcck_return
689 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 682 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
690 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
691 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 683 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
692 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 684 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
693 la %r11,STACK_FRAME_OVERHEAD(%r1) 685 la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -754,14 +746,12 @@ ENTRY(restart_int_handler)
754 * Setup a pt_regs so that show_trace can provide a good call trace. 746 * Setup a pt_regs so that show_trace can provide a good call trace.
755 */ 747 */
756stack_overflow: 748stack_overflow:
757 lg %r11,__LC_PANIC_STACK # change to panic stack 749 lg %r15,__LC_PANIC_STACK # change to panic stack
758 aghi %r11,-__PT_SIZE # create pt_regs 750 la %r11,STACK_FRAME_OVERHEAD(%r15)
759 stmg %r0,%r7,__PT_R0(%r11) 751 stmg %r0,%r7,__PT_R0(%r11)
760 stmg %r8,%r9,__PT_PSW(%r11) 752 stmg %r8,%r9,__PT_PSW(%r11)
761 mvc __PT_R8(64,%r11),0(%r14) 753 mvc __PT_R8(64,%r11),0(%r14)
762 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 754 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
763 lgr %r15,%r11
764 aghi %r15,-STACK_FRAME_OVERHEAD
765 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 755 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
766 lgr %r2,%r11 # pass pointer to pt_regs 756 lgr %r2,%r11 # pass pointer to pt_regs
767 jg kernel_stack_overflow 757 jg kernel_stack_overflow
@@ -845,15 +835,14 @@ cleanup_system_call:
845 mvc __TI_last_break(8,%r12),16(%r11) 835 mvc __TI_last_break(8,%r12),16(%r11)
8460: # set up saved register r11 8360: # set up saved register r11
847 lg %r15,__LC_KERNEL_STACK 837 lg %r15,__LC_KERNEL_STACK
848 aghi %r15,-__PT_SIZE 838 la %r9,STACK_FRAME_OVERHEAD(%r15)
849 stg %r15,24(%r11) # r11 pt_regs pointer 839 stg %r9,24(%r11) # r11 pt_regs pointer
850 # fill pt_regs 840 # fill pt_regs
851 mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC 841 mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
852 stmg %r0,%r7,__PT_R0(%r15) 842 stmg %r0,%r7,__PT_R0(%r9)
853 mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW 843 mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
854 mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC 844 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
855 # setup saved register r15 845 # setup saved register r15
856 aghi %r15,-STACK_FRAME_OVERHEAD
857 stg %r15,56(%r11) # r15 stack pointer 846 stg %r15,56(%r11) # r15 stack pointer
858 # set new psw address and exit 847 # set new psw address and exit
859 larl %r9,sysc_do_svc 848 larl %r9,sysc_do_svc
@@ -1010,6 +999,7 @@ sys_call_table:
1010#ifdef CONFIG_COMPAT 999#ifdef CONFIG_COMPAT
1011 1000
1012#define SYSCALL(esa,esame,emu) .long emu 1001#define SYSCALL(esa,esame,emu) .long emu
1002 .globl sys_call_table_emu
1013sys_call_table_emu: 1003sys_call_table_emu:
1014#include "syscalls.S" 1004#include "syscalls.S"
1015#undef SYSCALL 1005#undef SYSCALL
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1630f439cd2a..4f5ef62934a4 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -33,7 +33,7 @@ struct irq_class {
33}; 33};
34 34
35/* 35/*
36 * The list of "main" irq classes on s390. This is the list of interrrupts 36 * The list of "main" irq classes on s390. This is the list of interrupts
37 * that appear both in /proc/stat ("intr" line) and /proc/interrupts. 37 * that appear both in /proc/stat ("intr" line) and /proc/interrupts.
38 * Historically only external and I/O interrupts have been part of /proc/stat. 38 * Historically only external and I/O interrupts have been part of /proc/stat.
39 * We can't add the split external and I/O sub classes since the first field 39 * We can't add the split external and I/O sub classes since the first field
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b3de27700016..ac2178161ec3 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
13#include <linux/reboot.h> 13#include <linux/reboot.h>
14#include <linux/ftrace.h> 14#include <linux/ftrace.h>
15#include <linux/debug_locks.h> 15#include <linux/debug_locks.h>
16#include <linux/suspend.h>
16#include <asm/cio.h> 17#include <asm/cio.h>
17#include <asm/setup.h> 18#include <asm/setup.h>
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
@@ -67,6 +68,35 @@ void setup_regs(void)
67 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); 68 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
68} 69}
69 70
71/*
72 * PM notifier callback for kdump
73 */
74static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
75 void *ptr)
76{
77 switch (action) {
78 case PM_SUSPEND_PREPARE:
79 case PM_HIBERNATION_PREPARE:
80 if (crashk_res.start)
81 crash_map_reserved_pages();
82 break;
83 case PM_POST_SUSPEND:
84 case PM_POST_HIBERNATION:
85 if (crashk_res.start)
86 crash_unmap_reserved_pages();
87 break;
88 default:
89 return NOTIFY_DONE;
90 }
91 return NOTIFY_OK;
92}
93
94static int __init machine_kdump_pm_init(void)
95{
96 pm_notifier(machine_kdump_pm_cb, 0);
97 return 0;
98}
99arch_initcall(machine_kdump_pm_init);
70#endif 100#endif
71 101
72/* 102/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 536d64579d9a..2bc3eddae34a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,18 +61,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
61 return sf->gprs[8]; 61 return sf->gprs[8];
62} 62}
63 63
64/* 64void arch_cpu_idle(void)
65 * The idle loop on a S390...
66 */
67static void default_idle(void)
68{ 65{
69 if (cpu_is_offline(smp_processor_id()))
70 cpu_die();
71 local_irq_disable();
72 if (need_resched()) {
73 local_irq_enable();
74 return;
75 }
76 local_mcck_disable(); 66 local_mcck_disable();
77 if (test_thread_flag(TIF_MCCK_PENDING)) { 67 if (test_thread_flag(TIF_MCCK_PENDING)) {
78 local_mcck_enable(); 68 local_mcck_enable();
@@ -83,19 +73,15 @@ static void default_idle(void)
83 vtime_stop_cpu(); 73 vtime_stop_cpu();
84} 74}
85 75
86void cpu_idle(void) 76void arch_cpu_idle_exit(void)
87{ 77{
88 for (;;) { 78 if (test_thread_flag(TIF_MCCK_PENDING))
89 tick_nohz_idle_enter(); 79 s390_handle_mcck();
90 rcu_idle_enter(); 80}
91 while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING)) 81
92 default_idle(); 82void arch_cpu_idle_dead(void)
93 rcu_idle_exit(); 83{
94 tick_nohz_idle_exit(); 84 cpu_die();
95 if (test_thread_flag(TIF_MCCK_PENDING))
96 s390_handle_mcck();
97 schedule_preempt_disabled();
98 }
99} 85}
100 86
101extern void __kprobes kernel_thread_starter(void); 87extern void __kprobes kernel_thread_starter(void);
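
The removed cpu_idle() loop is replaced by the generic idle loop, which calls back into the new arch_cpu_idle*() hooks. A rough, simplified sketch of how the generic code drives them (not the exact implementation):

    /* Simplified shape of the generic idle loop these hooks plug into. */
    while (1) {
            while (!need_resched()) {
                    if (cpu_is_offline(smp_processor_id()))
                            arch_cpu_idle_dead();   /* s390: cpu_die() */
                    local_irq_disable();
                    arch_cpu_idle();                /* s390: load enabled wait PSW */
                    arch_cpu_idle_exit();           /* s390: handle pending machine checks */
            }
            schedule_preempt_disabled();
    }

The generic loop also brackets arch_cpu_idle() with stop_critical_timings() and start_critical_timings(), which is why the vtime.c hunk below can drop its own calls.
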
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de85ec7..0f419c5765c8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
377 PSW_MASK_DAT | PSW_MASK_MCHECK; 377 PSW_MASK_DAT | PSW_MASK_MCHECK;
378 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 378 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
379 lc->clock_comparator = -1ULL; 379 lc->clock_comparator = -1ULL;
380 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 380 lc->kernel_stack = ((unsigned long) &init_thread_union)
381 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
381 lc->async_stack = (unsigned long) 382 lc->async_stack = (unsigned long)
382 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 383 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
384 + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
383 lc->panic_stack = (unsigned long) 385 lc->panic_stack = (unsigned long)
384 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; 386 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
387 + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
385 lc->current_task = (unsigned long) init_thread_union.thread_info.task; 388 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
386 lc->thread_info = (unsigned long) &init_thread_union; 389 lc->thread_info = (unsigned long) &init_thread_union;
387 lc->machine_flags = S390_lowcore.machine_flags; 390 lc->machine_flags = S390_lowcore.machine_flags;
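
Together with the STACK_INIT changes in entry.S and entry64.S above, the stack pointers stored in the lowcore now already leave room for one stack frame plus a pt_regs area at the top of each stack, so the entry code no longer subtracts that amount on every interrupt or system call. The same arithmetic written out (illustrative, the real values come from asm-offsets and the lowcore headers):

    /* Illustrative only: the new "top of stack" convention. */
    STACK_INIT   = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE;
    kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE
                   - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
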
@@ -571,6 +574,8 @@ static void __init setup_memory_end(void)
571 574
572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 575 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); 576 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
577 /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
578 tmp = SECTION_ALIGN_UP(tmp);
574 tmp = VMALLOC_START - tmp * sizeof(struct page); 579 tmp = VMALLOC_START - tmp * sizeof(struct page);
575 tmp &= ~((vmax >> 11) - 1); /* align to page table level */ 580 tmp &= ~((vmax >> 11) - 1); /* align to page table level */
576 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); 581 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 549c9d173c0f..8074cb4b7cbf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
181 lc = pcpu->lowcore; 181 lc = pcpu->lowcore;
182 memcpy(lc, &S390_lowcore, 512); 182 memcpy(lc, &S390_lowcore, 512);
183 memset((char *) lc + 512, 0, sizeof(*lc) - 512); 183 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
184 lc->async_stack = pcpu->async_stack + ASYNC_SIZE; 184 lc->async_stack = pcpu->async_stack + ASYNC_SIZE
185 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; 185 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
186 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
187 - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
186 lc->cpu_nr = cpu; 188 lc->cpu_nr = cpu;
187#ifndef CONFIG_64BIT 189#ifndef CONFIG_64BIT
188 if (MACHINE_HAS_IEEE) { 190 if (MACHINE_HAS_IEEE) {
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
253 struct _lowcore *lc = pcpu->lowcore; 255 struct _lowcore *lc = pcpu->lowcore;
254 struct thread_info *ti = task_thread_info(tsk); 256 struct thread_info *ti = task_thread_info(tsk);
255 257
256 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; 258 lc->kernel_stack = (unsigned long) task_stack_page(tsk)
259 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
257 lc->thread_info = (unsigned long) task_thread_info(tsk); 260 lc->thread_info = (unsigned long) task_thread_info(tsk);
258 lc->current_task = (unsigned long) tsk; 261 lc->current_task = (unsigned long) tsk;
259 lc->user_timer = ti->user_timer; 262 lc->user_timer = ti->user_timer;
@@ -711,8 +714,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
711 set_cpu_online(smp_processor_id(), true); 714 set_cpu_online(smp_processor_id(), true);
712 inc_irq_stat(CPU_RST); 715 inc_irq_stat(CPU_RST);
713 local_irq_enable(); 716 local_irq_enable();
714 /* cpu_idle will call schedule for us */ 717 cpu_startup_entry(CPUHP_ONLINE);
715 cpu_idle();
716} 718}
717 719
718/* Upping and downing of CPUs */ 720/* Upping and downing of CPUs */
@@ -810,8 +812,10 @@ void __init smp_prepare_boot_cpu(void)
810 pcpu->state = CPU_STATE_CONFIGURED; 812 pcpu->state = CPU_STATE_CONFIGURED;
811 pcpu->address = boot_cpu_address; 813 pcpu->address = boot_cpu_address;
812 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); 814 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
813 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; 815 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
814 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; 816 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
817 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
818 + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
815 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 819 S390_lowcore.percpu_offset = __per_cpu_offset[0];
816 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 820 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
817 set_cpu_present(0, true); 821 set_cpu_present(0, true);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index aa1494d0e380..c479d2f9605b 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -41,6 +41,7 @@ struct page_key_data {
41static struct page_key_data *page_key_data; 41static struct page_key_data *page_key_data;
42static struct page_key_data *page_key_rp, *page_key_wp; 42static struct page_key_data *page_key_rp, *page_key_wp;
43static unsigned long page_key_rx, page_key_wx; 43static unsigned long page_key_rx, page_key_wx;
44unsigned long suspend_zero_pages;
44 45
45/* 46/*
46 * For each page in the hibernation image one additional byte is 47 * For each page in the hibernation image one additional byte is
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
149 return 0; 150 return 0;
150} 151}
151 152
153/*
154 * PM notifier callback for suspend
155 */
156static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
157 void *ptr)
158{
159 switch (action) {
160 case PM_SUSPEND_PREPARE:
161 case PM_HIBERNATION_PREPARE:
162 suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
163 if (!suspend_zero_pages)
164 return NOTIFY_BAD;
165 break;
166 case PM_POST_SUSPEND:
167 case PM_POST_HIBERNATION:
168 free_pages(suspend_zero_pages, LC_ORDER);
169 break;
170 default:
171 return NOTIFY_DONE;
172 }
173 return NOTIFY_OK;
174}
175
176static int __init suspend_pm_init(void)
177{
178 pm_notifier(suspend_pm_cb, 0);
179 return 0;
180}
181arch_initcall(suspend_pm_init);
182
152void save_processor_state(void) 183void save_processor_state(void)
153{ 184{
154 /* swsusp_arch_suspend() actually saves all cpu register contents. 185 /* swsusp_arch_suspend() actually saves all cpu register contents.
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0617b5..c487be4cfc81 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
36 /* Store prefix register on stack */ 36 /* Store prefix register on stack */
37 stpx __SF_EMPTY(%r15) 37 stpx __SF_EMPTY(%r15)
38 38
39 /* Save prefix register contents for lowcore */ 39 /* Save prefix register contents for lowcore copy */
40 llgf %r4,__SF_EMPTY(%r15) 40 llgf %r10,__SF_EMPTY(%r15)
41 41
42 /* Get pointer to save area */ 42 /* Get pointer to save area */
43 lghi %r1,0x1000 43 lghi %r1,0x1000
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
91 xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) 91 xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
92 spx __SF_EMPTY(%r15) 92 spx __SF_EMPTY(%r15)
93 93
94 /* Save absolute zero pages */
95 larl %r2,suspend_zero_pages
96 lg %r2,0(%r2)
97 lghi %r4,0
98 lghi %r3,2*PAGE_SIZE
99 lghi %r5,2*PAGE_SIZE
1001: mvcle %r2,%r4,0
101 jo 1b
102
103 /* Copy lowcore to absolute zero lowcore */
94 lghi %r2,0 104 lghi %r2,0
105 lgr %r4,%r10
95 lghi %r3,2*PAGE_SIZE 106 lghi %r3,2*PAGE_SIZE
96 lghi %r5,2*PAGE_SIZE 107 lghi %r5,2*PAGE_SIZE
971: mvcle %r2,%r4,0 1081: mvcle %r2,%r4,0
@@ -248,8 +259,20 @@ restore_registers:
248 /* Load old stack */ 259 /* Load old stack */
249 lg %r15,0x2f8(%r13) 260 lg %r15,0x2f8(%r13)
250 261
262 /* Save prefix register */
263 mvc __SF_EMPTY(4,%r15),0x318(%r13)
264
265 /* Restore absolute zero pages */
266 lghi %r2,0
267 larl %r4,suspend_zero_pages
268 lg %r4,0(%r4)
269 lghi %r3,2*PAGE_SIZE
270 lghi %r5,2*PAGE_SIZE
2711: mvcle %r2,%r4,0
272 jo 1b
273
251 /* Restore prefix register */ 274 /* Restore prefix register */
252 spx 0x318(%r13) 275 spx __SF_EMPTY(%r15)
253 276
254 /* Activate DAT */ 277 /* Activate DAT */
255 stosm __SF_EMPTY(%r15),0x04 278 stosm __SF_EMPTY(%r15),0x04
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 13dd63fba367..c5762324d9ee 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -12,49 +12,16 @@
12 * 'Traps.c' handles hardware traps and faults after we have saved some 12 * 'Traps.c' handles hardware traps and faults after we have saved some
13 * state in 'asm.s'. 13 * state in 'asm.s'.
14 */ 14 */
15#include <linux/sched.h> 15#include <linux/kprobes.h>
16#include <linux/kernel.h> 16#include <linux/kdebug.h>
17#include <linux/string.h> 17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/ptrace.h> 18#include <linux/ptrace.h>
20#include <linux/timer.h> 19#include <linux/sched.h>
21#include <linux/mm.h> 20#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/seq_file.h>
26#include <linux/delay.h>
27#include <linux/module.h>
28#include <linux/kdebug.h>
29#include <linux/kallsyms.h>
30#include <linux/reboot.h>
31#include <linux/kprobes.h>
32#include <linux/bug.h>
33#include <linux/utsname.h>
34#include <asm/uaccess.h>
35#include <asm/io.h>
36#include <linux/atomic.h>
37#include <asm/mathemu.h>
38#include <asm/cpcmd.h>
39#include <asm/lowcore.h>
40#include <asm/debug.h>
41#include <asm/ipl.h>
42#include "entry.h" 21#include "entry.h"
43 22
44int show_unhandled_signals = 1; 23int show_unhandled_signals = 1;
45 24
46#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
47
48#ifndef CONFIG_64BIT
49#define LONG "%08lx "
50#define FOURLONG "%08lx %08lx %08lx %08lx\n"
51static int kstack_depth_to_print = 12;
52#else /* CONFIG_64BIT */
53#define LONG "%016lx "
54#define FOURLONG "%016lx %016lx %016lx %016lx\n"
55static int kstack_depth_to_print = 20;
56#endif /* CONFIG_64BIT */
57
58static inline void __user *get_trap_ip(struct pt_regs *regs) 25static inline void __user *get_trap_ip(struct pt_regs *regs)
59{ 26{
60#ifdef CONFIG_64BIT 27#ifdef CONFIG_64BIT
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
72#endif 39#endif
73} 40}
74 41
75/*
76 * For show_trace we have tree different stack to consider:
77 * - the panic stack which is used if the kernel stack has overflown
78 * - the asynchronous interrupt stack (cpu related)
79 * - the synchronous kernel stack (process related)
80 * The stack trace can start at any of the three stack and can potentially
81 * touch all of them. The order is: panic stack, async stack, sync stack.
82 */
83static unsigned long
84__show_trace(unsigned long sp, unsigned long low, unsigned long high)
85{
86 struct stack_frame *sf;
87 struct pt_regs *regs;
88
89 while (1) {
90 sp = sp & PSW_ADDR_INSN;
91 if (sp < low || sp > high - sizeof(*sf))
92 return sp;
93 sf = (struct stack_frame *) sp;
94 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
95 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
96 /* Follow the backchain. */
97 while (1) {
98 low = sp;
99 sp = sf->back_chain & PSW_ADDR_INSN;
100 if (!sp)
101 break;
102 if (sp <= low || sp > high - sizeof(*sf))
103 return sp;
104 sf = (struct stack_frame *) sp;
105 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
106 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
107 }
108 /* Zero backchain detected, check for interrupt frame. */
109 sp = (unsigned long) (sf + 1);
110 if (sp <= low || sp > high - sizeof(*regs))
111 return sp;
112 regs = (struct pt_regs *) sp;
113 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
114 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
115 low = sp;
116 sp = regs->gprs[15];
117 }
118}
119
120static void show_trace(struct task_struct *task, unsigned long *stack)
121{
122 register unsigned long __r15 asm ("15");
123 unsigned long sp;
124
125 sp = (unsigned long) stack;
126 if (!sp)
127 sp = task ? task->thread.ksp : __r15;
128 printk("Call Trace:\n");
129#ifdef CONFIG_CHECK_STACK
130 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
131 S390_lowcore.panic_stack);
132#endif
133 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
134 S390_lowcore.async_stack);
135 if (task)
136 __show_trace(sp, (unsigned long) task_stack_page(task),
137 (unsigned long) task_stack_page(task) + THREAD_SIZE);
138 else
139 __show_trace(sp, S390_lowcore.thread_info,
140 S390_lowcore.thread_info + THREAD_SIZE);
141 if (!task)
142 task = current;
143 debug_show_held_locks(task);
144}
145
146void show_stack(struct task_struct *task, unsigned long *sp)
147{
148 register unsigned long * __r15 asm ("15");
149 unsigned long *stack;
150 int i;
151
152 if (!sp)
153 stack = task ? (unsigned long *) task->thread.ksp : __r15;
154 else
155 stack = sp;
156
157 for (i = 0; i < kstack_depth_to_print; i++) {
158 if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
159 break;
160 if ((i * sizeof(long) % 32) == 0)
161 printk("%s ", i == 0 ? "" : "\n");
162 printk(LONG, *stack++);
163 }
164 printk("\n");
165 show_trace(task, sp);
166}
167
168static void show_last_breaking_event(struct pt_regs *regs)
169{
170#ifdef CONFIG_64BIT
171 printk("Last Breaking-Event-Address:\n");
172 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
173 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
174#endif
175}
176
177/*
178 * The architecture-independent dump_stack generator
179 */
180void dump_stack(void)
181{
182 printk("CPU: %d %s %s %.*s\n",
183 task_thread_info(current)->cpu, print_tainted(),
184 init_utsname()->release,
185 (int)strcspn(init_utsname()->version, " "),
186 init_utsname()->version);
187 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
188 current->comm, current->pid, current,
189 (void *) current->thread.ksp);
190 show_stack(NULL, NULL);
191}
192EXPORT_SYMBOL(dump_stack);
193
194static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
195{
196 return (regs->psw.mask & bits) / ((~bits + 1) & bits);
197}
198
199void show_registers(struct pt_regs *regs)
200{
201 char *mode;
202
203 mode = user_mode(regs) ? "User" : "Krnl";
204 printk("%s PSW : %p %p",
205 mode, (void *) regs->psw.mask,
206 (void *) regs->psw.addr);
207 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
208 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
209 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
210 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
211 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
212 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
213 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
214 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
215#ifdef CONFIG_64BIT
216 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
217#endif
218 printk("\n%s GPRS: " FOURLONG, mode,
219 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
220 printk(" " FOURLONG,
221 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
222 printk(" " FOURLONG,
223 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
224 printk(" " FOURLONG,
225 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
226
227 show_code(regs);
228}
229
230void show_regs(struct pt_regs *regs)
231{
232 printk("CPU: %d %s %s %.*s\n",
233 task_thread_info(current)->cpu, print_tainted(),
234 init_utsname()->release,
235 (int)strcspn(init_utsname()->version, " "),
236 init_utsname()->version);
237 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
238 current->comm, current->pid, current,
239 (void *) current->thread.ksp);
240 show_registers(regs);
241 /* Show stack backtrace if pt_regs is from kernel mode */
242 if (!user_mode(regs))
243 show_trace(NULL, (unsigned long *) regs->gprs[15]);
244 show_last_breaking_event(regs);
245}
246
247static DEFINE_SPINLOCK(die_lock);
248
249void die(struct pt_regs *regs, const char *str)
250{
251 static int die_counter;
252
253 oops_enter();
254 lgr_info_log();
255 debug_stop_all();
256 console_verbose();
257 spin_lock_irq(&die_lock);
258 bust_spinlocks(1);
259 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
260#ifdef CONFIG_PREEMPT
261 printk("PREEMPT ");
262#endif
263#ifdef CONFIG_SMP
264 printk("SMP ");
265#endif
266#ifdef CONFIG_DEBUG_PAGEALLOC
267 printk("DEBUG_PAGEALLOC");
268#endif
269 printk("\n");
270 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
271 print_modules();
272 show_regs(regs);
273 bust_spinlocks(0);
274 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
275 spin_unlock_irq(&die_lock);
276 if (in_interrupt())
277 panic("Fatal exception in interrupt");
278 if (panic_on_oops)
279 panic("Fatal exception: panic_on_oops");
280 oops_exit();
281 do_exit(SIGSEGV);
282}
283
284static inline void report_user_fault(struct pt_regs *regs, int signr) 42static inline void report_user_fault(struct pt_regs *regs, int signr)
285{ 43{
286 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 44 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index a0042acbd989..3fb09359eda6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -158,8 +158,6 @@ void __kprobes vtime_stop_cpu(void)
158 unsigned long psw_mask; 158 unsigned long psw_mask;
159 159
160 trace_hardirqs_on(); 160 trace_hardirqs_on();
161 /* Don't trace preempt off for idle. */
162 stop_critical_timings();
163 161
164 /* Wait for external, I/O or machine check interrupt. */ 162 /* Wait for external, I/O or machine check interrupt. */
165 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | 163 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
@@ -169,9 +167,6 @@ void __kprobes vtime_stop_cpu(void)
169 /* Call the assembler magic in entry.S */ 167 /* Call the assembler magic in entry.S */
170 psw_idle(idle, psw_mask); 168 psw_idle(idle, psw_mask);
171 169
172 /* Reenable preemption tracer. */
173 start_critical_timings();
174
175 /* Account time spent with enabled wait psw loaded as idle time. */ 170 /* Account time spent with enabled wait psw loaded as idle time. */
176 idle->sequence++; 171 idle->sequence++;
177 smp_wmb(); 172 smp_wmb();
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 2b29e62351d3..c2f582bb1cb2 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -67,7 +67,7 @@ TRACE_EVENT(kvm_s390_sie_fault,
67#define sie_intercept_code \ 67#define sie_intercept_code \
68 {0x04, "Instruction"}, \ 68 {0x04, "Instruction"}, \
69 {0x08, "Program interruption"}, \ 69 {0x08, "Program interruption"}, \
70 {0x0C, "Instruction and program interuption"}, \ 70 {0x0C, "Instruction and program interruption"}, \
71 {0x10, "External request"}, \ 71 {0x10, "External request"}, \
72 {0x14, "External interruption"}, \ 72 {0x14, "External interruption"}, \
73 {0x18, "I/O request"}, \ 73 {0x18, "I/O request"}, \
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
117 __entry->instruction, 117 __entry->instruction,
118 insn_to_mnemonic((unsigned char *) 118 insn_to_mnemonic((unsigned char *)
119 &__entry->instruction, 119 &__entry->instruction,
120 __entry->insn) ? 120 __entry->insn, sizeof(__entry->insn)) ?
121 "unknown" : __entry->insn) 121 "unknown" : __entry->insn)
122 ); 122 );
123 123
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dff631d34b45..466fb3383960 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address 77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
78 * contains the (negative) exception code. 78 * contains the (negative) exception code.
79 */ 79 */
80static __always_inline unsigned long follow_table(struct mm_struct *mm, 80#ifdef CONFIG_64BIT
81 unsigned long addr, int write) 81static unsigned long follow_table(struct mm_struct *mm,
82 unsigned long address, int write)
82{ 83{
83 pgd_t *pgd; 84 unsigned long *table = (unsigned long *)__pa(mm->pgd);
84 pud_t *pud; 85
85 pmd_t *pmd; 86 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
86 pte_t *ptep; 87 case _ASCE_TYPE_REGION1:
88 table = table + ((address >> 53) & 0x7ff);
89 if (unlikely(*table & _REGION_ENTRY_INV))
90 return -0x39UL;
91 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
92 case _ASCE_TYPE_REGION2:
93 table = table + ((address >> 42) & 0x7ff);
94 if (unlikely(*table & _REGION_ENTRY_INV))
95 return -0x3aUL;
96 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
97 case _ASCE_TYPE_REGION3:
98 table = table + ((address >> 31) & 0x7ff);
99 if (unlikely(*table & _REGION_ENTRY_INV))
100 return -0x3bUL;
101 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
102 case _ASCE_TYPE_SEGMENT:
103 table = table + ((address >> 20) & 0x7ff);
104 if (unlikely(*table & _SEGMENT_ENTRY_INV))
105 return -0x10UL;
106 if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
107 if (write && (*table & _SEGMENT_ENTRY_RO))
108 return -0x04UL;
109 return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
110 (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
111 }
112 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
113 }
114 table = table + ((address >> 12) & 0xff);
115 if (unlikely(*table & _PAGE_INVALID))
116 return -0x11UL;
117 if (write && (*table & _PAGE_RO))
118 return -0x04UL;
119 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
120}
87 121
88 pgd = pgd_offset(mm, addr); 122#else /* CONFIG_64BIT */
89 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
90 return -0x3aUL;
91 123
92 pud = pud_offset(pgd, addr); 124static unsigned long follow_table(struct mm_struct *mm,
93 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 125 unsigned long address, int write)
94 return -0x3bUL; 126{
127 unsigned long *table = (unsigned long *)__pa(mm->pgd);
95 128
96 pmd = pmd_offset(pud, addr); 129 table = table + ((address >> 20) & 0x7ff);
97 if (pmd_none(*pmd)) 130 if (unlikely(*table & _SEGMENT_ENTRY_INV))
98 return -0x10UL; 131 return -0x10UL;
99 if (pmd_large(*pmd)) { 132 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
100 if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) 133 table = table + ((address >> 12) & 0xff);
101 return -0x04UL; 134 if (unlikely(*table & _PAGE_INVALID))
102 return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
103 }
104 if (unlikely(pmd_bad(*pmd)))
105 return -0x10UL;
106
107 ptep = pte_offset_map(pmd, addr);
108 if (!pte_present(*ptep))
109 return -0x11UL; 135 return -0x11UL;
110 if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) 136 if (write && (*table & _PAGE_RO))
111 return -0x04UL; 137 return -0x04UL;
112 138 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
113 return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
114} 139}
115 140
141#endif /* CONFIG_64BIT */
142
116static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 143static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
117 size_t n, int write_user) 144 size_t n, int write_user)
118{ 145{
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
197 224
198static size_t clear_user_pt(size_t n, void __user *to) 225static size_t clear_user_pt(size_t n, void __user *to)
199{ 226{
200 void *zpage = &empty_zero_page; 227 void *zpage = (void *) empty_zero_page;
201 long done, size, ret; 228 long done, size, ret;
202 229
203 done = 0; 230 done = 0;
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e94282910..9d84a1feefef 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
 	if (rc)
 		goto out_pm;
 	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
-	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
-	if (rc)
-		goto out_kthread;
-	return 0;
+	if (!IS_ERR(cmm_thread_ptr))
+		return 0;
 
-out_kthread:
+	rc = PTR_ERR(cmm_thread_ptr);
 	unregister_pm_notifier(&cmm_power_notifier);
 out_pm:
 	unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63b8fc4..047c3e4c59a2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
 	int fault;
 
 	trans_exc_code = regs->int_parm_long;
-	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+	/*
+	 * Protection exceptions are suppressing, decrement psw address.
+	 * The exception to this rule are aborted transactions, for these
+	 * the PSW already points to the correct location.
+	 */
+	if (!(regs->int_code & 0x200))
+		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 532525ec88c1..121089d57802 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page)
 	if (!ptep)
 		return -ENOMEM;
 
-	pte = mk_pte(page, PAGE_RW);
+	pte_val(pte) = addr;
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
 		pte_val(pte) += PAGE_SIZE;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2c641..0b09b2342302 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
42unsigned long empty_zero_page, zero_page_mask; 42unsigned long empty_zero_page, zero_page_mask;
43EXPORT_SYMBOL(empty_zero_page); 43EXPORT_SYMBOL(empty_zero_page);
44 44
45static unsigned long __init setup_zero_pages(void) 45static void __init setup_zero_pages(void)
46{ 46{
47 struct cpuid cpu_id; 47 struct cpuid cpu_id;
48 unsigned int order; 48 unsigned int order;
49 unsigned long size;
50 struct page *page; 49 struct page *page;
51 int i; 50 int i;
52 51
@@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void)
63 break; 62 break;
64 case 0x2097: /* z10 */ 63 case 0x2097: /* z10 */
65 case 0x2098: /* z10 */ 64 case 0x2098: /* z10 */
66 default: 65 case 0x2817: /* z196 */
66 case 0x2818: /* z196 */
67 order = 2; 67 order = 2;
68 break; 68 break;
69 case 0x2827: /* zEC12 */
70 default:
71 order = 5;
72 break;
69 } 73 }
74 /* Limit number of empty zero pages for small memory sizes */
75 if (order > 2 && totalram_pages <= 16384)
76 order = 2;
70 77
71 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 78 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
72 if (!empty_zero_page) 79 if (!empty_zero_page)
@@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void)
75 page = virt_to_page((void *) empty_zero_page); 82 page = virt_to_page((void *) empty_zero_page);
76 split_page(page, order); 83 split_page(page, order);
77 for (i = 1 << order; i > 0; i--) { 84 for (i = 1 << order; i > 0; i--) {
78 SetPageReserved(page); 85 mark_page_reserved(page);
79 page++; 86 page++;
80 } 87 }
81 88
82 size = PAGE_SIZE << order; 89 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
83 zero_page_mask = (size - 1) & PAGE_MASK;
84
85 return 1UL << order;
86} 90}
87 91
88/* 92/*
@@ -139,7 +143,7 @@ void __init mem_init(void)
139 143
140 /* this will put all low memory onto the freelists */ 144 /* this will put all low memory onto the freelists */
141 totalram_pages += free_all_bootmem(); 145 totalram_pages += free_all_bootmem();
142 totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ 146 setup_zero_pages(); /* Setup zeroed pages. */
143 147
144 reservedpages = 0; 148 reservedpages = 0;
145 149
@@ -158,34 +162,15 @@ void __init mem_init(void)
158 PFN_ALIGN((unsigned long)&_eshared) - 1); 162 PFN_ALIGN((unsigned long)&_eshared) - 1);
159} 163}
160 164
161void free_init_pages(char *what, unsigned long begin, unsigned long end)
162{
163 unsigned long addr = begin;
164
165 if (begin >= end)
166 return;
167 for (; addr < end; addr += PAGE_SIZE) {
168 ClearPageReserved(virt_to_page(addr));
169 init_page_count(virt_to_page(addr));
170 memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
171 PAGE_SIZE);
172 free_page(addr);
173 totalram_pages++;
174 }
175 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
176}
177
178void free_initmem(void) 165void free_initmem(void)
179{ 166{
180 free_init_pages("unused kernel memory", 167 free_initmem_default(0);
181 (unsigned long)&__init_begin,
182 (unsigned long)&__init_end);
183} 168}
184 169
185#ifdef CONFIG_BLK_DEV_INITRD 170#ifdef CONFIG_BLK_DEV_INITRD
186void __init free_initrd_mem(unsigned long start, unsigned long end) 171void __init free_initrd_mem(unsigned long start, unsigned long end)
187{ 172{
188 free_init_pages("initrd memory", start, end); 173 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
189} 174}
190#endif 175#endif
191 176
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040ed5e59..80adfbf75065 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10#include <asm/page.h> 10#include <asm/page.h>
11 11
12static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
13{
14 asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
15 : [addr] "+a" (addr) : [skey] "d" (skey));
16 return addr;
17}
18
12void storage_key_init_range(unsigned long start, unsigned long end) 19void storage_key_init_range(unsigned long start, unsigned long end)
13{ 20{
14 unsigned long boundary, function, size; 21 unsigned long boundary, size;
15 22
16 while (start < end) { 23 while (start < end) {
17 if (MACHINE_HAS_EDAT2) {
18 /* set storage keys for a 2GB frame */
19 function = 0x22000 | PAGE_DEFAULT_KEY;
20 size = 1UL << 31;
21 boundary = (start + size) & ~(size - 1);
22 if (boundary <= end) {
23 do {
24 start = pfmf(function, start);
25 } while (start < boundary);
26 continue;
27 }
28 }
29 if (MACHINE_HAS_EDAT1) { 24 if (MACHINE_HAS_EDAT1) {
30 /* set storage keys for a 1MB frame */ 25 /* set storage keys for a 1MB frame */
31 function = 0x21000 | PAGE_DEFAULT_KEY;
32 size = 1UL << 20; 26 size = 1UL << 20;
33 boundary = (start + size) & ~(size - 1); 27 boundary = (start + size) & ~(size - 1);
34 if (boundary <= end) { 28 if (boundary <= end) {
35 do { 29 do {
36 start = pfmf(function, start); 30 start = sske_frame(start, PAGE_DEFAULT_KEY);
37 } while (start < boundary); 31 } while (start < boundary);
38 continue; 32 continue;
39 } 33 }
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a34313..bd954e96f51c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
379} 379}
380EXPORT_SYMBOL_GPL(gmap_map_segment); 380EXPORT_SYMBOL_GPL(gmap_map_segment);
381 381
382/* 382static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
383 * this function is assumed to be called with mmap_sem held
384 */
385unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
386{ 383{
387 unsigned long *table, vmaddr, segment; 384 unsigned long *table;
388 struct mm_struct *mm;
389 struct gmap_pgtable *mp;
390 struct gmap_rmap *rmap;
391 struct vm_area_struct *vma;
392 struct page *page;
393 pgd_t *pgd;
394 pud_t *pud;
395 pmd_t *pmd;
396 385
397 current->thread.gmap_addr = address;
398 mm = gmap->mm;
399 /* Walk the gmap address space page table */
400 table = gmap->table + ((address >> 53) & 0x7ff); 386 table = gmap->table + ((address >> 53) & 0x7ff);
401 if (unlikely(*table & _REGION_ENTRY_INV)) 387 if (unlikely(*table & _REGION_ENTRY_INV))
402 return -EFAULT; 388 return ERR_PTR(-EFAULT);
403 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 389 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
404 table = table + ((address >> 42) & 0x7ff); 390 table = table + ((address >> 42) & 0x7ff);
405 if (unlikely(*table & _REGION_ENTRY_INV)) 391 if (unlikely(*table & _REGION_ENTRY_INV))
406 return -EFAULT; 392 return ERR_PTR(-EFAULT);
407 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 393 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
408 table = table + ((address >> 31) & 0x7ff); 394 table = table + ((address >> 31) & 0x7ff);
409 if (unlikely(*table & _REGION_ENTRY_INV)) 395 if (unlikely(*table & _REGION_ENTRY_INV))
410 return -EFAULT; 396 return ERR_PTR(-EFAULT);
411 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 397 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
412 table = table + ((address >> 20) & 0x7ff); 398 table = table + ((address >> 20) & 0x7ff);
399 return table;
400}
401
402/**
403 * __gmap_translate - translate a guest address to a user space address
404 * @address: guest address
405 * @gmap: pointer to guest mapping meta data structure
406 *
407 * Returns user space address which corresponds to the guest address or
408 * -EFAULT if no such mapping exists.
409 * This function does not establish potentially missing page table entries.
410 * The mmap_sem of the mm that belongs to the address space must be held
411 * when this function gets called.
412 */
413unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
414{
415 unsigned long *segment_ptr, vmaddr, segment;
416 struct gmap_pgtable *mp;
417 struct page *page;
413 418
419 current->thread.gmap_addr = address;
420 segment_ptr = gmap_table_walk(address, gmap);
421 if (IS_ERR(segment_ptr))
422 return PTR_ERR(segment_ptr);
414 /* Convert the gmap address to an mm address. */ 423 /* Convert the gmap address to an mm address. */
415 segment = *table; 424 segment = *segment_ptr;
416 if (likely(!(segment & _SEGMENT_ENTRY_INV))) { 425 if (!(segment & _SEGMENT_ENTRY_INV)) {
417 page = pfn_to_page(segment >> PAGE_SHIFT); 426 page = pfn_to_page(segment >> PAGE_SHIFT);
418 mp = (struct gmap_pgtable *) page->index; 427 mp = (struct gmap_pgtable *) page->index;
419 return mp->vmaddr | (address & ~PMD_MASK); 428 return mp->vmaddr | (address & ~PMD_MASK);
420 } else if (segment & _SEGMENT_ENTRY_RO) { 429 } else if (segment & _SEGMENT_ENTRY_RO) {
421 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; 430 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
422 vma = find_vma(mm, vmaddr); 431 return vmaddr | (address & ~PMD_MASK);
423 if (!vma || vma->vm_start > vmaddr) 432 }
424 return -EFAULT; 433 return -EFAULT;
425 434}
426 /* Walk the parent mm page table */ 435EXPORT_SYMBOL_GPL(__gmap_translate);
427 pgd = pgd_offset(mm, vmaddr); 436
428 pud = pud_alloc(mm, pgd, vmaddr); 437/**
429 if (!pud) 438 * gmap_translate - translate a guest address to a user space address
430 return -ENOMEM; 439 * @address: guest address
431 pmd = pmd_alloc(mm, pud, vmaddr); 440 * @gmap: pointer to guest mapping meta data structure
432 if (!pmd) 441 *
433 return -ENOMEM; 442 * Returns user space address which corresponds to the guest address or
434 if (!pmd_present(*pmd) && 443 * -EFAULT if no such mapping exists.
435 __pte_alloc(mm, vma, pmd, vmaddr)) 444 * This function does not establish potentially missing page table entries.
436 return -ENOMEM; 445 */
437 /* pmd now points to a valid segment table entry. */ 446unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
438 rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); 447{
439 if (!rmap) 448 unsigned long rc;
440 return -ENOMEM; 449
441 /* Link gmap segment table entry location to page table. */ 450 down_read(&gmap->mm->mmap_sem);
442 page = pmd_page(*pmd); 451 rc = __gmap_translate(address, gmap);
443 mp = (struct gmap_pgtable *) page->index; 452 up_read(&gmap->mm->mmap_sem);
444 rmap->entry = table; 453 return rc;
445 spin_lock(&mm->page_table_lock); 454}
455EXPORT_SYMBOL_GPL(gmap_translate);
456
457static int gmap_connect_pgtable(unsigned long segment,
458 unsigned long *segment_ptr,
459 struct gmap *gmap)
460{
461 unsigned long vmaddr;
462 struct vm_area_struct *vma;
463 struct gmap_pgtable *mp;
464 struct gmap_rmap *rmap;
465 struct mm_struct *mm;
466 struct page *page;
467 pgd_t *pgd;
468 pud_t *pud;
469 pmd_t *pmd;
470
471 mm = gmap->mm;
472 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
473 vma = find_vma(mm, vmaddr);
474 if (!vma || vma->vm_start > vmaddr)
475 return -EFAULT;
476 /* Walk the parent mm page table */
477 pgd = pgd_offset(mm, vmaddr);
478 pud = pud_alloc(mm, pgd, vmaddr);
479 if (!pud)
480 return -ENOMEM;
481 pmd = pmd_alloc(mm, pud, vmaddr);
482 if (!pmd)
483 return -ENOMEM;
484 if (!pmd_present(*pmd) &&
485 __pte_alloc(mm, vma, pmd, vmaddr))
486 return -ENOMEM;
487 /* pmd now points to a valid segment table entry. */
488 rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
489 if (!rmap)
490 return -ENOMEM;
491 /* Link gmap segment table entry location to page table. */
492 page = pmd_page(*pmd);
493 mp = (struct gmap_pgtable *) page->index;
494 rmap->entry = segment_ptr;
495 spin_lock(&mm->page_table_lock);
496 if (*segment_ptr == segment) {
446 list_add(&rmap->list, &mp->mapper); 497 list_add(&rmap->list, &mp->mapper);
447 spin_unlock(&mm->page_table_lock);
448 /* Set gmap segment table entry to page table. */ 498 /* Set gmap segment table entry to page table. */
449 *table = pmd_val(*pmd) & PAGE_MASK; 499 *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
450 return vmaddr | (address & ~PMD_MASK); 500 rmap = NULL;
501 }
502 spin_unlock(&mm->page_table_lock);
503 kfree(rmap);
504 return 0;
505}
506
507static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
508{
509 struct gmap_rmap *rmap, *next;
510 struct gmap_pgtable *mp;
511 struct page *page;
512 int flush;
513
514 flush = 0;
515 spin_lock(&mm->page_table_lock);
516 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
517 mp = (struct gmap_pgtable *) page->index;
518 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
519 *rmap->entry =
520 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
521 list_del(&rmap->list);
522 kfree(rmap);
523 flush = 1;
524 }
525 spin_unlock(&mm->page_table_lock);
526 if (flush)
527 __tlb_flush_global();
528}
529
530/*
531 * this function is assumed to be called with mmap_sem held
532 */
533unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
534{
535 unsigned long *segment_ptr, segment;
536 struct gmap_pgtable *mp;
537 struct page *page;
538 int rc;
539
540 current->thread.gmap_addr = address;
541 segment_ptr = gmap_table_walk(address, gmap);
542 if (IS_ERR(segment_ptr))
543 return -EFAULT;
544 /* Convert the gmap address to an mm address. */
545 while (1) {
546 segment = *segment_ptr;
547 if (!(segment & _SEGMENT_ENTRY_INV)) {
548 /* Page table is present */
549 page = pfn_to_page(segment >> PAGE_SHIFT);
550 mp = (struct gmap_pgtable *) page->index;
551 return mp->vmaddr | (address & ~PMD_MASK);
552 }
553 if (!(segment & _SEGMENT_ENTRY_RO))
554 /* Nothing mapped in the gmap address space. */
555 break;
556 rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
557 if (rc)
558 return rc;
451 } 559 }
452 return -EFAULT; 560 return -EFAULT;
453} 561}
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
511} 619}
512EXPORT_SYMBOL_GPL(gmap_discard); 620EXPORT_SYMBOL_GPL(gmap_discard);
513 621
514void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
515{
516 struct gmap_rmap *rmap, *next;
517 struct gmap_pgtable *mp;
518 struct page *page;
519 int flush;
520
521 flush = 0;
522 spin_lock(&mm->page_table_lock);
523 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
524 mp = (struct gmap_pgtable *) page->index;
525 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
526 *rmap->entry =
527 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
528 list_del(&rmap->list);
529 kfree(rmap);
530 flush = 1;
531 }
532 spin_unlock(&mm->page_table_lock);
533 if (flush)
534 __tlb_flush_global();
535}
536
537static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, 622static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
538 unsigned long vmaddr) 623 unsigned long vmaddr)
539{ 624{
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
586{ 671{
587} 672}
588 673
589static inline void gmap_unmap_notifier(struct mm_struct *mm, 674static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
590 unsigned long *table) 675 unsigned long *table)
591{ 676{
592} 677}
593 678
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
653 unsigned int bit, mask; 738 unsigned int bit, mask;
654 739
655 if (mm_has_pgste(mm)) { 740 if (mm_has_pgste(mm)) {
656 gmap_unmap_notifier(mm, table); 741 gmap_disconnect_pgtable(mm, table);
657 return page_table_free_pgste(table); 742 return page_table_free_pgste(table);
658 } 743 }
659 /* Free 1K/2K page table fragment of a 4K page */ 744 /* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
696 781
697 mm = tlb->mm; 782 mm = tlb->mm;
698 if (mm_has_pgste(mm)) { 783 if (mm_has_pgste(mm)) {
699 gmap_unmap_notifier(mm, table); 784 gmap_disconnect_pgtable(mm, table);
700 table = (unsigned long *) (__pa(table) | FRAG_MASK); 785 table = (unsigned long *) (__pa(table) | FRAG_MASK);
701 tlb_remove_table(tlb, table); 786 tlb_remove_table(tlb, table);
702 return; 787 return;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ffab84db6907..35837054f734 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
191/* 191/*
192 * Add a backed mem_map array to the virtual mem_map array. 192 * Add a backed mem_map array to the virtual mem_map array.
193 */ 193 */
194int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 194int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
195{ 195{
196 unsigned long address, start_addr, end_addr; 196 unsigned long address = start;
197 pgd_t *pg_dir; 197 pgd_t *pg_dir;
198 pud_t *pu_dir; 198 pud_t *pu_dir;
199 pmd_t *pm_dir; 199 pmd_t *pm_dir;
200 pte_t *pt_dir; 200 pte_t *pt_dir;
201 int ret = -ENOMEM; 201 int ret = -ENOMEM;
202 202
203 start_addr = (unsigned long) start; 203 for (address = start; address < end;) {
204 end_addr = (unsigned long) (start + nr);
205
206 for (address = start_addr; address < end_addr;) {
207 pg_dir = pgd_offset_k(address); 204 pg_dir = pgd_offset_k(address);
208 if (pgd_none(*pg_dir)) { 205 if (pgd_none(*pg_dir)) {
209 pu_dir = vmem_pud_alloc(); 206 pu_dir = vmem_pud_alloc();
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
262 } 259 }
263 address += PAGE_SIZE; 260 address += PAGE_SIZE;
264 } 261 }
265 memset(start, 0, nr * sizeof(struct page)); 262 memset((void *)start, 0, end - start);
266 ret = 0; 263 ret = 0;
267out: 264out:
268 flush_tlb_kernel_range(start_addr, end_addr); 265 flush_tlb_kernel_range(start, end);
269 return ret; 266 return ret;
270} 267}
271 268
272void vmemmap_free(struct page *memmap, unsigned long nr_pages) 269void vmemmap_free(unsigned long start, unsigned long end)
273{ 270{
274} 271}
275 272
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0972e91cced2..82f165f8078c 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
 
 	if (!bpf_jit_enable)
 		return;
-	addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
+	addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
 	if (addrs == NULL)
 		return;
-	memset(addrs, 0, fp->len * sizeof(*addrs));
 	memset(&jit, 0, sizeof(cjit));
 	memset(&cjit, 0, sizeof(cjit));
 
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 584b93674ea4..ffeb17ce7f31 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,6 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
 		switch (id.machine) {
 		case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
 		case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
+		case 0x2827: ops->cpu_type = "s390/zEC12"; break;
 		default: return -ENODEV;
 		}
 	}
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index f0f426a113ce..086a2e37935d 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
 # Makefile for the s390 PCI subsystem.
 #
 
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
-			pci_sysfs.o pci_event.o pci_debug.o
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+			pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 27b4c17855b9..e6f15b5d8b7d 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
99static struct kmem_cache *zdev_irq_cache; 99static struct kmem_cache *zdev_irq_cache;
100static struct kmem_cache *zdev_fmb_cache; 100static struct kmem_cache *zdev_fmb_cache;
101 101
102debug_info_t *pci_debug_msg_id;
103debug_info_t *pci_debug_err_id;
104
105static inline int irq_to_msi_nr(unsigned int irq) 102static inline int irq_to_msi_nr(unsigned int irq)
106{ 103{
107 return irq & ZPCI_MSI_MASK; 104 return irq & ZPCI_MSI_MASK;
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
179 fib->aisb = (u64) bucket->aisb + aisb / 8; 176 fib->aisb = (u64) bucket->aisb + aisb / 8;
180 fib->aisbo = aisb & ZPCI_MSI_MASK; 177 fib->aisbo = aisb & ZPCI_MSI_MASK;
181 178
182 rc = mpcifc_instr(req, fib); 179 rc = s390pci_mod_fc(req, fib);
183 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); 180 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
184 181
185 free_page((unsigned long) fib); 182 free_page((unsigned long) fib);
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
209 fib->iota = args->iota; 206 fib->iota = args->iota;
210 fib->fmb_addr = args->fmb_addr; 207 fib->fmb_addr = args->fmb_addr;
211 208
212 rc = mpcifc_instr(req, fib); 209 rc = s390pci_mod_fc(req, fib);
213 free_page((unsigned long) fib); 210 free_page((unsigned long) fib);
214 return rc; 211 return rc;
215} 212}
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
249 if (zdev->fmb) 246 if (zdev->fmb)
250 return -EINVAL; 247 return -EINVAL;
251 248
252 zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL); 249 zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
253 if (!zdev->fmb) 250 if (!zdev->fmb)
254 return -ENOMEM; 251 return -ENOMEM;
255 memset(zdev->fmb, 0, sizeof(*zdev->fmb));
256 WARN_ON((u64) zdev->fmb & 0xf); 252 WARN_ON((u64) zdev->fmb & 0xf);
257 253
258 args.fmb_addr = virt_to_phys(zdev->fmb); 254 args.fmb_addr = virt_to_phys(zdev->fmb);
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
284 u64 data; 280 u64 data;
285 int rc; 281 int rc;
286 282
287 rc = pcilg_instr(&data, req, offset); 283 rc = s390pci_load(&data, req, offset);
288 data = data << ((8 - len) * 8); 284 if (!rc) {
289 data = le64_to_cpu(data); 285 data = data << ((8 - len) * 8);
290 if (!rc) 286 data = le64_to_cpu(data);
291 *val = (u32) data; 287 *val = (u32) data;
292 else 288 } else
293 *val = 0xffffffff; 289 *val = 0xffffffff;
294 return rc; 290 return rc;
295} 291}
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
302 298
303 data = cpu_to_le64(data); 299 data = cpu_to_le64(data);
304 data = data >> ((8 - len) * 8); 300 data = data >> ((8 - len) * 8);
305 rc = pcistg_instr(data, req, offset); 301 rc = s390pci_store(data, req, offset);
306 return rc; 302 return rc;
307} 303}
308 304
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
409 int size, u32 *val) 405 int size, u32 *val)
410{ 406{
411 struct zpci_dev *zdev = get_zdev_by_bus(bus); 407 struct zpci_dev *zdev = get_zdev_by_bus(bus);
408 int ret;
412 409
413 if (!zdev || devfn != ZPCI_DEVFN) 410 if (!zdev || devfn != ZPCI_DEVFN)
414 return 0; 411 ret = -ENODEV;
415 return zpci_cfg_load(zdev, where, val, size); 412 else
413 ret = zpci_cfg_load(zdev, where, val, size);
414
415 return ret;
416} 416}
417 417
418static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, 418static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
419 int size, u32 val) 419 int size, u32 val)
420{ 420{
421 struct zpci_dev *zdev = get_zdev_by_bus(bus); 421 struct zpci_dev *zdev = get_zdev_by_bus(bus);
422 int ret;
422 423
423 if (!zdev || devfn != ZPCI_DEVFN) 424 if (!zdev || devfn != ZPCI_DEVFN)
424 return 0; 425 ret = -ENODEV;
425 return zpci_cfg_store(zdev, where, val, size); 426 else
427 ret = zpci_cfg_store(zdev, where, val, size);
428
429 return ret;
426} 430}
427 431
428static struct pci_ops pci_root_ops = { 432static struct pci_ops pci_root_ops = {
@@ -474,7 +478,7 @@ scan:
474 } 478 }
475 479
476 /* enable interrupts again */ 480 /* enable interrupts again */
477 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 481 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
478 482
479 /* check again to not lose initiative */ 483 /* check again to not lose initiative */
480 rmb(); 484 rmb();
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
596 } 600 }
597}; 601};
598 602
599static void zpci_unmap_resources(struct pci_dev *pdev)
600{
601 resource_size_t len;
602 int i;
603
604 for (i = 0; i < PCI_BAR_COUNT; i++) {
605 len = pci_resource_len(pdev, i);
606 if (!len)
607 continue;
608 pci_iounmap(pdev, (void *) pdev->resource[i].start);
609 }
610};
611
612struct zpci_dev *zpci_alloc_device(void) 603struct zpci_dev *zpci_alloc_device(void)
613{ 604{
614 struct zpci_dev *zdev; 605 struct zpci_dev *zdev;
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
636 kfree(zdev); 627 kfree(zdev);
637} 628}
638 629
639/* Called on removal of pci_dev, leaves zpci and bus device */
640static void zpci_remove_device(struct pci_dev *pdev)
641{
642 struct zpci_dev *zdev = get_zdev(pdev);
643
644 dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
645 zdev->state = ZPCI_FN_STATE_CONFIGURED;
646 zpci_dma_exit_device(zdev);
647 zpci_fmb_disable_device(zdev);
648 zpci_sysfs_remove_device(&pdev->dev);
649 zpci_unmap_resources(pdev);
650 list_del(&zdev->entry); /* can be called from init */
651 zdev->pdev = NULL;
652}
653
654static void zpci_scan_devices(void)
655{
656 struct zpci_dev *zdev;
657
658 mutex_lock(&zpci_list_lock);
659 list_for_each_entry(zdev, &zpci_list, entry)
660 if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
661 zpci_scan_device(zdev);
662 mutex_unlock(&zpci_list_lock);
663}
664
665/* 630/*
666 * Too late for any s390 specific setup, since interrupts must be set up 631 * Too late for any s390 specific setup, since interrupts must be set up
667 * already which requires DMA setup too and the pci scan will access the 632 * already which requires DMA setup too and the pci scan will access the
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
688 return 0; 653 return 0;
689} 654}
690 655
691void pcibios_disable_device(struct pci_dev *pdev)
692{
693 zpci_remove_device(pdev);
694 pdev->sysdata = NULL;
695}
696
697int pcibios_add_platform_entries(struct pci_dev *pdev) 656int pcibios_add_platform_entries(struct pci_dev *pdev)
698{ 657{
699 return zpci_sysfs_add_device(&pdev->dev); 658 return zpci_sysfs_add_device(&pdev->dev);
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
789 spin_lock_init(&bucket->lock); 748 spin_lock_init(&bucket->lock);
790 /* set summary to 1 to be called every time for the ISC */ 749 /* set summary to 1 to be called every time for the ISC */
791 *zpci_irq_si = 1; 750 *zpci_irq_si = 1;
792 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); 751 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
793 return 0; 752 return 0;
794 753
795out_ai: 754out_ai:
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
872 spin_unlock(&zpci_iomap_lock); 831 spin_unlock(&zpci_iomap_lock);
873} 832}
874 833
875static int zpci_create_device_bus(struct zpci_dev *zdev) 834int pcibios_add_device(struct pci_dev *pdev)
835{
836 struct zpci_dev *zdev = get_zdev(pdev);
837
838 zdev->pdev = pdev;
839 zpci_debug_init_device(zdev);
840 zpci_fmb_enable_device(zdev);
841 zpci_map_resources(zdev);
842
843 return 0;
844}
845
846static int zpci_scan_bus(struct zpci_dev *zdev)
876{ 847{
877 struct resource *res; 848 struct resource *res;
878 LIST_HEAD(resources); 849 LIST_HEAD(resources);
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
909 pci_add_resource(&resources, res); 880 pci_add_resource(&resources, res);
910 } 881 }
911 882
912 zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops, 883 zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
913 zdev, &resources); 884 zdev, &resources);
914 if (!zdev->bus) 885 if (!zdev->bus)
915 return -EIO; 886 return -EIO;
916 887
@@ -959,6 +930,13 @@ out:
959} 930}
960EXPORT_SYMBOL_GPL(zpci_enable_device); 931EXPORT_SYMBOL_GPL(zpci_enable_device);
961 932
933int zpci_disable_device(struct zpci_dev *zdev)
934{
935 zpci_dma_exit_device(zdev);
936 return clp_disable_fh(zdev);
937}
938EXPORT_SYMBOL_GPL(zpci_disable_device);
939
962int zpci_create_device(struct zpci_dev *zdev) 940int zpci_create_device(struct zpci_dev *zdev)
963{ 941{
964 int rc; 942 int rc;
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
967 if (rc) 945 if (rc)
968 goto out; 946 goto out;
969 947
970 rc = zpci_create_device_bus(zdev); 948 if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
949 rc = zpci_enable_device(zdev);
950 if (rc)
951 goto out_free;
952
953 zdev->state = ZPCI_FN_STATE_ONLINE;
954 }
955 rc = zpci_scan_bus(zdev);
971 if (rc) 956 if (rc)
972 goto out_bus; 957 goto out_disable;
973 958
974 mutex_lock(&zpci_list_lock); 959 mutex_lock(&zpci_list_lock);
975 list_add_tail(&zdev->entry, &zpci_list); 960 list_add_tail(&zdev->entry, &zpci_list);
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
977 hotplug_ops->create_slot(zdev); 962 hotplug_ops->create_slot(zdev);
978 mutex_unlock(&zpci_list_lock); 963 mutex_unlock(&zpci_list_lock);
979 964
980 if (zdev->state == ZPCI_FN_STATE_STANDBY)
981 return 0;
982
983 rc = zpci_enable_device(zdev);
984 if (rc)
985 goto out_start;
986 return 0; 965 return 0;
987 966
988out_start: 967out_disable:
989 mutex_lock(&zpci_list_lock); 968 if (zdev->state == ZPCI_FN_STATE_ONLINE)
990 list_del(&zdev->entry); 969 zpci_disable_device(zdev);
991 if (hotplug_ops) 970out_free:
992 hotplug_ops->remove_slot(zdev);
993 mutex_unlock(&zpci_list_lock);
994out_bus:
995 zpci_free_domain(zdev); 971 zpci_free_domain(zdev);
996out: 972out:
997 return rc; 973 return rc;
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
1016 goto out; 992 goto out;
1017 } 993 }
1018 994
1019 zpci_debug_init_device(zdev);
1020 zpci_fmb_enable_device(zdev);
1021 zpci_map_resources(zdev);
1022 pci_bus_add_devices(zdev->bus); 995 pci_bus_add_devices(zdev->bus);
1023 996
1024 /* now that pdev was added to the bus mark it as used */
1025 zdev->state = ZPCI_FN_STATE_ONLINE;
1026 return 0; 997 return 0;
1027
1028out: 998out:
1029 zpci_dma_exit_device(zdev); 999 zpci_dma_exit_device(zdev);
1030 clp_disable_fh(zdev); 1000 clp_disable_fh(zdev);
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
1087} 1057}
1088EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); 1058EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
1089 1059
1090unsigned int s390_pci_probe = 1; 1060unsigned int s390_pci_probe;
1091EXPORT_SYMBOL_GPL(s390_pci_probe); 1061EXPORT_SYMBOL_GPL(s390_pci_probe);
1092 1062
1093char * __init pcibios_setup(char *str) 1063char * __init pcibios_setup(char *str)
1094{ 1064{
1095 if (!strcmp(str, "off")) { 1065 if (!strcmp(str, "on")) {
1096 s390_pci_probe = 0; 1066 s390_pci_probe = 1;
1097 return NULL; 1067 return NULL;
1098 } 1068 }
1099 return str; 1069 return str;
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
1138 if (rc) 1108 if (rc)
1139 goto out_find; 1109 goto out_find;
1140 1110
1141 zpci_scan_devices();
1142 return 0; 1111 return 0;
1143 1112
1144out_find: 1113out_find:
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index f339fe2feb15..bd34359d1546 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -13,6 +13,7 @@
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <asm/pci_debug.h>
16#include <asm/pci_clp.h> 17#include <asm/pci_clp.h>
17 18
18/* 19/*
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
144 struct zpci_dev *zdev; 145 struct zpci_dev *zdev;
145 int rc; 146 int rc;
146 147
148 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
147 zdev = zpci_alloc_device(); 149 zdev = zpci_alloc_device();
148 if (IS_ERR(zdev)) 150 if (IS_ERR(zdev))
149 return PTR_ERR(zdev); 151 return PTR_ERR(zdev);
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
204 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) 206 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
205 *fh = rrb->response.fh; 207 *fh = rrb->response.fh;
206 else { 208 else {
207 pr_err("Set PCI FN failed with response: %x cc: %d\n", 209 zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
208 rrb->response.hdr.rsp, rc); 210 rrb->response.hdr.rsp);
209 rc = -EIO; 211 rc = -EIO;
210 } 212 }
211 clp_free_block(rrb); 213 clp_free_block(rrb);
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
221 if (!rc) 223 if (!rc)
222 /* Success -> store enabled handle in zdev */ 224 /* Success -> store enabled handle in zdev */
223 zdev->fh = fh; 225 zdev->fh = fh;
226
227 zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
224 return rc; 228 return rc;
225} 229}
226 230
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
237 if (!rc) 241 if (!rc)
238 /* Success -> store disabled handle in zdev */ 242 /* Success -> store disabled handle in zdev */
239 zdev->fh = fh; 243 zdev->fh = fh;
240 else 244
241 dev_err(&zdev->pdev->dev, 245 zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
242 "Failed to disable fn handle: 0x%x\n", fh);
243 return rc; 246 return rc;
244} 247}
245 248
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index a5d07bc2a547..771b82359af4 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -11,12 +11,17 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/seq_file.h> 12#include <linux/seq_file.h>
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/export.h>
14#include <linux/pci.h> 15#include <linux/pci.h>
15#include <asm/debug.h> 16#include <asm/debug.h>
16 17
17#include <asm/pci_dma.h> 18#include <asm/pci_dma.h>
18 19
19static struct dentry *debugfs_root; 20static struct dentry *debugfs_root;
21debug_info_t *pci_debug_msg_id;
22EXPORT_SYMBOL_GPL(pci_debug_msg_id);
23debug_info_t *pci_debug_err_id;
24EXPORT_SYMBOL_GPL(pci_debug_err_id);
20 25
21static char *pci_perf_names[] = { 26static char *pci_perf_names[] = {
22 /* hardware counters */ 27 /* hardware counters */
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
168 return -EINVAL; 173 return -EINVAL;
169 debug_register_view(pci_debug_msg_id, &debug_sprintf_view); 174 debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
170 debug_set_level(pci_debug_msg_id, 3); 175 debug_set_level(pci_debug_msg_id, 3);
171 zpci_dbg("Debug view initialized\n");
172 176
173 /* error log */ 177 /* error log */
174 pci_debug_err_id = debug_register("pci_error", 2, 1, 16); 178 pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
176 return -EINVAL; 180 return -EINVAL;
177 debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); 181 debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
178 debug_set_level(pci_debug_err_id, 6); 182 debug_set_level(pci_debug_err_id, 6);
179 zpci_err("Debug view initialized\n");
180 183
181 debugfs_root = debugfs_create_dir("pci", NULL); 184 debugfs_root = debugfs_create_dir("pci", NULL);
182 return 0; 185 return 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a547419907c3..f8e69d5bc0a9 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 		 * needs to be redone!
 		 */
 		goto no_refresh;
-	rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
-			 nr_pages * PAGE_SIZE);
+
+	rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+				   nr_pages * PAGE_SIZE);
 
 no_refresh:
 	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	int flags = ZPCI_PTE_VALID;
 	dma_addr_t dma_addr;
 
-	WARN_ON_ONCE(offset > PAGE_SIZE);
-
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 
 	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
 		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
-		return dma_addr + offset;
+		return dma_addr + (offset & ~PAGE_MASK);
 	}
 
 out_free:
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
new file mode 100644
index 000000000000..22eeb9d7ffeb
--- /dev/null
+++ b/arch/s390/pci/pci_insn.c
@@ -0,0 +1,202 @@
1/*
2 * s390 specific pci instructions
3 *
4 * Copyright IBM Corp. 2013
5 */
6
7#include <linux/export.h>
8#include <linux/errno.h>
9#include <linux/delay.h>
10#include <asm/pci_insn.h>
11#include <asm/processor.h>
12
13#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
14
15/* Modify PCI Function Controls */
16static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
17{
18 u8 cc;
19
20 asm volatile (
21 " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
22 " ipm %[cc]\n"
23 " srl %[cc],28\n"
24 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
25 : : "cc");
26 *status = req >> 24 & 0xff;
27 return cc;
28}
29
30int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
31{
32 u8 cc, status;
33
34 do {
35 cc = __mpcifc(req, fib, &status);
36 if (cc == 2)
37 msleep(ZPCI_INSN_BUSY_DELAY);
38 } while (cc == 2);
39
40 if (cc)
41 printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
42 __func__, cc, status);
43 return (cc) ? -EIO : 0;
44}
45
46/* Refresh PCI Translations */
47static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
48{
49 register u64 __addr asm("2") = addr;
50 register u64 __range asm("3") = range;
51 u8 cc;
52
53 asm volatile (
54 " .insn rre,0xb9d30000,%[fn],%[addr]\n"
55 " ipm %[cc]\n"
56 " srl %[cc],28\n"
57 : [cc] "=d" (cc), [fn] "+d" (fn)
58 : [addr] "d" (__addr), "d" (__range)
59 : "cc");
60 *status = fn >> 24 & 0xff;
61 return cc;
62}
63
64int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
65{
66 u8 cc, status;
67
68 do {
69 cc = __rpcit(fn, addr, range, &status);
70 if (cc == 2)
71 udelay(ZPCI_INSN_BUSY_DELAY);
72 } while (cc == 2);
73
74 if (cc)
75 printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
76 __func__, cc, status, addr, range);
77 return (cc) ? -EIO : 0;
78}
79
80/* Set Interruption Controls */
81void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
82{
83 asm volatile (
84 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
85 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
86}
87
88/* PCI Load */
89static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
90{
91 register u64 __req asm("2") = req;
92 register u64 __offset asm("3") = offset;
93 int cc = -ENXIO;
94 u64 __data;
95
96 asm volatile (
97 " .insn rre,0xb9d20000,%[data],%[req]\n"
98 "0: ipm %[cc]\n"
99 " srl %[cc],28\n"
100 "1:\n"
101 EX_TABLE(0b, 1b)
102 : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
103 : "d" (__offset)
104 : "cc");
105 *status = __req >> 24 & 0xff;
106 if (!cc)
107 *data = __data;
108
109 return cc;
110}
111
112int s390pci_load(u64 *data, u64 req, u64 offset)
113{
114 u8 status;
115 int cc;
116
117 do {
118 cc = __pcilg(data, req, offset, &status);
119 if (cc == 2)
120 udelay(ZPCI_INSN_BUSY_DELAY);
121 } while (cc == 2);
122
123 if (cc)
124 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
125 __func__, cc, status, req, offset);
126 return (cc > 0) ? -EIO : cc;
127}
128EXPORT_SYMBOL_GPL(s390pci_load);
129
130/* PCI Store */
131static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
132{
133 register u64 __req asm("2") = req;
134 register u64 __offset asm("3") = offset;
135 int cc = -ENXIO;
136
137 asm volatile (
138 " .insn rre,0xb9d00000,%[data],%[req]\n"
139 "0: ipm %[cc]\n"
140 " srl %[cc],28\n"
141 "1:\n"
142 EX_TABLE(0b, 1b)
143 : [cc] "+d" (cc), [req] "+d" (__req)
144 : "d" (__offset), [data] "d" (data)
145 : "cc");
146 *status = __req >> 24 & 0xff;
147 return cc;
148}
149
150int s390pci_store(u64 data, u64 req, u64 offset)
151{
152 u8 status;
153 int cc;
154
155 do {
156 cc = __pcistg(data, req, offset, &status);
157 if (cc == 2)
158 udelay(ZPCI_INSN_BUSY_DELAY);
159 } while (cc == 2);
160
161 if (cc)
162 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
163 __func__, cc, status, req, offset);
164 return (cc > 0) ? -EIO : cc;
165}
166EXPORT_SYMBOL_GPL(s390pci_store);
167
168/* PCI Store Block */
169static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
170{
171 int cc = -ENXIO;
172
173 asm volatile (
174 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
175 "0: ipm %[cc]\n"
176 " srl %[cc],28\n"
177 "1:\n"
178 EX_TABLE(0b, 1b)
179 : [cc] "+d" (cc), [req] "+d" (req)
180 : [offset] "d" (offset), [data] "Q" (*data)
181 : "cc");
182 *status = req >> 24 & 0xff;
183 return cc;
184}
185
186int s390pci_store_block(const u64 *data, u64 req, u64 offset)
187{
188 u8 status;
189 int cc;
190
191 do {
192 cc = __pcistb(data, req, offset, &status);
193 if (cc == 2)
194 udelay(ZPCI_INSN_BUSY_DELAY);
195 } while (cc == 2);
196
197 if (cc)
198 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
199 __func__, cc, status, req, offset);
200 return (cc > 0) ? -EIO : cc;
201}
202EXPORT_SYMBOL_GPL(s390pci_store_block);
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 0297931335e1..b097aed05a9b 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -18,8 +18,9 @@
 
 /* mapping of irq numbers to msi_desc */
 static struct hlist_head *msi_hash;
-static unsigned int msihash_shift = 6;
-#define msi_hashfn(nr)	hash_long(nr, msihash_shift)
+static const unsigned int msi_hash_bits = 8;
+#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
+#define msi_hashfn(nr)	hash_long(nr, msi_hash_bits)
 
 static DEFINE_SPINLOCK(msi_map_lock);
 
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
 	map->irq = nr;
 	map->msi = msi;
 	zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+	INIT_HLIST_NODE(&map->msi_chain);
 
 	pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
 		 __func__, nr, msi_hashfn(nr));
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
 {
 	unsigned int i;
 
-	msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+	msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
 	if (!msi_hash)
 		return -ENOMEM;
 
-	for (i = 0; i < (1U << msihash_shift); i++)
+	for (i = 0; i < MSI_HASH_BUCKETS; i++)
 		INIT_HLIST_HEAD(&msi_hash[i]);
 	return 0;
 }
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index e569aa1fd2ba..c8def8bc9020 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -12,7 +12,7 @@ config SCORE
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
 	select HAVE_MOD_ARCH_SPECIFIC
-	select HAVE_VIRT_TO_BUS
+	select VIRT_TO_BUS
 	select MODULES_USE_ELF_REL
 	select CLONE_BACKWARDS
 
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index 79568466b578..f4c6d02421d3 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -41,24 +41,6 @@ void machine_halt(void) {}
41/* If or when software machine-power-off is implemented, add code here. */ 41/* If or when software machine-power-off is implemented, add code here. */
42void machine_power_off(void) {} 42void machine_power_off(void) {}
43 43
44/*
45 * The idle thread. There's no useful work to be
46 * done, so just try to conserve power and have a
47 * low exit latency (ie sit in a loop waiting for
48 * somebody to say that they'd like to reschedule)
49 */
50void __noreturn cpu_idle(void)
51{
52 /* endless idle loop with no priority at all */
53 while (1) {
54 rcu_idle_enter();
55 while (!need_resched())
56 barrier();
57 rcu_idle_exit();
58 schedule_preempt_disabled();
59 }
60}
61
62void ret_from_fork(void); 44void ret_from_fork(void);
63void ret_from_kernel_thread(void); 45void ret_from_kernel_thread(void);
64 46
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index cee6bce1e30c..1592aad7dbc4 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
43 43
44static struct kcore_list kcore_mem, kcore_vmalloc; 44static struct kcore_list kcore_mem, kcore_vmalloc;
45 45
46static unsigned long setup_zero_page(void) 46static void setup_zero_page(void)
47{ 47{
48 struct page *page; 48 struct page *page;
49 49
@@ -52,9 +52,7 @@ static unsigned long setup_zero_page(void)
52 panic("Oh boy, that early out of memory?"); 52 panic("Oh boy, that early out of memory?");
53 53
54 page = virt_to_page((void *) empty_zero_page); 54 page = virt_to_page((void *) empty_zero_page);
55 SetPageReserved(page); 55 mark_page_reserved(page);
56
57 return 1UL;
58} 56}
59 57
60#ifndef CONFIG_NEED_MULTIPLE_NODES 58#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -84,7 +82,7 @@ void __init mem_init(void)
84 82
85 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 83 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
86 totalram_pages += free_all_bootmem(); 84 totalram_pages += free_all_bootmem();
87 totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */ 85 setup_zero_page(); /* Setup zeroed pages. */
88 reservedpages = 0; 86 reservedpages = 0;
89 87
90 for (tmp = 0; tmp < max_low_pfn; tmp++) 88 for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -109,37 +107,16 @@ void __init mem_init(void)
109} 107}
110#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 108#endif /* !CONFIG_NEED_MULTIPLE_NODES */
111 109
112static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
113{
114 unsigned long pfn;
115
116 for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
117 struct page *page = pfn_to_page(pfn);
118 void *addr = phys_to_virt(PFN_PHYS(pfn));
119
120 ClearPageReserved(page);
121 init_page_count(page);
122 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
123 __free_page(page);
124 totalram_pages++;
125 }
126 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
127}
128
129#ifdef CONFIG_BLK_DEV_INITRD 110#ifdef CONFIG_BLK_DEV_INITRD
130void free_initrd_mem(unsigned long start, unsigned long end) 111void free_initrd_mem(unsigned long start, unsigned long end)
131{ 112{
132 free_init_pages("initrd memory", 113 free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
133 virt_to_phys((void *) start),
134 virt_to_phys((void *) end));
135} 114}
136#endif 115#endif
137 116
138void __init_refok free_initmem(void) 117void __init_refok free_initmem(void)
139{ 118{
140 free_init_pages("unused kernel memory", 119 free_initmem_default(POISON_FREE_INITMEM);
141 __pa(&__init_begin),
142 __pa(&__init_end));
143} 120}
144 121
145unsigned long pgd_current; 122unsigned long pgd_current;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e859633ce69..1ea597c6497a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -33,6 +33,7 @@ config SUPERH
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
 	select GENERIC_STRNCPY_FROM_USER
@@ -148,9 +149,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
 	def_bool n
 
-config ARCH_HAS_DEFAULT_IDLE
-	def_bool y
-
 config NO_IOPORT
 	def_bool !PCI
 	depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index aaff7671101b..764530c85aa9 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -254,11 +254,13 @@ static int usbhs_get_id(struct platform_device *pdev)
 	return gpio_get_value(GPIO_PTB3);
 }
 
-static void usbhs_phy_reset(struct platform_device *pdev)
+static int usbhs_phy_reset(struct platform_device *pdev)
 {
 	/* enable vbus if HOST */
 	if (!gpio_get_value(GPIO_PTB3))
 		gpio_set_value(GPIO_PTB5, 1);
+
+	return 0;
 }
 
 static struct renesas_usbhs_platform_info usbhs_info = {
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index c2c85f6cd738..a162a7f86b2e 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -35,7 +35,7 @@ static unsigned int nr_ports;
 
 static struct sh7786_pcie_hwops {
 	int (*core_init)(void);
-	async_func_ptr *port_init_hw;
+	async_func_t port_init_hw;
 } *sh7786_pcie_hwops;
 
 static struct resource sh7786_pci0_resources[] = {
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index b3808c7d67b2..699255d6d1c6 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
3 3
4#include <asm/cacheflush.h> 4#include <asm/cacheflush.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm-generic/hugetlb.h>
6 7
7 8
8static inline int is_hugepage_only_range(struct mm_struct *mm, 9static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 7d5ac4e48485..45a93669289d 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -207,8 +207,6 @@ static inline bool test_and_clear_restore_sigmask(void)
207 return true; 207 return true;
208} 208}
209 209
210#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
211
212#endif /* !__ASSEMBLY__ */ 210#endif /* !__ASSEMBLY__ */
213 211
214#endif /* __KERNEL__ */ 212#endif /* __KERNEL__ */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc0..2ea4483fd722 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
24 24
25static void (*sh_idle)(void); 25static void (*sh_idle)(void);
26 26
27static int hlt_counter; 27void default_idle(void)
28
29static int __init nohlt_setup(char *__unused)
30{
31 hlt_counter = 1;
32 return 1;
33}
34__setup("nohlt", nohlt_setup);
35
36static int __init hlt_setup(char *__unused)
37{
38 hlt_counter = 0;
39 return 1;
40}
41__setup("hlt", hlt_setup);
42
43static inline int hlt_works(void)
44{
45 return !hlt_counter;
46}
47
48/*
49 * On SMP it's slightly faster (but much more power-consuming!)
50 * to poll the ->work.need_resched flag instead of waiting for the
51 * cross-CPU IPI to arrive. Use this option with caution.
52 */
53static void poll_idle(void)
54{ 28{
29 set_bl_bit();
55 local_irq_enable(); 30 local_irq_enable();
56 while (!need_resched()) 31 /* Isn't this racy ? */
57 cpu_relax(); 32 cpu_sleep();
33 clear_bl_bit();
58} 34}
59 35
60void default_idle(void) 36void arch_cpu_idle_dead(void)
61{ 37{
62 if (hlt_works()) { 38 play_dead();
63 clear_thread_flag(TIF_POLLING_NRFLAG);
64 smp_mb__after_clear_bit();
65
66 set_bl_bit();
67 if (!need_resched()) {
68 local_irq_enable();
69 cpu_sleep();
70 } else
71 local_irq_enable();
72
73 set_thread_flag(TIF_POLLING_NRFLAG);
74 clear_bl_bit();
75 } else
76 poll_idle();
77} 39}
78 40
79/* 41void arch_cpu_idle(void)
80 * The idle thread. There's no useful work to be done, so just try to conserve
81 * power and have a low exit latency (ie sit in a loop waiting for somebody to
82 * say that they'd like to reschedule)
83 */
84void cpu_idle(void)
85{ 42{
86 unsigned int cpu = smp_processor_id(); 43 if (cpuidle_idle_call())
87 44 sh_idle();
88 set_thread_flag(TIF_POLLING_NRFLAG);
89
90 /* endless idle loop with no priority at all */
91 while (1) {
92 tick_nohz_idle_enter();
93 rcu_idle_enter();
94
95 while (!need_resched()) {
96 check_pgt_cache();
97 rmb();
98
99 if (cpu_is_offline(cpu))
100 play_dead();
101
102 local_irq_disable();
103 /* Don't trace irqs off for idle */
104 stop_critical_timings();
105 if (cpuidle_idle_call())
106 sh_idle();
107 /*
108 * Sanity check to ensure that sh_idle() returns
109 * with IRQs enabled
110 */
111 WARN_ON(irqs_disabled());
112 start_critical_timings();
113 }
114
115 rcu_idle_exit();
116 tick_nohz_idle_exit();
117 schedule_preempt_disabled();
118 }
119} 45}
120 46
121void __init select_idle_routine(void) 47void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
123 /* 49 /*
124 * If a platform has set its own idle routine, leave it alone. 50 * If a platform has set its own idle routine, leave it alone.
125 */ 51 */
126 if (sh_idle) 52 if (!sh_idle)
127 return;
128
129 if (hlt_works())
130 sh_idle = default_idle; 53 sh_idle = default_idle;
131 else
132 sh_idle = poll_idle;
133} 54}
134 55
135void stop_this_cpu(void *unused) 56void stop_this_cpu(void *unused)
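
The idle.c rewrite above plugs SH into the generic idle loop: the architecture now only supplies arch_cpu_idle() and arch_cpu_idle_dead(), and the core kernel drives the loop once a CPU enters cpu_startup_entry(). The sketch below is a simplified picture of that call flow, not the core kernel's exact idle code.

#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/tick.h>

/* Simplified shape of the generic idle loop that calls the arch hooks. */
static void idle_loop_sketch(void)
{
        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched()) {
                        local_irq_disable();
                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();   /* does not return */
                        arch_cpu_idle();        /* must re-enable IRQs */
                }
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
}
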
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 47475cca068a..fe584e516964 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -104,6 +104,7 @@ void sh_bios_vbr_reload(void)
104 ); 104 );
105} 105}
106 106
107#ifdef CONFIG_EARLY_PRINTK
107/* 108/*
108 * Print a string through the BIOS 109 * Print a string through the BIOS
109 */ 110 */
@@ -144,8 +145,6 @@ static struct console bios_console = {
144 .index = -1, 145 .index = -1,
145}; 146};
146 147
147static struct console *early_console;
148
149static int __init setup_early_printk(char *buf) 148static int __init setup_early_printk(char *buf)
150{ 149{
151 int keep_early = 0; 150 int keep_early = 0;
@@ -170,3 +169,4 @@ static int __init setup_early_printk(char *buf)
170 return 0; 169 return 0;
171} 170}
172early_param("earlyprintk", setup_early_printk); 171early_param("earlyprintk", setup_early_printk);
172#endif
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2062aa88af41..45696451f0ea 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
203 set_cpu_online(cpu, true); 203 set_cpu_online(cpu, true);
204 per_cpu(cpu_state, cpu) = CPU_ONLINE; 204 per_cpu(cpu_state, cpu) = CPU_ONLINE;
205 205
206 cpu_idle(); 206 cpu_startup_entry(CPUHP_ONLINE);
207} 207}
208 208
209extern struct { 209extern struct {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 105794037143..20f9ead650d3 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -417,15 +417,13 @@ void __init mem_init(void)
417 417
418 for_each_online_node(nid) { 418 for_each_online_node(nid) {
419 pg_data_t *pgdat = NODE_DATA(nid); 419 pg_data_t *pgdat = NODE_DATA(nid);
420 unsigned long node_pages = 0;
421 void *node_high_memory; 420 void *node_high_memory;
422 421
423 num_physpages += pgdat->node_present_pages; 422 num_physpages += pgdat->node_present_pages;
424 423
425 if (pgdat->node_spanned_pages) 424 if (pgdat->node_spanned_pages)
426 node_pages = free_all_bootmem_node(pgdat); 425 totalram_pages += free_all_bootmem_node(pgdat);
427 426
428 totalram_pages += node_pages;
429 427
430 node_high_memory = (void *)__va((pgdat->node_start_pfn + 428 node_high_memory = (void *)__va((pgdat->node_start_pfn +
431 pgdat->node_spanned_pages) << 429 pgdat->node_spanned_pages) <<
@@ -501,31 +499,13 @@ void __init mem_init(void)
501 499
502void free_initmem(void) 500void free_initmem(void)
503{ 501{
504 unsigned long addr; 502 free_initmem_default(0);
505
506 addr = (unsigned long)(&__init_begin);
507 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
508 ClearPageReserved(virt_to_page(addr));
509 init_page_count(virt_to_page(addr));
510 free_page(addr);
511 totalram_pages++;
512 }
513 printk("Freeing unused kernel memory: %ldk freed\n",
514 ((unsigned long)&__init_end -
515 (unsigned long)&__init_begin) >> 10);
516} 503}
517 504
518#ifdef CONFIG_BLK_DEV_INITRD 505#ifdef CONFIG_BLK_DEV_INITRD
519void free_initrd_mem(unsigned long start, unsigned long end) 506void free_initrd_mem(unsigned long start, unsigned long end)
520{ 507{
521 unsigned long p; 508 free_reserved_area(start, end, 0, "initrd");
522 for (p = start; p < end; p += PAGE_SIZE) {
523 ClearPageReserved(virt_to_page(p));
524 init_page_count(virt_to_page(p));
525 free_page(p);
526 totalram_pages++;
527 }
528 printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
529} 509}
530#endif 510#endif
531 511
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 289127d5241c..66dc562950ae 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG
84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32 84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32
85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64
86 86
87# CONFIG_BITS can be used at source level to get 32/64 bits
88config BITS
89 int
90 default 32 if SPARC32
91 default 64 if SPARC64
92
93config IOMMU_HELPER 87config IOMMU_HELPER
94 bool 88 bool
95 default y if SPARC64 89 default y if SPARC64
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM
197 191
198config GENERIC_HWEIGHT 192config GENERIC_HWEIGHT
199 bool 193 bool
200 default y if !ULTRA_HAS_POPULATION_COUNT 194 default y
201 195
202config GENERIC_CALIBRATE_DELAY 196config GENERIC_CALIBRATE_DELAY
203 bool 197 bool
@@ -413,6 +407,8 @@ config SERIAL_CONSOLE
413config SPARC_LEON 407config SPARC_LEON
414 bool "Sparc Leon processor family" 408 bool "Sparc Leon processor family"
415 depends on SPARC32 409 depends on SPARC32
410 select USB_EHCI_BIG_ENDIAN_MMIO
411 select USB_EHCI_BIG_ENDIAN_DESC
416 ---help--- 412 ---help---
417 Say Y here if you are running on a SPARC-LEON processor. 413 Say Y here if you are running on a SPARC-LEON processor.
417 Say Y here if you are running on a SPARC-LEON processor. 413 Say Y here if you are running on a SPARC-LEON processor.
418 The LEON processor is a synthesizable VHDL model of the 414 The LEON processor is a synthesizable VHDL model of the
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e26d430ce2fd..ff18e3cfb6b1 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -2,11 +2,16 @@
2 2
3 3
4generic-y += clkdev.h 4generic-y += clkdev.h
5generic-y += cputime.h
5generic-y += div64.h 6generic-y += div64.h
7generic-y += emergency-restart.h
6generic-y += exec.h 8generic-y += exec.h
7generic-y += local64.h 9generic-y += local64.h
10generic-y += mutex.h
8generic-y += irq_regs.h 11generic-y += irq_regs.h
9generic-y += local.h 12generic-y += local.h
10generic-y += module.h 13generic-y += module.h
14generic-y += serial.h
11generic-y += trace_clock.h 15generic-y += trace_clock.h
16generic-y += types.h
12generic-y += word-at-a-time.h 17generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h
deleted file mode 100644
index 1a642b81e019..000000000000
--- a/arch/sparc/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SPARC_CPUTIME_H
2#define __SPARC_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __SPARC_CPUTIME_H */
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/sparc/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 7eb57d245044..e4cab465b81f 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_SPARC64_HUGETLB_H 2#define _ASM_SPARC64_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/sparc/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 08fcce90316b..7619f2f792af 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
915 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); 915 return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
916} 916}
917 917
918#include <asm/tlbflush.h>
918#include <asm-generic/pgtable.h> 919#include <asm-generic/pgtable.h>
919 920
920/* We provide our own get_unmapped_area to cope with VA holes and 921/* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h
deleted file mode 100644
index f90d61c28059..000000000000
--- a/arch/sparc/include/asm/serial.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SPARC_SERIAL_H
2#define __SPARC_SERIAL_H
3
4#define BASE_BAUD ( 1843200 / 16 )
5
6#endif /* __SPARC_SERIAL_H */
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index b73da3c5f10a..3c8917f054de 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long); 36 unsigned long, unsigned long);
37 37
38void cpu_panic(void); 38void cpu_panic(void);
39extern void smp4m_irq_rotate(int cpu);
40 39
41/* 40/*
42 * General functions that each host system must provide. 41 * General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
46void sun4d_init_smp(void); 45void sun4d_init_smp(void);
47 46
48void smp_callin(void); 47void smp_callin(void);
49void smp_boot_cpus(void);
50void smp_store_cpu_info(int); 48void smp_store_cpu_info(int);
51 49
52void smp_resched_interrupt(void); 50void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
107 105
108#define raw_smp_processor_id() (current_thread_info()->cpu) 106#define raw_smp_processor_id() (current_thread_info()->cpu)
109 107
110#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
111#define prof_counter(__cpu) cpu_data(__cpu).counter
112
113void smp_setup_cpu_possible_map(void); 108void smp_setup_cpu_possible_map(void);
114 109
115#endif /* !(__ASSEMBLY__) */ 110#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index d06a26601753..6b67e50fb9b4 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -45,6 +45,7 @@
45#define SUN4V_CHIP_NIAGARA3 0x03 45#define SUN4V_CHIP_NIAGARA3 0x03
46#define SUN4V_CHIP_NIAGARA4 0x04 46#define SUN4V_CHIP_NIAGARA4 0x04
47#define SUN4V_CHIP_NIAGARA5 0x05 47#define SUN4V_CHIP_NIAGARA5 0x05
48#define SUN4V_CHIP_SPARC64X 0x8a
48#define SUN4V_CHIP_UNKNOWN 0xff 49#define SUN4V_CHIP_UNKNOWN 0xff
49 50
50#ifndef __ASSEMBLY__ 51#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index cad36f56fa03..c7de3323819c 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do { \
18 * and 2 stores in this critical code path. -DaveM 18 * and 2 stores in this critical code path. -DaveM
19 */ 19 */
20#define switch_to(prev, next, last) \ 20#define switch_to(prev, next, last) \
21do { flush_tlb_pending(); \ 21do { save_and_clear_fpu(); \
22 save_and_clear_fpu(); \
23 /* If you are tempted to conditionalize the following */ \ 22 /* If you are tempted to conditionalize the following */ \
24 /* so that ASI is only written if it changes, think again. */ \ 23 /* so that ASI is only written if it changes, think again. */ \
25 __asm__ __volatile__("wr %%g0, %0, %%asi" \ 24 __asm__ __volatile__("wr %%g0, %0, %%asi" \
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 25849ae3e900..dd3807599bb9 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -132,8 +132,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
132#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ 132#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
133 _TIF_SIGPENDING) 133 _TIF_SIGPENDING)
134 134
135#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
136
137#endif /* __KERNEL__ */ 135#endif /* __KERNEL__ */
138 136
139#endif /* _ASM_THREAD_INFO_H */ 137#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 269bd92313df..d5e504251079 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -256,8 +256,6 @@ static inline bool test_and_clear_restore_sigmask(void)
256 return true; 256 return true;
257} 257}
258 258
259#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
260
261#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) 259#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
262#define test_thread_64bit_stack(__SP) \ 260#define test_thread_64bit_stack(__SP) \
263 ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ 261 ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 2ef463494153..f0d6a9700f4c 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
11struct tlb_batch { 11struct tlb_batch {
12 struct mm_struct *mm; 12 struct mm_struct *mm;
13 unsigned long tlb_nr; 13 unsigned long tlb_nr;
14 unsigned long active;
14 unsigned long vaddrs[TLB_BATCH_NR]; 15 unsigned long vaddrs[TLB_BATCH_NR];
15}; 16};
16 17
17extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); 18extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
18extern void flush_tsb_user(struct tlb_batch *tb); 19extern void flush_tsb_user(struct tlb_batch *tb);
20extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
19 21
20/* TLB flush operations. */ 22/* TLB flush operations. */
21 23
22extern void flush_tlb_pending(void); 24static inline void flush_tlb_mm(struct mm_struct *mm)
25{
26}
27
28static inline void flush_tlb_page(struct vm_area_struct *vma,
29 unsigned long vmaddr)
30{
31}
32
33static inline void flush_tlb_range(struct vm_area_struct *vma,
34 unsigned long start, unsigned long end)
35{
36}
37
38#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
23 39
24#define flush_tlb_range(vma,start,end) \ 40extern void flush_tlb_pending(void);
25 do { (void)(start); flush_tlb_pending(); } while (0) 41extern void arch_enter_lazy_mmu_mode(void);
26#define flush_tlb_page(vma,addr) flush_tlb_pending() 42extern void arch_leave_lazy_mmu_mode(void);
27#define flush_tlb_mm(mm) flush_tlb_pending() 43#define arch_flush_lazy_mmu_mode() do {} while (0)
28 44
29/* Local cpu only. */ 45/* Local cpu only. */
30extern void __flush_tlb_all(void); 46extern void __flush_tlb_all(void);
31 47extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
32extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); 48extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
33 49
34#ifndef CONFIG_SMP 50#ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
38 __flush_tlb_kernel_range(start,end); \ 54 __flush_tlb_kernel_range(start,end); \
39} while (0) 55} while (0)
40 56
57static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
58{
59 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
60}
61
41#else /* CONFIG_SMP */ 62#else /* CONFIG_SMP */
42 63
43extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); 64extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
65extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
44 66
45#define flush_tlb_kernel_range(start, end) \ 67#define flush_tlb_kernel_range(start, end) \
46do { flush_tsb_kernel_range(start,end); \ 68do { flush_tsb_kernel_range(start,end); \
47 smp_flush_tlb_kernel_range(start, end); \ 69 smp_flush_tlb_kernel_range(start, end); \
48} while (0) 70} while (0)
49 71
72#define global_flush_tlb_page(mm, vaddr) \
73 smp_flush_tlb_page(mm, vaddr)
74
50#endif /* ! CONFIG_SMP */ 75#endif /* ! CONFIG_SMP */
51 76
52#endif /* _SPARC64_TLBFLUSH_H */ 77#endif /* _SPARC64_TLBFLUSH_H */
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index ce175aff71b7..b5843ee09fb5 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -44,7 +44,6 @@ header-y += swab.h
44header-y += termbits.h 44header-y += termbits.h
45header-y += termios.h 45header-y += termios.h
46header-y += traps.h 46header-y += traps.h
47header-y += types.h
48header-y += uctx.h 47header-y += uctx.h
49header-y += unistd.h 48header-y += unistd.h
50header-y += utrap.h 49header-y += utrap.h
diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h
deleted file mode 100644
index 383d156cde9c..000000000000
--- a/arch/sparc/include/uapi/asm/types.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _SPARC_TYPES_H
2#define _SPARC_TYPES_H
3/*
4 * This file is never included by application software unless
5 * explicitly requested (e.g., via linux/types.h) in which case the
6 * application is Linux specific so (user-) name space pollution is
7 * not a major issue. However, for interoperability, libraries still
8 * need to be careful to avoid a name clashes.
9 */
10
11#if defined(__sparc__)
12
13#include <asm-generic/int-ll64.h>
14
15#endif /* defined(__sparc__) */
16
17#endif /* defined(_SPARC_TYPES_H) */
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index a6c94a2bf9d4..5c5125895db8 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void)
493 sparc_pmu_type = "niagara5"; 493 sparc_pmu_type = "niagara5";
494 break; 494 break;
495 495
496 case SUN4V_CHIP_SPARC64X:
497 sparc_cpu_type = "SPARC64-X";
498 sparc_fpu_type = "SPARC64-X integrated FPU";
499 sparc_pmu_type = "sparc64-x";
500 break;
501
496 default: 502 default:
497 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 503 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
498 prom_cpu_compatible); 504 prom_cpu_compatible);
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 2feb15c35d9e..26b706a1867d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -134,6 +134,8 @@ prom_niagara_prefix:
134 .asciz "SUNW,UltraSPARC-T" 134 .asciz "SUNW,UltraSPARC-T"
135prom_sparc_prefix: 135prom_sparc_prefix:
136 .asciz "SPARC-" 136 .asciz "SPARC-"
137prom_sparc64x_prefix:
138 .asciz "SPARC64-X"
137 .align 4 139 .align 4
138prom_root_compatible: 140prom_root_compatible:
139 .skip 64 141 .skip 64
@@ -412,7 +414,7 @@ sun4v_chip_type:
412 cmp %g2, 'T' 414 cmp %g2, 'T'
413 be,pt %xcc, 70f 415 be,pt %xcc, 70f
414 cmp %g2, 'M' 416 cmp %g2, 'M'
415 bne,pn %xcc, 4f 417 bne,pn %xcc, 49f
416 nop 418 nop
417 419
41870: ldub [%g1 + 7], %g2 42070: ldub [%g1 + 7], %g2
@@ -425,7 +427,7 @@ sun4v_chip_type:
425 cmp %g2, '5' 427 cmp %g2, '5'
426 be,pt %xcc, 5f 428 be,pt %xcc, 5f
427 mov SUN4V_CHIP_NIAGARA5, %g4 429 mov SUN4V_CHIP_NIAGARA5, %g4
428 ba,pt %xcc, 4f 430 ba,pt %xcc, 49f
429 nop 431 nop
430 432
43191: sethi %hi(prom_cpu_compatible), %g1 43391: sethi %hi(prom_cpu_compatible), %g1
@@ -439,6 +441,25 @@ sun4v_chip_type:
439 mov SUN4V_CHIP_NIAGARA2, %g4 441 mov SUN4V_CHIP_NIAGARA2, %g4
440 442
4414: 4434:
444 /* Athena */
445 sethi %hi(prom_cpu_compatible), %g1
446 or %g1, %lo(prom_cpu_compatible), %g1
447 sethi %hi(prom_sparc64x_prefix), %g7
448 or %g7, %lo(prom_sparc64x_prefix), %g7
449 mov 9, %g3
45041: ldub [%g7], %g2
451 ldub [%g1], %g4
452 cmp %g2, %g4
453 bne,pn %icc, 49f
454 add %g7, 1, %g7
455 subcc %g3, 1, %g3
456 bne,pt %xcc, 41b
457 add %g1, 1, %g1
458 mov SUN4V_CHIP_SPARC64X, %g4
459 ba,pt %xcc, 5f
460 nop
461
46249:
442 mov SUN4V_CHIP_UNKNOWN, %g4 463 mov SUN4V_CHIP_UNKNOWN, %g4
4435: sethi %hi(sun4v_chip_type), %g2 4645: sethi %hi(sun4v_chip_type), %g2
444 or %g2, %lo(sun4v_chip_type), %g2 465 or %g2, %lo(sun4v_chip_type), %g2
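
The new assembly at label 41 is a byte-by-byte prefix compare of prom_cpu_compatible against the 9-character string "SPARC64-X". In C the added check is roughly the following (illustrative sketch only; the helper name is hypothetical).

#include <linux/string.h>

/* C equivalent of the 9-byte prefix match added above: if the OBP
 * "compatible" string starts with "SPARC64-X", report the chip as
 * SUN4V_CHIP_SPARC64X; otherwise fall through to SUN4V_CHIP_UNKNOWN.
 */
static int compatible_is_sparc64x(const char *compat)
{
        return strncmp(compat, "SPARC64-X", 9) == 0;
}
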
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 9365432904d6..605c960b2fa6 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -128,8 +128,7 @@ hv_cpu_startup:
128 128
129 call smp_callin 129 call smp_callin
130 nop 130 nop
131 call cpu_idle 131
132 mov 0, %o0
133 call cpu_panic 132 call cpu_panic
134 nop 133 nop
135 134
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index fc4320886a3a..4d1487138d26 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -186,6 +186,8 @@ struct grpci2_cap_first {
186#define CAP9_IOMAP_OFS 0x20 186#define CAP9_IOMAP_OFS 0x20
187#define CAP9_BARSIZE_OFS 0x24 187#define CAP9_BARSIZE_OFS 0x24
188 188
189#define TGT 256
190
189struct grpci2_priv { 191struct grpci2_priv {
190 struct leon_pci_info info; /* must be on top of this structure */ 192 struct leon_pci_info info; /* must be on top of this structure */
191 struct grpci2_regs *regs; 193 struct grpci2_regs *regs;
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
237 if (where & 0x3) 239 if (where & 0x3)
238 return -EINVAL; 240 return -EINVAL;
239 241
240 if (bus == 0 && PCI_SLOT(devfn) != 0) 242 if (bus == 0) {
241 devfn += (0x8 * 6); 243 devfn += (0x8 * 6); /* start at AD16=Device0 */
244 } else if (bus == TGT) {
245 bus = 0;
246 devfn = 0; /* special case: bridge controller itself */
247 }
242 248
243 /* Select bus */ 249 /* Select bus */
244 spin_lock_irqsave(&grpci2_dev_lock, flags); 250 spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
303 if (where & 0x3) 309 if (where & 0x3)
304 return -EINVAL; 310 return -EINVAL;
305 311
306 if (bus == 0 && PCI_SLOT(devfn) != 0) 312 if (bus == 0) {
307 devfn += (0x8 * 6); 313 devfn += (0x8 * 6); /* start at AD16=Device0 */
314 } else if (bus == TGT) {
315 bus = 0;
316 devfn = 0; /* special case: bridge controller itself */
317 }
308 318
309 /* Select bus */ 319 /* Select bus */
310 spin_lock_irqsave(&grpci2_dev_lock, flags); 320 spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
368 unsigned int busno = bus->number; 378 unsigned int busno = bus->number;
369 int ret; 379 int ret;
370 380
371 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { 381 if (PCI_SLOT(devfn) > 15 || busno > 255) {
372 *val = ~0; 382 *val = ~0;
373 return 0; 383 return 0;
374 } 384 }
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
406 struct grpci2_priv *priv = grpci2priv; 416 struct grpci2_priv *priv = grpci2priv;
407 unsigned int busno = bus->number; 417 unsigned int busno = bus->number;
408 418
409 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) 419 if (PCI_SLOT(devfn) > 15 || busno > 255)
410 return 0; 420 return 0;
411 421
412#ifdef GRPCI2_DEBUG_CFGACCESS 422#ifdef GRPCI2_DEBUG_CFGACCESS
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv)
578 REGSTORE(regs->ahbmst_map[i], priv->pci_area); 588 REGSTORE(regs->ahbmst_map[i], priv->pci_area);
579 589
580 /* Get the GRPCI2 Host PCI ID */ 590 /* Get the GRPCI2 Host PCI ID */
581 grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); 591 grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
582 592
583 /* Get address to first (always defined) capability structure */ 593 /* Get address to first (always defined) capability structure */
584 grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); 594 grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
585 595
586 /* Enable/Disable Byte twisting */ 596 /* Enable/Disable Byte twisting */
587 grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); 597 grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
588 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); 598 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
589 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); 599 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
590 600
591 /* Setup the Host's PCI Target BARs for other peripherals to access, 601 /* Setup the Host's PCI Target BARs for other peripherals to access,
592 * and do DMA to the host's memory. The target BARs can be sized and 602 * and do DMA to the host's memory. The target BARs can be sized and
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv)
617 pciadr = 0; 627 pciadr = 0;
618 } 628 }
619 } 629 }
620 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); 630 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
621 grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); 631 bar_sz);
622 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); 632 grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
633 grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
623 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", 634 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
624 i, pciadr, ahbadr); 635 i, pciadr, ahbadr);
625 } 636 }
626 637
627 /* set as bus master and enable pci memory responses */ 638 /* set as bus master and enable pci memory responses */
628 grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); 639 grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
629 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 640 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
630 grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); 641 grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
631 642
632 /* Enable Error response (CPU-TRAP) on illegal memory access. */ 643 /* Enable Error response (CPU-TRAP) on illegal memory access. */
633 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); 644 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
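
The TGT define added above is a sentinel bus number (256, outside the valid 0-255 range) that lets internal callers address the host bridge's own configuration space, while ordinary bus-0 accesses are shifted so that device 0 starts at AD16. The mapping added to the config accessors looks like this in isolation (illustrative sketch, not the driver code verbatim).

#define TGT 256         /* sentinel: "the bridge controller itself" */

/* How grpci2_cfg_r32()/grpci2_cfg_w32() now interpret (bus, devfn)
 * before generating the configuration cycle.
 */
static void sketch_map_cfg_access(unsigned int *bus, unsigned int *devfn)
{
        if (*bus == 0) {
                *devfn += 0x8 * 6;      /* bus 0: device 0 starts at AD16 */
        } else if (*bus == TGT) {
                *bus = 0;               /* host bridge's own config space */
                *devfn = 0;
        }
}
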
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 62eede13831a..c85241006e32 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
64struct task_struct *last_task_used_math = NULL; 64struct task_struct *last_task_used_math = NULL;
65struct thread_info *current_set[NR_CPUS]; 65struct thread_info *current_set[NR_CPUS];
66 66
67/* 67/* Idle loop support. */
68 * the idle loop on a Sparc... ;) 68void arch_cpu_idle(void)
69 */
70void cpu_idle(void)
71{ 69{
72 set_thread_flag(TIF_POLLING_NRFLAG); 70 if (sparc_idle)
73 71 (*sparc_idle)();
74 /* endless idle loop with no priority at all */ 72 local_irq_enable();
75 for (;;) {
76 while (!need_resched()) {
77 if (sparc_idle)
78 (*sparc_idle)();
79 else
80 cpu_relax();
81 }
82 schedule_preempt_disabled();
83 }
84} 73}
85 74
86/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ 75/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cdb80b2adbe0..9fbf0d14a361 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -52,20 +52,17 @@
52 52
53#include "kstack.h" 53#include "kstack.h"
54 54
55static void sparc64_yield(int cpu) 55/* Idle loop support on sparc64. */
56void arch_cpu_idle(void)
56{ 57{
57 if (tlb_type != hypervisor) { 58 if (tlb_type != hypervisor) {
58 touch_nmi_watchdog(); 59 touch_nmi_watchdog();
59 return; 60 } else {
60 }
61
62 clear_thread_flag(TIF_POLLING_NRFLAG);
63 smp_mb__after_clear_bit();
64
65 while (!need_resched() && !cpu_is_offline(cpu)) {
66 unsigned long pstate; 61 unsigned long pstate;
67 62
68 /* Disable interrupts. */ 63 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
64 * the cpu sleep hypervisor call.
65 */
69 __asm__ __volatile__( 66 __asm__ __volatile__(
70 "rdpr %%pstate, %0\n\t" 67 "rdpr %%pstate, %0\n\t"
71 "andn %0, %1, %0\n\t" 68 "andn %0, %1, %0\n\t"
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
73 : "=&r" (pstate) 70 : "=&r" (pstate)
74 : "i" (PSTATE_IE)); 71 : "i" (PSTATE_IE));
75 72
76 if (!need_resched() && !cpu_is_offline(cpu)) 73 if (!need_resched() && !cpu_is_offline(smp_processor_id()))
77 sun4v_cpu_yield(); 74 sun4v_cpu_yield();
78 75
79 /* Re-enable interrupts. */ 76 /* Re-enable interrupts. */
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
84 : "=&r" (pstate) 81 : "=&r" (pstate)
85 : "i" (PSTATE_IE)); 82 : "i" (PSTATE_IE));
86 } 83 }
87 84 local_irq_enable();
88 set_thread_flag(TIF_POLLING_NRFLAG);
89} 85}
90 86
91/* The idle loop on sparc64. */
92void cpu_idle(void)
93{
94 int cpu = smp_processor_id();
95
96 set_thread_flag(TIF_POLLING_NRFLAG);
97
98 while(1) {
99 tick_nohz_idle_enter();
100 rcu_idle_enter();
101
102 while (!need_resched() && !cpu_is_offline(cpu))
103 sparc64_yield(cpu);
104
105 rcu_idle_exit();
106 tick_nohz_idle_exit();
107
108#ifdef CONFIG_HOTPLUG_CPU 87#ifdef CONFIG_HOTPLUG_CPU
109 if (cpu_is_offline(cpu)) { 88void arch_cpu_idle_dead()
110 sched_preempt_enable_no_resched(); 89{
111 cpu_play_dead(); 90 sched_preempt_enable_no_resched();
112 } 91 cpu_play_dead();
113#endif
114 schedule_preempt_disabled();
115 }
116} 92}
93#endif
117 94
118#ifdef CONFIG_COMPAT 95#ifdef CONFIG_COMPAT
119static void show_regwindow32(struct pt_regs *regs) 96static void show_regwindow32(struct pt_regs *regs)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 9e7e6d718367..e3f2b81c23f1 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
369 local_irq_enable(); 369 local_irq_enable();
370 370
371 wmb(); 371 wmb();
372 cpu_idle(); 372 cpu_startup_entry(CPUHP_ONLINE);
373 373
374 /* We should never reach here! */ 374 /* We should never reach here! */
375 BUG(); 375 BUG();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 537eb66abd06..77539eda928c 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void)
127 127
128 /* idle thread is expected to have preempt disabled */ 128 /* idle thread is expected to have preempt disabled */
129 preempt_disable(); 129 preempt_disable();
130
131 cpu_startup_entry(CPUHP_ONLINE);
130} 132}
131 133
132void cpu_panic(void) 134void cpu_panic(void)
@@ -849,7 +851,7 @@ void smp_tsb_sync(struct mm_struct *mm)
849} 851}
850 852
851extern unsigned long xcall_flush_tlb_mm; 853extern unsigned long xcall_flush_tlb_mm;
852extern unsigned long xcall_flush_tlb_pending; 854extern unsigned long xcall_flush_tlb_page;
853extern unsigned long xcall_flush_tlb_kernel_range; 855extern unsigned long xcall_flush_tlb_kernel_range;
854extern unsigned long xcall_fetch_glob_regs; 856extern unsigned long xcall_fetch_glob_regs;
855extern unsigned long xcall_fetch_glob_pmu; 857extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1076,56 @@ local_flush_and_out:
1074 put_cpu(); 1076 put_cpu();
1075} 1077}
1076 1078
1079struct tlb_pending_info {
1080 unsigned long ctx;
1081 unsigned long nr;
1082 unsigned long *vaddrs;
1083};
1084
1085static void tlb_pending_func(void *info)
1086{
1087 struct tlb_pending_info *t = info;
1088
1089 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1090}
1091
1077void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) 1092void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1078{ 1093{
1079 u32 ctx = CTX_HWBITS(mm->context); 1094 u32 ctx = CTX_HWBITS(mm->context);
1095 struct tlb_pending_info info;
1080 int cpu = get_cpu(); 1096 int cpu = get_cpu();
1081 1097
1098 info.ctx = ctx;
1099 info.nr = nr;
1100 info.vaddrs = vaddrs;
1101
1082 if (mm == current->mm && atomic_read(&mm->mm_users) == 1) 1102 if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1083 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); 1103 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1084 else 1104 else
1085 smp_cross_call_masked(&xcall_flush_tlb_pending, 1105 smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1086 ctx, nr, (unsigned long) vaddrs, 1106 &info, 1);
1087 mm_cpumask(mm));
1088 1107
1089 __flush_tlb_pending(ctx, nr, vaddrs); 1108 __flush_tlb_pending(ctx, nr, vaddrs);
1090 1109
1091 put_cpu(); 1110 put_cpu();
1092} 1111}
1093 1112
1113void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1114{
1115 unsigned long context = CTX_HWBITS(mm->context);
1116 int cpu = get_cpu();
1117
1118 if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1119 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1120 else
1121 smp_cross_call_masked(&xcall_flush_tlb_page,
1122 context, vaddr, 0,
1123 mm_cpumask(mm));
1124 __flush_tlb_page(context, vaddr);
1125
1126 put_cpu();
1127}
1128
1094void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) 1129void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1095{ 1130{
1096 start &= PAGE_MASK; 1131 start &= PAGE_MASK;
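
smp_flush_tlb_pending() above switches from the sparc-specific cross-call (xcall_flush_tlb_pending) to the generic IPI machinery: the arguments are bundled into a struct, smp_call_function_many() runs the handler on the other CPUs in the mm's cpumask, and the local CPU then flushes directly. The pattern in isolation (illustrative sketch; names other than the kernel helpers are hypothetical):

#include <linux/mm.h>
#include <linux/smp.h>

/* Bundle the arguments, fan the handler out to the cpumask (waiting
 * for completion), then perform the same work on the local CPU.
 */
struct flush_args {
        unsigned long ctx;
        unsigned long nr;
        unsigned long *vaddrs;
};

static void remote_flush(void *data)
{
        struct flush_args *a = data;

        __flush_tlb_pending(a->ctx, a->nr, a->vaddrs);
}

static void flush_on_mm_cpus(struct mm_struct *mm, struct flush_args *a)
{
        smp_call_function_many(mm_cpumask(mm), remote_flush, a, 1);
        __flush_tlb_pending(a->ctx, a->nr, a->vaddrs);  /* local CPU */
}
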
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index da1b781b5e65..2e973a26fbda 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -407,8 +407,7 @@ after_lock_tlb:
407 407
408 call smp_callin 408 call smp_callin
409 nop 409 nop
410 call cpu_idle 410
411 mov 0, %o0
412 call cpu_panic 411 call cpu_panic
413 nop 412 nop
4141: b,a,pt %xcc, 1b 4131: b,a,pt %xcc, 1b
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 48d00e72ce15..8ec4e9c0251a 100644
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
119 119
120void bit_map_init(struct bit_map *t, unsigned long *map, int size) 120void bit_map_init(struct bit_map *t, unsigned long *map, int size)
121{ 121{
122 122 bitmap_zero(map, size);
123 if ((size & 07) != 0)
124 BUG();
125 memset(map, 0, size>>3);
126
127 memset(t, 0, sizeof *t); 123 memset(t, 0, sizeof *t);
128 spin_lock_init(&t->lock); 124 spin_lock_init(&t->lock);
129 t->map = map; 125 t->map = map;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 48e0c030e8f5..4490c397bb5b 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -282,14 +282,8 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
282 printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); 282 printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
283#endif 283#endif
284 284
285 for (tmp = start_pfn; tmp < end_pfn; tmp++) { 285 for (tmp = start_pfn; tmp < end_pfn; tmp++)
286 struct page *page = pfn_to_page(tmp); 286 free_highmem_page(pfn_to_page(tmp));
287
288 ClearPageReserved(page);
289 init_page_count(page);
290 __free_page(page);
291 totalhigh_pages++;
292 }
293} 287}
294 288
295void __init mem_init(void) 289void __init mem_init(void)
@@ -347,8 +341,6 @@ void __init mem_init(void)
347 map_high_region(start_pfn, end_pfn); 341 map_high_region(start_pfn, end_pfn);
348 } 342 }
349 343
350 totalram_pages += totalhigh_pages;
351
352 codepages = (((unsigned long) &_etext) - ((unsigned long)&_start)); 344 codepages = (((unsigned long) &_etext) - ((unsigned long)&_start));
353 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; 345 codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
354 datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext)); 346 datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext));
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1588d33d5492..6ac99d64a13c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2181,10 +2181,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE];
2181static long __meminitdata addr_start, addr_end; 2181static long __meminitdata addr_start, addr_end;
2182static int __meminitdata node_start; 2182static int __meminitdata node_start;
2183 2183
2184int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) 2184int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2185 int node)
2185{ 2186{
2186 unsigned long vstart = (unsigned long) start;
2187 unsigned long vend = (unsigned long) (start + nr);
2188 unsigned long phys_start = (vstart - VMEMMAP_BASE); 2187 unsigned long phys_start = (vstart - VMEMMAP_BASE);
2189 unsigned long phys_end = (vend - VMEMMAP_BASE); 2188 unsigned long phys_end = (vend - VMEMMAP_BASE);
2190 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; 2189 unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
@@ -2236,7 +2235,7 @@ void __meminit vmemmap_populate_print_last(void)
2236 } 2235 }
2237} 2236}
2238 2237
2239void vmemmap_free(struct page *memmap, unsigned long nr_pages) 2238void vmemmap_free(unsigned long start, unsigned long end)
2240{ 2239{
2241} 2240}
2242 2241
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 0f4f7191fbba..28f96f27c768 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -34,7 +34,7 @@
34#define IOMMU_RNGE IOMMU_RNGE_256MB 34#define IOMMU_RNGE IOMMU_RNGE_256MB
35#define IOMMU_START 0xF0000000 35#define IOMMU_START 0xF0000000
36#define IOMMU_WINSIZE (256*1024*1024U) 36#define IOMMU_WINSIZE (256*1024*1024U)
37#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */ 37#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
38#define IOMMU_ORDER 6 /* 4096 * (1<<6) */ 38#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
39 39
40/* srmmu.c */ 40/* srmmu.c */
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c38bb72e3e80..036c2797dece 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
280 SRMMU_NOCACHE_ALIGN_MAX, 0UL); 280 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
281 memset(srmmu_nocache_pool, 0, srmmu_nocache_size); 281 memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
282 282
283 srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); 283 srmmu_nocache_bitmap =
284 __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
285 SMP_CACHE_BYTES, 0UL);
284 bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); 286 bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
285 287
286 srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); 288 srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
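
The nocache bitmap fix above matters because bit_map_init() now clears the map with bitmap_zero(), which operates on whole longs; the backing buffer therefore has to be sized in longs rather than by dividing the bit count by eight. A minimal helper showing the intended rounding (illustrative only):

#include <linux/bitops.h>

/* Size a bitmap buffer the way the bitmap_* helpers expect: round the
 * bit count up to whole unsigned longs instead of using nbits >> 3.
 */
static inline unsigned long bitmap_buf_bytes(unsigned long nbits)
{
        return BITS_TO_LONGS(nbits) * sizeof(long);
}
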
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index ba6ae7ffdc2c..83d89bcb44af 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
24void flush_tlb_pending(void) 24void flush_tlb_pending(void)
25{ 25{
26 struct tlb_batch *tb = &get_cpu_var(tlb_batch); 26 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
27 struct mm_struct *mm = tb->mm;
27 28
28 if (tb->tlb_nr) { 29 if (!tb->tlb_nr)
29 flush_tsb_user(tb); 30 goto out;
30 31
31 if (CTX_VALID(tb->mm->context)) { 32 flush_tsb_user(tb);
33
34 if (CTX_VALID(mm->context)) {
35 if (tb->tlb_nr == 1) {
36 global_flush_tlb_page(mm, tb->vaddrs[0]);
37 } else {
32#ifdef CONFIG_SMP 38#ifdef CONFIG_SMP
33 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, 39 smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
34 &tb->vaddrs[0]); 40 &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
37 tb->tlb_nr, &tb->vaddrs[0]); 43 tb->tlb_nr, &tb->vaddrs[0]);
38#endif 44#endif
39 } 45 }
40 tb->tlb_nr = 0;
41 } 46 }
42 47
48 tb->tlb_nr = 0;
49
50out:
43 put_cpu_var(tlb_batch); 51 put_cpu_var(tlb_batch);
44} 52}
45 53
54void arch_enter_lazy_mmu_mode(void)
55{
56 struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
57
58 tb->active = 1;
59}
60
61void arch_leave_lazy_mmu_mode(void)
62{
63 struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
64
65 if (tb->tlb_nr)
66 flush_tlb_pending();
67 tb->active = 0;
68}
69
46static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, 70static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
47 bool exec) 71 bool exec)
48{ 72{
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
60 nr = 0; 84 nr = 0;
61 } 85 }
62 86
87 if (!tb->active) {
88 global_flush_tlb_page(mm, vaddr);
89 flush_tsb_user_page(mm, vaddr);
90 goto out;
91 }
92
63 if (nr == 0) 93 if (nr == 0)
64 tb->mm = mm; 94 tb->mm = mm;
65 95
@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
68 if (nr >= TLB_BATCH_NR) 98 if (nr >= TLB_BATCH_NR)
69 flush_tlb_pending(); 99 flush_tlb_pending();
70 100
101out:
71 put_cpu_var(tlb_batch); 102 put_cpu_var(tlb_batch);
72} 103}
73 104
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 428982b9becf..2cc3bce5ee91 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
7#include <linux/preempt.h> 7#include <linux/preempt.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/tlbflush.h>
11#include <asm/tlb.h>
12#include <asm/mmu_context.h>
13#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/mmu_context.h>
14#include <asm/tsb.h> 12#include <asm/tsb.h>
13#include <asm/tlb.h>
15#include <asm/oplib.h> 14#include <asm/oplib.h>
16 15
17extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; 16extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
46 } 45 }
47} 46}
48 47
49static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, 48static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
50 unsigned long tsb, unsigned long nentries) 49 unsigned long hash_shift,
50 unsigned long nentries)
51{ 51{
52 unsigned long i; 52 unsigned long tag, ent, hash;
53 53
54 for (i = 0; i < tb->tlb_nr; i++) { 54 v &= ~0x1UL;
55 unsigned long v = tb->vaddrs[i]; 55 hash = tsb_hash(v, hash_shift, nentries);
56 unsigned long tag, ent, hash; 56 ent = tsb + (hash * sizeof(struct tsb));
57 tag = (v >> 22UL);
57 58
58 v &= ~0x1UL; 59 tsb_flush(ent, tag);
60}
59 61
60 hash = tsb_hash(v, hash_shift, nentries); 62static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
61 ent = tsb + (hash * sizeof(struct tsb)); 63 unsigned long tsb, unsigned long nentries)
62 tag = (v >> 22UL); 64{
65 unsigned long i;
63 66
64 tsb_flush(ent, tag); 67 for (i = 0; i < tb->tlb_nr; i++)
65 } 68 __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
66} 69}
67 70
68void flush_tsb_user(struct tlb_batch *tb) 71void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
90 spin_unlock_irqrestore(&mm->context.lock, flags); 93 spin_unlock_irqrestore(&mm->context.lock, flags);
91} 94}
92 95
96void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
97{
98 unsigned long nentries, base, flags;
99
100 spin_lock_irqsave(&mm->context.lock, flags);
101
102 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
103 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
104 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
105 base = __pa(base);
106 __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
107
108#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
109 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
110 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
111 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
112 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
113 base = __pa(base);
114 __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
115 }
116#endif
117 spin_unlock_irqrestore(&mm->context.lock, flags);
118}
119
93#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K 120#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
94#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K 121#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
95 122
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index f8e13d421fcb..432aa0cb1b38 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
53 nop 53 nop
54 54
55 .align 32 55 .align 32
56 .globl __flush_tlb_page
57__flush_tlb_page: /* 22 insns */
58 /* %o0 = context, %o1 = vaddr */
59 rdpr %pstate, %g7
60 andn %g7, PSTATE_IE, %g2
61 wrpr %g2, %pstate
62 mov SECONDARY_CONTEXT, %o4
63 ldxa [%o4] ASI_DMMU, %g2
64 stxa %o0, [%o4] ASI_DMMU
65 andcc %o1, 1, %g0
66 andn %o1, 1, %o3
67 be,pn %icc, 1f
68 or %o3, 0x10, %o3
69 stxa %g0, [%o3] ASI_IMMU_DEMAP
701: stxa %g0, [%o3] ASI_DMMU_DEMAP
71 membar #Sync
72 stxa %g2, [%o4] ASI_DMMU
73 sethi %hi(KERNBASE), %o4
74 flush %o4
75 retl
76 wrpr %g7, 0x0, %pstate
77 nop
78 nop
79 nop
80 nop
81
82 .align 32
56 .globl __flush_tlb_pending 83 .globl __flush_tlb_pending
57__flush_tlb_pending: /* 26 insns */ 84__flush_tlb_pending: /* 26 insns */
58 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 85 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
203 retl 230 retl
204 wrpr %g7, 0x0, %pstate 231 wrpr %g7, 0x0, %pstate
205 232
233__cheetah_flush_tlb_page: /* 22 insns */
234 /* %o0 = context, %o1 = vaddr */
235 rdpr %pstate, %g7
236 andn %g7, PSTATE_IE, %g2
237 wrpr %g2, 0x0, %pstate
238 wrpr %g0, 1, %tl
239 mov PRIMARY_CONTEXT, %o4
240 ldxa [%o4] ASI_DMMU, %g2
241 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
242 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
243 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
244 stxa %o0, [%o4] ASI_DMMU
245 andcc %o1, 1, %g0
246 be,pn %icc, 1f
247 andn %o1, 1, %o3
248 stxa %g0, [%o3] ASI_IMMU_DEMAP
2491: stxa %g0, [%o3] ASI_DMMU_DEMAP
250 membar #Sync
251 stxa %g2, [%o4] ASI_DMMU
252 sethi %hi(KERNBASE), %o4
253 flush %o4
254 wrpr %g0, 0, %tl
255 retl
256 wrpr %g7, 0x0, %pstate
257
206__cheetah_flush_tlb_pending: /* 27 insns */ 258__cheetah_flush_tlb_pending: /* 27 insns */
207 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 259 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
208 rdpr %pstate, %g7 260 rdpr %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
269 retl 321 retl
270 nop 322 nop
271 323
324__hypervisor_flush_tlb_page: /* 11 insns */
325 /* %o0 = context, %o1 = vaddr */
326 mov %o0, %g2
327 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
328 mov %g2, %o1 /* ARG1: mmu context */
329 mov HV_MMU_ALL, %o2 /* ARG2: flags */
330 srlx %o0, PAGE_SHIFT, %o0
331 sllx %o0, PAGE_SHIFT, %o0
332 ta HV_MMU_UNMAP_ADDR_TRAP
333 brnz,pn %o0, __hypervisor_tlb_tl0_error
334 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
335 retl
336 nop
337
272__hypervisor_flush_tlb_pending: /* 16 insns */ 338__hypervisor_flush_tlb_pending: /* 16 insns */
273 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ 339 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
274 sllx %o1, 3, %g1 340 sllx %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
339 call tlb_patch_one 405 call tlb_patch_one
340 mov 19, %o2 406 mov 19, %o2
341 407
408 sethi %hi(__flush_tlb_page), %o0
409 or %o0, %lo(__flush_tlb_page), %o0
410 sethi %hi(__cheetah_flush_tlb_page), %o1
411 or %o1, %lo(__cheetah_flush_tlb_page), %o1
412 call tlb_patch_one
413 mov 22, %o2
414
342 sethi %hi(__flush_tlb_pending), %o0 415 sethi %hi(__flush_tlb_pending), %o0
343 or %o0, %lo(__flush_tlb_pending), %o0 416 or %o0, %lo(__flush_tlb_pending), %o0
344 sethi %hi(__cheetah_flush_tlb_pending), %o1 417 sethi %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
397 nop 470 nop
398 nop 471 nop
399 472
400 .globl xcall_flush_tlb_pending 473 .globl xcall_flush_tlb_page
401xcall_flush_tlb_pending: /* 21 insns */ 474xcall_flush_tlb_page: /* 17 insns */
402 /* %g5=context, %g1=nr, %g7=vaddrs[] */ 475 /* %g5=context, %g1=vaddr */
403 sllx %g1, 3, %g1
404 mov PRIMARY_CONTEXT, %g4 476 mov PRIMARY_CONTEXT, %g4
405 ldxa [%g4] ASI_DMMU, %g2 477 ldxa [%g4] ASI_DMMU, %g2
406 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 478 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
408 or %g5, %g4, %g5 480 or %g5, %g4, %g5
409 mov PRIMARY_CONTEXT, %g4 481 mov PRIMARY_CONTEXT, %g4
410 stxa %g5, [%g4] ASI_DMMU 482 stxa %g5, [%g4] ASI_DMMU
4111: sub %g1, (1 << 3), %g1 483 andcc %g1, 0x1, %g0
412 ldx [%g7 + %g1], %g5
413 andcc %g5, 0x1, %g0
414 be,pn %icc, 2f 484 be,pn %icc, 2f
415 485 andn %g1, 0x1, %g5
416 andn %g5, 0x1, %g5
417 stxa %g0, [%g5] ASI_IMMU_DEMAP 486 stxa %g0, [%g5] ASI_IMMU_DEMAP
4182: stxa %g0, [%g5] ASI_DMMU_DEMAP 4872: stxa %g0, [%g5] ASI_DMMU_DEMAP
419 membar #Sync 488 membar #Sync
420 brnz,pt %g1, 1b
421 nop
422 stxa %g2, [%g4] ASI_DMMU 489 stxa %g2, [%g4] ASI_DMMU
423 retry 490 retry
424 nop 491 nop
492 nop
425 493
426 .globl xcall_flush_tlb_kernel_range 494 .globl xcall_flush_tlb_kernel_range
427xcall_flush_tlb_kernel_range: /* 25 insns */ 495xcall_flush_tlb_kernel_range: /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
656 membar #Sync 724 membar #Sync
657 retry 725 retry
658 726
659 .globl __hypervisor_xcall_flush_tlb_pending 727 .globl __hypervisor_xcall_flush_tlb_page
660__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ 728__hypervisor_xcall_flush_tlb_page: /* 17 insns */
661 /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ 729 /* %g5=ctx, %g1=vaddr */
662 sllx %g1, 3, %g1
663 mov %o0, %g2 730 mov %o0, %g2
664 mov %o1, %g3 731 mov %o1, %g3
665 mov %o2, %g4 732 mov %o2, %g4
6661: sub %g1, (1 << 3), %g1 733 mov %g1, %o0 /* ARG0: virtual address */
667 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
668 mov %g5, %o1 /* ARG1: mmu context */ 734 mov %g5, %o1 /* ARG1: mmu context */
669 mov HV_MMU_ALL, %o2 /* ARG2: flags */ 735 mov HV_MMU_ALL, %o2 /* ARG2: flags */
670 srlx %o0, PAGE_SHIFT, %o0 736 srlx %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
673 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 739 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
674 brnz,a,pn %o0, __hypervisor_tlb_xcall_error 740 brnz,a,pn %o0, __hypervisor_tlb_xcall_error
675 mov %o0, %g5 741 mov %o0, %g5
676 brnz,pt %g1, 1b
677 nop
678 mov %g2, %o0 742 mov %g2, %o0
679 mov %g3, %o1 743 mov %g3, %o1
680 mov %g4, %o2 744 mov %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
757 call tlb_patch_one 821 call tlb_patch_one
758 mov 10, %o2 822 mov 10, %o2
759 823
824 sethi %hi(__flush_tlb_page), %o0
825 or %o0, %lo(__flush_tlb_page), %o0
826 sethi %hi(__hypervisor_flush_tlb_page), %o1
827 or %o1, %lo(__hypervisor_flush_tlb_page), %o1
828 call tlb_patch_one
829 mov 11, %o2
830
760 sethi %hi(__flush_tlb_pending), %o0 831 sethi %hi(__flush_tlb_pending), %o0
761 or %o0, %lo(__flush_tlb_pending), %o0 832 or %o0, %lo(__flush_tlb_pending), %o0
762 sethi %hi(__hypervisor_flush_tlb_pending), %o1 833 sethi %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
788 call tlb_patch_one 859 call tlb_patch_one
789 mov 21, %o2 860 mov 21, %o2
790 861
791 sethi %hi(xcall_flush_tlb_pending), %o0 862 sethi %hi(xcall_flush_tlb_page), %o0
792 or %o0, %lo(xcall_flush_tlb_pending), %o0 863 or %o0, %lo(xcall_flush_tlb_page), %o0
793 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 864 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
794 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 865 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
795 call tlb_patch_one 866 call tlb_patch_one
796 mov 21, %o2 867 mov 17, %o2
797 868
798 sethi %hi(xcall_flush_tlb_kernel_range), %o0 869 sethi %hi(xcall_flush_tlb_kernel_range), %o0
799 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 870 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index ff496ab1e794..25877aebc685 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -17,7 +17,7 @@ config TILE
17 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
18 select HAVE_DEBUG_BUGVERBOSE 18 select HAVE_DEBUG_BUGVERBOSE
19 select HAVE_SYSCALL_WRAPPERS if TILEGX 19 select HAVE_SYSCALL_WRAPPERS if TILEGX
20 select HAVE_VIRT_TO_BUS 20 select VIRT_TO_BUS
21 select SYS_HYPERVISOR 21 select SYS_HYPERVISOR
22 select ARCH_HAVE_NMI_SAFE_CMPXCHG 22 select ARCH_HAVE_NMI_SAFE_CMPXCHG
23 select GENERIC_CLOCKEVENTS 23 select GENERIC_CLOCKEVENTS
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 8c5eff6d6df5..47684815e5c8 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
330CONFIG_MD_RAID1=m 330CONFIG_MD_RAID1=m
331CONFIG_MD_RAID10=m 331CONFIG_MD_RAID10=m
332CONFIG_MD_RAID456=m 332CONFIG_MD_RAID456=m
333CONFIG_MULTICORE_RAID456=y
334CONFIG_MD_FAULTY=m 333CONFIG_MD_FAULTY=m
335CONFIG_BLK_DEV_DM=m 334CONFIG_BLK_DEV_DM=m
336CONFIG_DM_DEBUG=y 335CONFIG_DM_DEBUG=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index e7a3dfcbcda7..dd2b8f0c631f 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
324CONFIG_MD_RAID1=m 324CONFIG_MD_RAID1=m
325CONFIG_MD_RAID10=m 325CONFIG_MD_RAID10=m
326CONFIG_MD_RAID456=m 326CONFIG_MD_RAID456=m
327CONFIG_MULTICORE_RAID456=y
328CONFIG_MD_FAULTY=m 327CONFIG_MD_FAULTY=m
329CONFIG_BLK_DEV_DM=m 328CONFIG_BLK_DEV_DM=m
330CONFIG_DM_DEBUG=y 329CONFIG_DM_DEBUG=y
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index 0f885af2b621..3257733003f8 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -16,6 +16,7 @@
16#define _ASM_TILE_HUGETLB_H 16#define _ASM_TILE_HUGETLB_H
17 17
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm-generic/hugetlb.h>
19 20
20 21
21static inline int is_hugepage_only_range(struct mm_struct *mm, 22static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 241c0bb60b12..c96f9bbb760d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -40,7 +40,15 @@
40#include <asm/percpu.h> 40#include <asm/percpu.h>
41#include <arch/spr_def.h> 41#include <arch/spr_def.h>
42 42
43/* Set and clear kernel interrupt masks. */ 43/*
44 * Set and clear kernel interrupt masks.
45 *
46 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
47 * clobber. We rely on it being equivalent to a compiler barrier in
48 * this code since arch_local_irq_save() and friends must act as
49 * compiler barriers. This compiler semantic is baked into enough
50 * places that the compiler will maintain it going forward.
51 */
44#if CHIP_HAS_SPLIT_INTR_MASK() 52#if CHIP_HAS_SPLIT_INTR_MASK()
45#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 53#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
46# error Fix assumptions about which word various interrupts are in 54# error Fix assumptions about which word various interrupts are in
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index e9c670d7a7fe..ccc8ef37235c 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -153,8 +153,6 @@ extern void _cpu_idle(void);
153#define TS_POLLING 0x0004 /* in idle loop but not sleeping */ 153#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
154#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ 154#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
155 155
156#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
157
158#ifndef __ASSEMBLY__ 156#ifndef __ASSEMBLY__
159#define HAVE_SET_RESTORE_SIGMASK 1 157#define HAVE_SET_RESTORE_SIGMASK 1
160static inline void set_restore_sigmask(void) 158static inline void set_restore_sigmask(void)
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index afb9c9a0d887..34d72a151bf3 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/irqflags.h> 19#include <linux/irqflags.h>
20#include <linux/printk.h>
20#include <asm/setup.h> 21#include <asm/setup.h>
21#include <hv/hypervisor.h> 22#include <hv/hypervisor.h>
22 23
@@ -33,25 +34,8 @@ static struct console early_hv_console = {
33}; 34};
34 35
35/* Direct interface for emergencies */ 36/* Direct interface for emergencies */
36static struct console *early_console = &early_hv_console;
37static int early_console_initialized;
38static int early_console_complete; 37static int early_console_complete;
39 38
40static void early_vprintk(const char *fmt, va_list ap)
41{
42 char buf[512];
43 int n = vscnprintf(buf, sizeof(buf), fmt, ap);
44 early_console->write(early_console, buf, n);
45}
46
47void early_printk(const char *fmt, ...)
48{
49 va_list ap;
50 va_start(ap, fmt);
51 early_vprintk(fmt, ap);
52 va_end(ap);
53}
54
55void early_panic(const char *fmt, ...) 39void early_panic(const char *fmt, ...)
56{ 40{
57 va_list ap; 41 va_list ap;
@@ -69,14 +53,13 @@ static int __initdata keep_early;
69 53
70static int __init setup_early_printk(char *str) 54static int __init setup_early_printk(char *str)
71{ 55{
72 if (early_console_initialized) 56 if (early_console)
73 return 1; 57 return 1;
74 58
75 if (str != NULL && strncmp(str, "keep", 4) == 0) 59 if (str != NULL && strncmp(str, "keep", 4) == 0)
76 keep_early = 1; 60 keep_early = 1;
77 61
78 early_console = &early_hv_console; 62 early_console = &early_hv_console;
79 early_console_initialized = 1;
80 register_console(early_console); 63 register_console(early_console);
81 64
82 return 0; 65 return 0;
@@ -85,12 +68,12 @@ static int __init setup_early_printk(char *str)
85void __init disable_early_printk(void) 68void __init disable_early_printk(void)
86{ 69{
87 early_console_complete = 1; 70 early_console_complete = 1;
88 if (!early_console_initialized || !early_console) 71 if (!early_console)
89 return; 72 return;
90 if (!keep_early) { 73 if (!keep_early) {
91 early_printk("disabling early console\n"); 74 early_printk("disabling early console\n");
92 unregister_console(early_console); 75 unregister_console(early_console);
93 early_console_initialized = 0; 76 early_console = NULL;
94 } else { 77 } else {
95 early_printk("keeping early console\n"); 78 early_printk("keeping early console\n");
96 } 79 }
@@ -98,7 +81,7 @@ void __init disable_early_printk(void)
98 81
99void warn_early_printk(void) 82void warn_early_printk(void)
100{ 83{
101 if (early_console_complete || early_console_initialized) 84 if (early_console_complete || early_console)
102 return; 85 return;
103 early_printk("\ 86 early_printk("\
104Machine shutting down before console output is fully initialized.\n\ 87Machine shutting down before console output is fully initialized.\n\
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index caf93ae11793..80b2a18deb87 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -40,13 +40,11 @@
40#include <arch/abi.h> 40#include <arch/abi.h>
41#include <arch/sim_def.h> 41#include <arch/sim_def.h>
42 42
43
44/* 43/*
45 * Use the (x86) "idle=poll" option to prefer low latency when leaving the 44 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
46 * idle loop over low power while in the idle loop, e.g. if we have 45 * idle loop over low power while in the idle loop, e.g. if we have
47 * one thread per core and we want to get threads out of futex waits fast. 46 * one thread per core and we want to get threads out of futex waits fast.
48 */ 47 */
49static int no_idle_nap;
50static int __init idle_setup(char *str) 48static int __init idle_setup(char *str)
51{ 49{
52 if (!str) 50 if (!str)
@@ -54,64 +52,19 @@ static int __init idle_setup(char *str)
54 52
55 if (!strcmp(str, "poll")) { 53 if (!strcmp(str, "poll")) {
56 pr_info("using polling idle threads.\n"); 54 pr_info("using polling idle threads.\n");
57 no_idle_nap = 1; 55 cpu_idle_poll_ctrl(true);
58 } else if (!strcmp(str, "halt")) 56 return 0;
59 no_idle_nap = 0; 57 } else if (!strcmp(str, "halt")) {
60 else 58 return 0;
61 return -1; 59 }
62 60 return -1;
63 return 0;
64} 61}
65early_param("idle", idle_setup); 62early_param("idle", idle_setup);
66 63
67/* 64void arch_cpu_idle(void)
68 * The idle thread. There's no useful work to be
69 * done, so just try to conserve power and have a
70 * low exit latency (ie sit in a loop waiting for
71 * somebody to say that they'd like to reschedule)
72 */
73void cpu_idle(void)
74{ 65{
75 int cpu = smp_processor_id(); 66 __get_cpu_var(irq_stat).idle_timestamp = jiffies;
76 67 _cpu_idle();
77
78 current_thread_info()->status |= TS_POLLING;
79
80 if (no_idle_nap) {
81 while (1) {
82 while (!need_resched())
83 cpu_relax();
84 schedule();
85 }
86 }
87
88 /* endless idle loop with no priority at all */
89 while (1) {
90 tick_nohz_idle_enter();
91 rcu_idle_enter();
92 while (!need_resched()) {
93 if (cpu_is_offline(cpu))
94 BUG(); /* no HOTPLUG_CPU */
95
96 local_irq_disable();
97 __get_cpu_var(irq_stat).idle_timestamp = jiffies;
98 current_thread_info()->status &= ~TS_POLLING;
99 /*
100 * TS_POLLING-cleared state must be visible before we
101 * test NEED_RESCHED:
102 */
103 smp_mb();
104
105 if (!need_resched())
106 _cpu_idle();
107 else
108 local_irq_enable();
109 current_thread_info()->status |= TS_POLLING;
110 }
111 rcu_idle_exit();
112 tick_nohz_idle_exit();
113 schedule_preempt_disabled();
114 }
115} 68}
116 69
117/* 70/*
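The conversion above (and the matching um and unicore32 changes later in this series) moves the architecture onto the generic idle loop: cpu_startup_entry() now owns need_resched() polling, the nohz/RCU bookkeeping and rescheduling, and the arch only supplies arch_cpu_idle(), which is entered with interrupts disabled and is expected to re-enable them before returning. A minimal sketch of the hook, with cpu_do_idle() standing in for whatever low-power wait the platform provides (illustrative only, not part of this patch):

	void arch_cpu_idle(void)
	{
		/* Called from the generic idle loop with IRQs disabled. */
		cpu_do_idle();		/* sleep until the next interrupt */
		local_irq_enable();	/* generic loop expects IRQs back on */
	}

Secondary CPUs enter the same loop via cpu_startup_entry(CPUHP_ONLINE), which is what replaces the open-coded preempt_enable()/cpu_idle() sequence in smpboot.c below.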
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index d1e15f7b59c6..7a5aa1a7864e 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
1004 1004
1005#ifdef CONFIG_BLK_DEV_INITRD 1005#ifdef CONFIG_BLK_DEV_INITRD
1006 1006
1007/*
1008 * Note that the kernel can potentially support other compression
1009 * techniques than gz, though we don't do so by default. If we ever
1010 * decide to do so we can either look for other filename extensions,
1011 * or just allow a file with this name to be compressed with an
1012 * arbitrary compressor (somewhat counterintuitively).
1013 */
1014static int __initdata set_initramfs_file; 1007static int __initdata set_initramfs_file;
1015static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; 1008static char __initdata initramfs_file[128] = "initramfs";
1016 1009
1017static int __init setup_initramfs_file(char *str) 1010static int __init setup_initramfs_file(char *str)
1018{ 1011{
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
1026early_param("initramfs_file", setup_initramfs_file); 1019early_param("initramfs_file", setup_initramfs_file);
1027 1020
1028/* 1021/*
1029 * We look for an "initramfs.cpio.gz" file in the hvfs. 1022 * We look for a file called "initramfs" in the hvfs. If there is one, we
1030 * If there is one, we allocate some memory for it and it will be 1023 * allocate some memory for it and it will be unpacked to the initramfs.
1031 * unpacked to the initramfs. 1024 * If it's compressed, the initrd code will uncompress it first.
1032 */ 1025 */
1033static void __init load_hv_initrd(void) 1026static void __init load_hv_initrd(void)
1034{ 1027{
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
1038 1031
1039 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); 1032 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
1040 if (fd == HV_ENOENT) { 1033 if (fd == HV_ENOENT) {
1041 if (set_initramfs_file) 1034 if (set_initramfs_file) {
1042 pr_warning("No such hvfs initramfs file '%s'\n", 1035 pr_warning("No such hvfs initramfs file '%s'\n",
1043 initramfs_file); 1036 initramfs_file);
1044 return; 1037 return;
1038 } else {
1039 /* Try old backwards-compatible name. */
1040 fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
1041 if (fd == HV_ENOENT)
1042 return;
1043 }
1045 } 1044 }
1046 BUG_ON(fd < 0); 1045 BUG_ON(fd < 0);
1047 stat = hv_fs_fstat(fd); 1046 stat = hv_fs_fstat(fd);
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index e686c5ac90be..44bab29bf2f3 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -207,9 +207,7 @@ void __cpuinit online_secondary(void)
207 /* Set up tile-timer clock-event device on this cpu */ 207 /* Set up tile-timer clock-event device on this cpu */
208 setup_tile_timer(); 208 setup_tile_timer();
209 209
210 preempt_enable(); 210 cpu_startup_entry(CPUHP_ONLINE);
211
212 cpu_idle();
213} 211}
214 212
215int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 213int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index b3b4972c2451..dfd63ce87327 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
592 in parallel. Reuse of the virtual address is prevented by 592 in parallel. Reuse of the virtual address is prevented by
593 leaving it in the global lists until we're done with it. 593 leaving it in the global lists until we're done with it.
594 cpa takes care of the direct mappings. */ 594 cpa takes care of the direct mappings. */
595 read_lock(&vmlist_lock); 595 p = find_vm_area((void *)addr);
596 for (p = vmlist; p; p = p->next) {
597 if (p->addr == addr)
598 break;
599 }
600 read_unlock(&vmlist_lock);
601 596
602 if (!p) { 597 if (!p) {
603 pr_err("iounmap: bad address %p\n", addr); 598 pr_err("iounmap: bad address %p\n", addr);
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index 78f1b8999964..c512b0306dd4 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf,
37extern int console_open_chan(struct line *line, struct console *co); 37extern int console_open_chan(struct line *line, struct console *co);
38extern void deactivate_chan(struct chan *chan, int irq); 38extern void deactivate_chan(struct chan *chan, int irq);
39extern void reactivate_chan(struct chan *chan, int irq); 39extern void reactivate_chan(struct chan *chan, int irq);
40extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty); 40extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
41extern int enable_chan(struct line *line); 41extern int enable_chan(struct line *line);
42extern void close_chan(struct line *line); 42extern void close_chan(struct line *line);
43extern int chan_window_size(struct line *line, 43extern int chan_window_size(struct line *line,
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 15c553c239a1..acbe6c67afba 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans)
122 return err; 122 return err;
123} 123}
124 124
125void chan_enable_winch(struct chan *chan, struct tty_struct *tty) 125void chan_enable_winch(struct chan *chan, struct tty_port *port)
126{ 126{
127 if (chan && chan->primary && chan->ops->winch) 127 if (chan && chan->primary && chan->ops->winch)
128 register_winch(chan->fd, tty); 128 register_winch(chan->fd, port);
129} 129}
130 130
131static void line_timer_cb(struct work_struct *work) 131static void line_timer_cb(struct work_struct *work)
@@ -568,11 +568,7 @@ void chan_interrupt(struct line *line, int irq)
568 reactivate_fd(chan->fd, irq); 568 reactivate_fd(chan->fd, irq);
569 if (err == -EIO) { 569 if (err == -EIO) {
570 if (chan->primary) { 570 if (chan->primary) {
571 struct tty_struct *tty = tty_port_tty_get(&line->port); 571 tty_port_tty_hangup(&line->port, false);
572 if (tty != NULL) {
573 tty_hangup(tty);
574 tty_kref_put(tty);
575 }
576 if (line->chan_out != chan) 572 if (line->chan_out != chan)
577 close_one_chan(line->chan_out, 1); 573 close_one_chan(line->chan_out, 1);
578 } 574 }
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 9be670ad23b5..3fd7c3efdb18 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -216,7 +216,7 @@ static int winch_thread(void *arg)
216 } 216 }
217} 217}
218 218
219static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out, 219static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
220 unsigned long *stack_out) 220 unsigned long *stack_out)
221{ 221{
222 struct winch_data data; 222 struct winch_data data;
@@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
271 return err; 271 return err;
272} 272}
273 273
274void register_winch(int fd, struct tty_struct *tty) 274void register_winch(int fd, struct tty_port *port)
275{ 275{
276 unsigned long stack; 276 unsigned long stack;
277 int pid, thread, count, thread_fd = -1; 277 int pid, thread, count, thread_fd = -1;
@@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty)
281 return; 281 return;
282 282
283 pid = tcgetpgrp(fd); 283 pid = tcgetpgrp(fd);
284 if (is_skas_winch(pid, fd, tty)) { 284 if (is_skas_winch(pid, fd, port)) {
285 register_winch_irq(-1, fd, -1, tty, 0); 285 register_winch_irq(-1, fd, -1, port, 0);
286 return; 286 return;
287 } 287 }
288 288
289 if (pid == -1) { 289 if (pid == -1) {
290 thread = winch_tramp(fd, tty, &thread_fd, &stack); 290 thread = winch_tramp(fd, port, &thread_fd, &stack);
291 if (thread < 0) 291 if (thread < 0)
292 return; 292 return;
293 293
294 register_winch_irq(thread_fd, fd, thread, tty, stack); 294 register_winch_irq(thread_fd, fd, thread, port, stack);
295 295
296 count = write(thread_fd, &c, sizeof(c)); 296 count = write(thread_fd, &c, sizeof(c));
297 if (count != sizeof(c)) 297 if (count != sizeof(c))
diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h
index dc693298eb8f..03f1b565c5f9 100644
--- a/arch/um/drivers/chan_user.h
+++ b/arch/um/drivers/chan_user.h
@@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
38 unsigned short *cols_out); 38 unsigned short *cols_out);
39extern void generic_free(void *data); 39extern void generic_free(void *data);
40 40
41struct tty_struct; 41struct tty_port;
42extern void register_winch(int fd, struct tty_struct *tty); 42extern void register_winch(int fd, struct tty_port *port);
43extern void register_winch_irq(int fd, int tty_fd, int pid, 43extern void register_winch_irq(int fd, int tty_fd, int pid,
44 struct tty_struct *tty, unsigned long stack); 44 struct tty_port *port, unsigned long stack);
45 45
46#define __channel_help(fn, prefix) \ 46#define __channel_help(fn, prefix) \
47__uml_help(fn, prefix "[0-9]*=<channel description>\n" \ 47__uml_help(fn, prefix "[0-9]*=<channel description>\n" \
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index f1b38571f94e..8035145f043b 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -248,7 +248,6 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
248{ 248{
249 struct chan *chan = data; 249 struct chan *chan = data;
250 struct line *line = chan->line; 250 struct line *line = chan->line;
251 struct tty_struct *tty;
252 int err; 251 int err;
253 252
254 /* 253 /*
@@ -267,12 +266,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
267 } 266 }
268 spin_unlock(&line->lock); 267 spin_unlock(&line->lock);
269 268
270 tty = tty_port_tty_get(&line->port); 269 tty_port_tty_wakeup(&line->port);
271 if (tty == NULL)
272 return IRQ_NONE;
273
274 tty_wakeup(tty);
275 tty_kref_put(tty);
276 270
277 return IRQ_HANDLED; 271 return IRQ_HANDLED;
278} 272}
@@ -305,7 +299,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
305 return ret; 299 return ret;
306 300
307 if (!line->sigio) { 301 if (!line->sigio) {
308 chan_enable_winch(line->chan_out, tty); 302 chan_enable_winch(line->chan_out, port);
309 line->sigio = 1; 303 line->sigio = 1;
310 } 304 }
311 305
@@ -315,8 +309,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
315 return 0; 309 return 0;
316} 310}
317 311
312static void unregister_winch(struct tty_struct *tty);
313
314static void line_destruct(struct tty_port *port)
315{
316 struct tty_struct *tty = tty_port_tty_get(port);
317 struct line *line = tty->driver_data;
318
319 if (line->sigio) {
320 unregister_winch(tty);
321 line->sigio = 0;
322 }
323}
324
318static const struct tty_port_operations line_port_ops = { 325static const struct tty_port_operations line_port_ops = {
319 .activate = line_activate, 326 .activate = line_activate,
327 .destruct = line_destruct,
320}; 328};
321 329
322int line_open(struct tty_struct *tty, struct file *filp) 330int line_open(struct tty_struct *tty, struct file *filp)
@@ -340,18 +348,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty,
340 return 0; 348 return 0;
341} 349}
342 350
343static void unregister_winch(struct tty_struct *tty);
344
345void line_cleanup(struct tty_struct *tty)
346{
347 struct line *line = tty->driver_data;
348
349 if (line->sigio) {
350 unregister_winch(tty);
351 line->sigio = 0;
352 }
353}
354
355void line_close(struct tty_struct *tty, struct file * filp) 351void line_close(struct tty_struct *tty, struct file * filp)
356{ 352{
357 struct line *line = tty->driver_data; 353 struct line *line = tty->driver_data;
@@ -601,7 +597,7 @@ struct winch {
601 int fd; 597 int fd;
602 int tty_fd; 598 int tty_fd;
603 int pid; 599 int pid;
604 struct tty_struct *tty; 600 struct tty_port *port;
605 unsigned long stack; 601 unsigned long stack;
606 struct work_struct work; 602 struct work_struct work;
607}; 603};
@@ -655,7 +651,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
655 goto out; 651 goto out;
656 } 652 }
657 } 653 }
658 tty = winch->tty; 654 tty = tty_port_tty_get(winch->port);
659 if (tty != NULL) { 655 if (tty != NULL) {
660 line = tty->driver_data; 656 line = tty->driver_data;
661 if (line != NULL) { 657 if (line != NULL) {
@@ -663,6 +659,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
663 &tty->winsize.ws_col); 659 &tty->winsize.ws_col);
664 kill_pgrp(tty->pgrp, SIGWINCH, 1); 660 kill_pgrp(tty->pgrp, SIGWINCH, 1);
665 } 661 }
662 tty_kref_put(tty);
666 } 663 }
667 out: 664 out:
668 if (winch->fd != -1) 665 if (winch->fd != -1)
@@ -670,7 +667,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
670 return IRQ_HANDLED; 667 return IRQ_HANDLED;
671} 668}
672 669
673void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, 670void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
674 unsigned long stack) 671 unsigned long stack)
675{ 672{
676 struct winch *winch; 673 struct winch *winch;
@@ -685,7 +682,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
685 .fd = fd, 682 .fd = fd,
686 .tty_fd = tty_fd, 683 .tty_fd = tty_fd,
687 .pid = pid, 684 .pid = pid,
688 .tty = tty, 685 .port = port,
689 .stack = stack }); 686 .stack = stack });
690 687
691 if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, 688 if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
@@ -714,15 +711,18 @@ static void unregister_winch(struct tty_struct *tty)
714{ 711{
715 struct list_head *ele, *next; 712 struct list_head *ele, *next;
716 struct winch *winch; 713 struct winch *winch;
714 struct tty_struct *wtty;
717 715
718 spin_lock(&winch_handler_lock); 716 spin_lock(&winch_handler_lock);
719 717
720 list_for_each_safe(ele, next, &winch_handlers) { 718 list_for_each_safe(ele, next, &winch_handlers) {
721 winch = list_entry(ele, struct winch, list); 719 winch = list_entry(ele, struct winch, list);
722 if (winch->tty == tty) { 720 wtty = tty_port_tty_get(winch->port);
721 if (wtty == tty) {
723 free_winch(winch); 722 free_winch(winch);
724 break; 723 break;
725 } 724 }
725 tty_kref_put(wtty);
726 } 726 }
727 spin_unlock(&winch_handler_lock); 727 spin_unlock(&winch_handler_lock);
728} 728}
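The um console/serial changes above all follow the same tty_port conversion: long-lived objects (the winch handlers, the line driver) keep a struct tty_port instead of a cached struct tty_struct, and a tty reference is taken only around each individual operation via tty_port_tty_get()/tty_kref_put(), or via the tty_port_tty_wakeup()/tty_port_tty_hangup() helpers. A small sketch of that pattern, with notify_resize() as a made-up consumer mirroring winch_interrupt() above:

	static void notify_resize(struct tty_port *port, int rows, int cols)
	{
		struct tty_struct *tty = tty_port_tty_get(port);  /* may be NULL */

		if (tty != NULL) {
			tty->winsize.ws_row = rows;
			tty->winsize.ws_col = cols;
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
			tty_kref_put(tty);	/* drop the temporary reference */
		}
	}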
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d8926c303629..39f186252e02 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
218 spin_lock_irqsave(&lp->lock, flags); 218 spin_lock_irqsave(&lp->lock, flags);
219 219
220 len = (*lp->write)(lp->fd, skb, lp); 220 len = (*lp->write)(lp->fd, skb, lp);
221 skb_tx_timestamp(skb);
221 222
222 if (len == skb->len) { 223 if (len == skb->len) {
223 dev->stats.tx_packets++; 224 dev->stats.tx_packets++;
@@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev,
281static const struct ethtool_ops uml_net_ethtool_ops = { 282static const struct ethtool_ops uml_net_ethtool_ops = {
282 .get_drvinfo = uml_net_get_drvinfo, 283 .get_drvinfo = uml_net_get_drvinfo,
283 .get_link = ethtool_op_get_link, 284 .get_link = ethtool_op_get_link,
285 .get_ts_info = ethtool_op_get_ts_info,
284}; 286};
285 287
286static void uml_net_user_timer_expire(unsigned long _conn) 288static void uml_net_user_timer_expire(unsigned long _conn)
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 16fdd0a0f9d6..b8d14fa52059 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = {
105 .throttle = line_throttle, 105 .throttle = line_throttle,
106 .unthrottle = line_unthrottle, 106 .unthrottle = line_unthrottle,
107 .install = ssl_install, 107 .install = ssl_install,
108 .cleanup = line_cleanup,
109 .hangup = line_hangup, 108 .hangup = line_hangup,
110}; 109};
111 110
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 827777af3f6d..7b361f36ca96 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -110,7 +110,6 @@ static const struct tty_operations console_ops = {
110 .set_termios = line_set_termios, 110 .set_termios = line_set_termios,
111 .throttle = line_throttle, 111 .throttle = line_throttle,
112 .unthrottle = line_unthrottle, 112 .unthrottle = line_unthrottle,
113 .cleanup = line_cleanup,
114 .hangup = line_hangup, 113 .hangup = line_hangup,
115}; 114};
116 115
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
index 49480f092456..4a0800bc37b2 100644
--- a/arch/um/kernel/early_printk.c
+++ b/arch/um/kernel/early_printk.c
@@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int
16 um_early_printk(s, n); 16 um_early_printk(s, n);
17} 17}
18 18
19static struct console early_console = { 19static struct console early_console_dev = {
20 .name = "earlycon", 20 .name = "earlycon",
21 .write = early_console_write, 21 .write = early_console_write,
22 .flags = CON_BOOT, 22 .flags = CON_BOOT,
@@ -25,8 +25,10 @@ static struct console early_console = {
25 25
26static int __init setup_early_printk(char *buf) 26static int __init setup_early_printk(char *buf)
27{ 27{
28 register_console(&early_console); 28 if (!early_console) {
29 29 early_console = &early_console_dev;
30 register_console(&early_console_dev);
31 }
30 return 0; 32 return 0;
31} 33}
32 34
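As with the tile and unicore32 early_printk changes in this series, the per-arch early_console pointer and the local early_printk()/early_vprintk() copies are dropped in favour of the shared ones provided by the generic printk code (as the removals above imply); the arch setup hook only has to claim the shared early_console pointer and register its console. Roughly, as a sketch where early_arch_console stands in for the arch's boot console:

	static int __init setup_early_printk(char *buf)
	{
		if (early_console)		/* generic pointer, already claimed */
			return 0;

		early_console = &early_arch_console;
		register_console(early_console);
		return 0;
	}
	early_param("earlyprintk", setup_early_printk);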
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 5abcbfbe7e25..9df292b270a8 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -42,17 +42,12 @@ static unsigned long brk_end;
42static void setup_highmem(unsigned long highmem_start, 42static void setup_highmem(unsigned long highmem_start,
43 unsigned long highmem_len) 43 unsigned long highmem_len)
44{ 44{
45 struct page *page;
46 unsigned long highmem_pfn; 45 unsigned long highmem_pfn;
47 int i; 46 int i;
48 47
49 highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT; 48 highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
50 for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) { 49 for (i = 0; i < highmem_len >> PAGE_SHIFT; i++)
51 page = &mem_map[highmem_pfn + i]; 50 free_highmem_page(&mem_map[highmem_pfn + i]);
52 ClearPageReserved(page);
53 init_page_count(page);
54 __free_page(page);
55 }
56} 51}
57#endif 52#endif
58 53
@@ -73,18 +68,13 @@ void __init mem_init(void)
73 totalram_pages = free_all_bootmem(); 68 totalram_pages = free_all_bootmem();
74 max_low_pfn = totalram_pages; 69 max_low_pfn = totalram_pages;
75#ifdef CONFIG_HIGHMEM 70#ifdef CONFIG_HIGHMEM
76 totalhigh_pages = highmem >> PAGE_SHIFT; 71 setup_highmem(end_iomem, highmem);
77 totalram_pages += totalhigh_pages;
78#endif 72#endif
79 num_physpages = totalram_pages; 73 num_physpages = totalram_pages;
80 max_pfn = totalram_pages; 74 max_pfn = totalram_pages;
81 printk(KERN_INFO "Memory: %luk available\n", 75 printk(KERN_INFO "Memory: %luk available\n",
82 nr_free_pages() << (PAGE_SHIFT-10)); 76 nr_free_pages() << (PAGE_SHIFT-10));
83 kmalloc_ok = 1; 77 kmalloc_ok = 1;
84
85#ifdef CONFIG_HIGHMEM
86 setup_highmem(end_iomem, highmem);
87#endif
88} 78}
89 79
90/* 80/*
@@ -254,15 +244,7 @@ void free_initmem(void)
254#ifdef CONFIG_BLK_DEV_INITRD 244#ifdef CONFIG_BLK_DEV_INITRD
255void free_initrd_mem(unsigned long start, unsigned long end) 245void free_initrd_mem(unsigned long start, unsigned long end)
256{ 246{
257 if (start < end) 247 free_reserved_area(start, end, 0, "initrd");
258 printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
259 (end - start) >> 10);
260 for (; start < end; start += PAGE_SIZE) {
261 ClearPageReserved(virt_to_page(start));
262 init_page_count(virt_to_page(start));
263 free_page(start);
264 totalram_pages++;
265 }
266} 248}
267#endif 249#endif
268 250
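free_initrd_mem() here (and in the unicore32 change below) now delegates to the common helper instead of open-coding the ClearPageReserved/init_page_count/free_page loop, just as setup_highmem() above switches to free_highmem_page(). A sketch of the resulting initrd hook, assuming the 4-argument free_reserved_area(start, end, poison, name) helper used throughout this series:

	#ifdef CONFIG_BLK_DEV_INITRD
	void free_initrd_mem(unsigned long start, unsigned long end)
	{
		/* Releases the reserved pages back to the page allocator
		 * and logs how much "initrd" memory was freed. */
		free_reserved_area(start, end, 0, "initrd");
	}
	#endif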
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index b462b13c5bae..bbcef522bcb1 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -210,33 +210,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
210 kmalloc_ok = save_kmalloc_ok; 210 kmalloc_ok = save_kmalloc_ok;
211} 211}
212 212
213void default_idle(void) 213void arch_cpu_idle(void)
214{ 214{
215 unsigned long long nsecs; 215 unsigned long long nsecs;
216 216
217 while (1) {
218 /* endless idle loop with no priority at all */
219
220 /*
221 * although we are an idle CPU, we do not want to
222 * get into the scheduler unnecessarily.
223 */
224 if (need_resched())
225 schedule();
226
227 tick_nohz_idle_enter();
228 rcu_idle_enter();
229 nsecs = disable_timer();
230 idle_sleep(nsecs);
231 rcu_idle_exit();
232 tick_nohz_idle_exit();
233 }
234}
235
236void cpu_idle(void)
237{
238 cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); 217 cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
239 default_idle(); 218 nsecs = disable_timer();
219 idle_sleep(nsecs);
220 local_irq_enable();
240} 221}
241 222
242int __cant_sleep(void) { 223int __cant_sleep(void) {
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index b1469fe93295..9d9f1b4bf826 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -15,7 +15,7 @@
15#include <sysdep/mcontext.h> 15#include <sysdep/mcontext.h>
16#include "internal.h" 16#include "internal.h"
17 17
18void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = { 18void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
19 [SIGTRAP] = relay_signal, 19 [SIGTRAP] = relay_signal,
20 [SIGFPE] = relay_signal, 20 [SIGFPE] = relay_signal,
21 [SIGILL] = relay_signal, 21 [SIGILL] = relay_signal,
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index da4b9e9999fd..337518c5042a 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -15,6 +15,8 @@
15#include <sys/mman.h> 15#include <sys/mman.h>
16#include <sys/stat.h> 16#include <sys/stat.h>
17#include <sys/wait.h> 17#include <sys/wait.h>
18#include <sys/time.h>
19#include <sys/resource.h>
18#include <asm/unistd.h> 20#include <asm/unistd.h>
19#include <init.h> 21#include <init.h>
20#include <os.h> 22#include <os.h>
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index dc50b157fc83..2943e3acdf0c 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -9,7 +9,7 @@ config UNICORE32
9 select GENERIC_ATOMIC64 9 select GENERIC_ATOMIC64
10 select HAVE_KERNEL_LZO 10 select HAVE_KERNEL_LZO
11 select HAVE_KERNEL_LZMA 11 select HAVE_KERNEL_LZMA
12 select HAVE_VIRT_TO_BUS 12 select VIRT_TO_BUS
13 select ARCH_HAVE_CUSTOM_GPIO_H 13 select ARCH_HAVE_CUSTOM_GPIO_H
14 select GENERIC_FIND_FIRST_BIT 14 select GENERIC_FIND_FIRST_BIT
15 select GENERIC_IRQ_PROBE 15 select GENERIC_IRQ_PROBE
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
index 3922255f1fa8..9be0d5d02a9a 100644
--- a/arch/unicore32/kernel/early_printk.c
+++ b/arch/unicore32/kernel/early_printk.c
@@ -33,21 +33,17 @@ static struct console early_ocd_console = {
33 .index = -1, 33 .index = -1,
34}; 34};
35 35
36/* Direct interface for emergencies */
37static struct console *early_console = &early_ocd_console;
38
39static int __initdata keep_early;
40
41static int __init setup_early_printk(char *buf) 36static int __init setup_early_printk(char *buf)
42{ 37{
43 if (!buf) 38 int keep_early;
39
40 if (!buf || early_console)
44 return 0; 41 return 0;
45 42
46 if (strstr(buf, "keep")) 43 if (strstr(buf, "keep"))
47 keep_early = 1; 44 keep_early = 1;
48 45
49 if (!strncmp(buf, "ocd", 3)) 46 early_console = &early_ocd_console;
50 early_console = &early_ocd_console;
51 47
52 if (keep_early) 48 if (keep_early)
53 early_console->flags &= ~CON_BOOT; 49 early_console->flags &= ~CON_BOOT;
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 872d7e22d847..7fab86d7c5d4 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,25 +45,10 @@ static const char * const processor_modes[] = {
45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" 45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
46}; 46};
47 47
48void cpu_idle(void) 48void arch_cpu_idle(void)
49{ 49{
50 /* endless idle loop with no priority at all */ 50 cpu_do_idle();
51 while (1) { 51 local_irq_enable();
52 tick_nohz_idle_enter();
53 rcu_idle_enter();
54 while (!need_resched()) {
55 local_irq_disable();
56 stop_critical_timings();
57 cpu_do_idle();
58 local_irq_enable();
59 start_critical_timings();
60 }
61 rcu_idle_exit();
62 tick_nohz_idle_exit();
63 preempt_enable_no_resched();
64 schedule();
65 preempt_disable();
66 }
67} 52}
68 53
69static char reboot_mode = 'h'; 54static char reboot_mode = 'h';
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index de186bde8975..63df12d71ce3 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -66,6 +66,9 @@ void show_mem(unsigned int filter)
66 printk(KERN_DEFAULT "Mem-info:\n"); 66 printk(KERN_DEFAULT "Mem-info:\n");
67 show_free_areas(filter); 67 show_free_areas(filter);
68 68
69 if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
70 return;
71
69 for_each_bank(i, mi) { 72 for_each_bank(i, mi) {
70 struct membank *bank = &mi->bank[i]; 73 struct membank *bank = &mi->bank[i];
71 unsigned int pfn1, pfn2; 74 unsigned int pfn1, pfn2;
@@ -313,24 +316,6 @@ void __init bootmem_init(void)
313 max_pfn = max_high - PHYS_PFN_OFFSET; 316 max_pfn = max_high - PHYS_PFN_OFFSET;
314} 317}
315 318
316static inline int free_area(unsigned long pfn, unsigned long end, char *s)
317{
318 unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
319
320 for (; pfn < end; pfn++) {
321 struct page *page = pfn_to_page(pfn);
322 ClearPageReserved(page);
323 init_page_count(page);
324 __free_page(page);
325 pages++;
326 }
327
328 if (size && s)
329 printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
330
331 return pages;
332}
333
334static inline void 319static inline void
335free_memmap(unsigned long start_pfn, unsigned long end_pfn) 320free_memmap(unsigned long start_pfn, unsigned long end_pfn)
336{ 321{
@@ -404,9 +389,9 @@ void __init mem_init(void)
404 389
405 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; 390 max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
406 391
407 /* this will put all unused low memory onto the freelists */
408 free_unused_memmap(&meminfo); 392 free_unused_memmap(&meminfo);
409 393
394 /* this will put all unused low memory onto the freelists */
410 totalram_pages += free_all_bootmem(); 395 totalram_pages += free_all_bootmem();
411 396
412 reserved_pages = free_pages = 0; 397 reserved_pages = free_pages = 0;
@@ -491,9 +476,7 @@ void __init mem_init(void)
491 476
492void free_initmem(void) 477void free_initmem(void)
493{ 478{
494 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 479 free_initmem_default(0);
495 __phys_to_pfn(__pa(__init_end)),
496 "init");
497} 480}
498 481
499#ifdef CONFIG_BLK_DEV_INITRD 482#ifdef CONFIG_BLK_DEV_INITRD
@@ -503,9 +486,7 @@ static int keep_initrd;
503void free_initrd_mem(unsigned long start, unsigned long end) 486void free_initrd_mem(unsigned long start, unsigned long end)
504{ 487{
505 if (!keep_initrd) 488 if (!keep_initrd)
506 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 489 free_reserved_area(start, end, 0, "initrd");
507 __phys_to_pfn(__pa(end)),
508 "initrd");
509} 490}
510 491
511static int __init keepinitrd_setup(char *__unused) 492static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a605597b08..13068ee22f33 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
235void __uc32_iounmap(volatile void __iomem *io_addr) 235void __uc32_iounmap(volatile void __iomem *io_addr)
236{ 236{
237 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); 237 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
238 struct vm_struct **p, *tmp; 238 struct vm_struct *vm;
239 239
240 /* 240 /*
241 * If this is a section based mapping we need to handle it 241 * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
244 * all the mappings before the area can be reclaimed 244 * all the mappings before the area can be reclaimed
245 * by someone else. 245 * by someone else.
246 */ 246 */
247 write_lock(&vmlist_lock); 247 vm = find_vm_area(addr);
248 for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { 248 if (vm && (vm->flags & VM_IOREMAP) &&
249 if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { 249 (vm->flags & VM_UNICORE_SECTION_MAPPING))
250 if (tmp->flags & VM_UNICORE_SECTION_MAPPING) { 250 unmap_area_sections((unsigned long)vm->addr, vm->size);
251 unmap_area_sections((unsigned long)tmp->addr,
252 tmp->size);
253 }
254 break;
255 }
256 }
257 write_unlock(&vmlist_lock);
258 251
259 vunmap(addr); 252 vunmap(addr);
260} 253}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a4f24f5b1218..05b057dca4a7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,7 +112,7 @@ config X86
112 select GENERIC_STRNLEN_USER 112 select GENERIC_STRNLEN_USER
113 select HAVE_CONTEXT_TRACKING if X86_64 113 select HAVE_CONTEXT_TRACKING if X86_64
114 select HAVE_IRQ_TIME_ACCOUNTING 114 select HAVE_IRQ_TIME_ACCOUNTING
115 select HAVE_VIRT_TO_BUS 115 select VIRT_TO_BUS
116 select MODULES_USE_ELF_REL if X86_32 116 select MODULES_USE_ELF_REL if X86_32
117 select MODULES_USE_ELF_RELA if X86_64 117 select MODULES_USE_ELF_RELA if X86_64
118 select CLONE_BACKWARDS if X86_32 118 select CLONE_BACKWARDS if X86_32
@@ -120,6 +120,7 @@ config X86
120 select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION 120 select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
121 select OLD_SIGACTION if X86_32 121 select OLD_SIGACTION if X86_32
122 select COMPAT_OLD_SIGACTION if IA32_EMULATION 122 select COMPAT_OLD_SIGACTION if IA32_EMULATION
123 select RTC_LIB
123 124
124config INSTRUCTION_DECODER 125config INSTRUCTION_DECODER
125 def_bool y 126 def_bool y
@@ -188,9 +189,6 @@ config GENERIC_CALIBRATE_DELAY
188config ARCH_HAS_CPU_RELAX 189config ARCH_HAS_CPU_RELAX
189 def_bool y 190 def_bool y
190 191
191config ARCH_HAS_DEFAULT_IDLE
192 def_bool y
193
194config ARCH_HAS_CACHE_LINE_SIZE 192config ARCH_HAS_CACHE_LINE_SIZE
195 def_bool y 193 def_bool y
196 194
@@ -389,7 +387,7 @@ config X86_NUMACHIP
389 387
390config X86_VSMP 388config X86_VSMP
391 bool "ScaleMP vSMP" 389 bool "ScaleMP vSMP"
392 select PARAVIRT_GUEST 390 select HYPERVISOR_GUEST
393 select PARAVIRT 391 select PARAVIRT
394 depends on X86_64 && PCI 392 depends on X86_64 && PCI
395 depends on X86_EXTENDED_PLATFORM 393 depends on X86_EXTENDED_PLATFORM
@@ -596,44 +594,17 @@ config SCHED_OMIT_FRAME_POINTER
596 594
597 If in doubt, say "Y". 595 If in doubt, say "Y".
598 596
599menuconfig PARAVIRT_GUEST 597menuconfig HYPERVISOR_GUEST
600 bool "Paravirtualized guest support" 598 bool "Linux guest support"
601 ---help---
602 Say Y here to get to see options related to running Linux under
603 various hypervisors. This option alone does not add any kernel code.
604
605 If you say N, all options in this submenu will be skipped and disabled.
606
607if PARAVIRT_GUEST
608
609config PARAVIRT_TIME_ACCOUNTING
610 bool "Paravirtual steal time accounting"
611 select PARAVIRT
612 default n
613 ---help--- 599 ---help---
614 Select this option to enable fine granularity task steal time 600 Say Y here to enable options for running Linux under various hyper-
615 accounting. Time spent executing other tasks in parallel with 601 visors. This option enables basic hypervisor detection and platform
616 the current vCPU is discounted from the vCPU power. To account for 602 setup.
617 that, there can be a small performance impact.
618
619 If in doubt, say N here.
620
621source "arch/x86/xen/Kconfig"
622 603
623config KVM_GUEST 604 If you say N, all options in this submenu will be skipped and
624 bool "KVM Guest support (including kvmclock)" 605 disabled, and Linux guest support won't be built in.
625 select PARAVIRT
626 select PARAVIRT
627 select PARAVIRT_CLOCK
628 default y if PARAVIRT_GUEST
629 ---help---
630 This option enables various optimizations for running under the KVM
631 hypervisor. It includes a paravirtualized clock, so that instead
632 of relying on a PIT (or probably other) emulation by the
633 underlying device model, the host provides the guest with
634 timing infrastructure such as time of day, and system time
635 606
636source "arch/x86/lguest/Kconfig" 607if HYPERVISOR_GUEST
637 608
638config PARAVIRT 609config PARAVIRT
639 bool "Enable paravirtualization code" 610 bool "Enable paravirtualization code"
@@ -643,6 +614,13 @@ config PARAVIRT
643 over full virtualization. However, when run without a hypervisor 614 over full virtualization. However, when run without a hypervisor
644 the kernel is theoretically slower and slightly larger. 615 the kernel is theoretically slower and slightly larger.
645 616
617config PARAVIRT_DEBUG
618 bool "paravirt-ops debugging"
619 depends on PARAVIRT && DEBUG_KERNEL
620 ---help---
621 Enable to debug paravirt_ops internals. Specifically, BUG if
622 a paravirt_op is missing when it is called.
623
646config PARAVIRT_SPINLOCKS 624config PARAVIRT_SPINLOCKS
647 bool "Paravirtualization layer for spinlocks" 625 bool "Paravirtualization layer for spinlocks"
648 depends on PARAVIRT && SMP 626 depends on PARAVIRT && SMP
@@ -656,17 +634,38 @@ config PARAVIRT_SPINLOCKS
656 634
657 If you are unsure how to answer this question, answer N. 635 If you are unsure how to answer this question, answer N.
658 636
659config PARAVIRT_CLOCK 637source "arch/x86/xen/Kconfig"
660 bool
661 638
662endif 639config KVM_GUEST
640 bool "KVM Guest support (including kvmclock)"
641 depends on PARAVIRT
642 select PARAVIRT_CLOCK
643 default y
644 ---help---
645 This option enables various optimizations for running under the KVM
646 hypervisor. It includes a paravirtualized clock, so that instead
647 of relying on a PIT (or probably other) emulation by the
648 underlying device model, the host provides the guest with
649 timing infrastructure such as time of day, and system time
663 650
664config PARAVIRT_DEBUG 651source "arch/x86/lguest/Kconfig"
665 bool "paravirt-ops debugging" 652
666 depends on PARAVIRT && DEBUG_KERNEL 653config PARAVIRT_TIME_ACCOUNTING
654 bool "Paravirtual steal time accounting"
655 depends on PARAVIRT
656 default n
667 ---help--- 657 ---help---
668 Enable to debug paravirt_ops internals. Specifically, BUG if 658 Select this option to enable fine granularity task steal time
669 a paravirt_op is missing when it is called. 659 accounting. Time spent executing other tasks in parallel with
660 the current vCPU is discounted from the vCPU power. To account for
661 that, there can be a small performance impact.
662
663 If in doubt, say N here.
664
665config PARAVIRT_CLOCK
666 bool
667
668endif #HYPERVISOR_GUEST
670 669
671config NO_BOOTMEM 670config NO_BOOTMEM
672 def_bool y 671 def_bool y
@@ -1549,6 +1548,7 @@ config X86_SMAP
1549config EFI 1548config EFI
1550 bool "EFI runtime service support" 1549 bool "EFI runtime service support"
1551 depends on ACPI 1550 depends on ACPI
1551 select UCS2_STRING
1552 ---help--- 1552 ---help---
1553 This enables the kernel to use EFI runtime services that are 1553 This enables the kernel to use EFI runtime services that are
1554 available (such as the EFI variable services). 1554 available (such as the EFI variable services).
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index b322f124ee3c..16f738385dcb 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -131,7 +131,7 @@ config DOUBLEFAULT
131 131
132config DEBUG_TLBFLUSH 132config DEBUG_TLBFLUSH
133 bool "Set upper limit of TLB entries to flush one-by-one" 133 bool "Set upper limit of TLB entries to flush one-by-one"
134 depends on DEBUG_KERNEL && (X86_64 || X86_INVLPG) 134 depends on DEBUG_KERNEL
135 ---help--- 135 ---help---
136 136
137 X86-only for now. 137 X86-only for now.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 8a84501acb1b..5ef205c5f37b 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
8 8
9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC 10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
29 $(obj)/piggy.o 29 $(obj)/piggy.o
30 30
31$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone 31$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
32$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone
33 32
34ifeq ($(CONFIG_EFI_STUB), y) 33ifeq ($(CONFIG_EFI_STUB), y)
35 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o 34 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
43$(obj)/vmlinux.bin: vmlinux FORCE 42$(obj)/vmlinux.bin: vmlinux FORCE
44 $(call if_changed,objcopy) 43 $(call if_changed,objcopy)
45 44
46targets += vmlinux.bin.all vmlinux.relocs 45targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs
47 46
48CMD_RELOCS = arch/x86/tools/relocs 47CMD_RELOCS = arch/x86/tools/relocs
49quiet_cmd_relocs = RELOCS $@ 48quiet_cmd_relocs = RELOCS $@
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c205035a6b96..35ee62fccf98 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
251 *size = len; 251 *size = len;
252} 252}
253 253
254static efi_status_t setup_efi_vars(struct boot_params *params)
255{
256 struct setup_data *data;
257 struct efi_var_bootdata *efidata;
258 u64 store_size, remaining_size, var_size;
259 efi_status_t status;
260
261 if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
262 return EFI_UNSUPPORTED;
263
264 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
265
266 while (data && data->next)
267 data = (struct setup_data *)(unsigned long)data->next;
268
269 status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
270 EFI_VARIABLE_NON_VOLATILE |
271 EFI_VARIABLE_BOOTSERVICE_ACCESS |
272 EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
273 &remaining_size, &var_size);
274
275 if (status != EFI_SUCCESS)
276 return status;
277
278 status = efi_call_phys3(sys_table->boottime->allocate_pool,
279 EFI_LOADER_DATA, sizeof(*efidata), &efidata);
280
281 if (status != EFI_SUCCESS)
282 return status;
283
284 efidata->data.type = SETUP_EFI_VARS;
285 efidata->data.len = sizeof(struct efi_var_bootdata) -
286 sizeof(struct setup_data);
287 efidata->data.next = 0;
288 efidata->store_size = store_size;
289 efidata->remaining_size = remaining_size;
290 efidata->max_var_size = var_size;
291
292 if (data)
293 data->next = (unsigned long)efidata;
294 else
295 params->hdr.setup_data = (unsigned long)efidata;
296
297}
298
254static efi_status_t setup_efi_pci(struct boot_params *params) 299static efi_status_t setup_efi_pci(struct boot_params *params)
255{ 300{
256 efi_pci_io_protocol *pci; 301 efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
1157 1202
1158 setup_graphics(boot_params); 1203 setup_graphics(boot_params);
1159 1204
1205 setup_efi_vars(boot_params);
1206
1160 setup_efi_pci(boot_params); 1207 setup_efi_pci(boot_params);
1161 1208
1162 status = efi_call_phys3(sys_table->boottime->allocate_pool, 1209 status = efi_call_phys3(sys_table->boottime->allocate_pool,
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c1d383d1fb7e..16f24e6dad79 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -52,7 +52,7 @@ ENTRY(startup_32)
52 jnz 1f 52 jnz 1f
53 53
54 cli 54 cli
55 movl $(__KERNEL_DS), %eax 55 movl $(__BOOT_DS), %eax
56 movl %eax, %ds 56 movl %eax, %ds
57 movl %eax, %es 57 movl %eax, %es
58 movl %eax, %ss 58 movl %eax, %ss
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 8d871eaddb66..d47786acb016 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -35,7 +35,7 @@ extern void __add_wrong_size(void)
35 35
36/* 36/*
37 * An exchange-type operation, which takes a value and a pointer, and 37 * An exchange-type operation, which takes a value and a pointer, and
38 * returns a the old value. 38 * returns the old value.
39 */ 39 */
40#define __xchg_op(ptr, arg, op, lock) \ 40#define __xchg_op(ptr, arg, op, lock) \
41 ({ \ 41 ({ \
diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h
index 1616562683e9..1fe49704b146 100644
--- a/arch/x86/include/asm/context_tracking.h
+++ b/arch/x86/include/asm/context_tracking.h
@@ -1,31 +1,10 @@
1#ifndef _ASM_X86_CONTEXT_TRACKING_H 1#ifndef _ASM_X86_CONTEXT_TRACKING_H
2#define _ASM_X86_CONTEXT_TRACKING_H 2#define _ASM_X86_CONTEXT_TRACKING_H
3 3
4#ifndef __ASSEMBLY__
5#include <linux/context_tracking.h>
6#include <asm/ptrace.h>
7
8static inline void exception_enter(struct pt_regs *regs)
9{
10 user_exit();
11}
12
13static inline void exception_exit(struct pt_regs *regs)
14{
15#ifdef CONFIG_CONTEXT_TRACKING
16 if (user_mode(regs))
17 user_enter();
18#endif
19}
20
21#else /* __ASSEMBLY__ */
22
23#ifdef CONFIG_CONTEXT_TRACKING 4#ifdef CONFIG_CONTEXT_TRACKING
24# define SCHEDULE_USER call schedule_user 5# define SCHEDULE_USER call schedule_user
25#else 6#else
26# define SCHEDULE_USER call schedule 7# define SCHEDULE_USER call schedule
27#endif 8#endif
28 9
29#endif /* !__ASSEMBLY__ */
30
31#endif 10#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 93fe929d1cee..398f7cb1353d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -9,6 +9,7 @@
9#endif 9#endif
10 10
11#define NCAPINTS 10 /* N 32-bit words worth of info */ 11#define NCAPINTS 10 /* N 32-bit words worth of info */
12#define NBUGINTS 1 /* N 32-bit bug flags */
12 13
13/* 14/*
14 * Note: If the comment begins with a quoted string, that string is used 15 * Note: If the comment begins with a quoted string, that string is used
@@ -100,6 +101,7 @@
100#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ 101#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
101#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ 102#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
102#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ 103#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */
104#define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */
103 105
104/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 106/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
105#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ 107#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
@@ -168,6 +170,7 @@
168#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ 170#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
169#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ 171#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
170#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ 172#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
173#define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */
171 174
172/* 175/*
173 * Auxiliary flags: Linux defined - For features scattered in various 176 * Auxiliary flags: Linux defined - For features scattered in various
@@ -216,6 +219,17 @@
216#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ 219#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
217#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ 220#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
218 221
222/*
223 * BUG word(s)
224 */
225#define X86_BUG(x) (NCAPINTS*32 + (x))
226
227#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
228#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
229#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
230#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* AMD Erratum 383 */
231#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* AMD Erratum 400 */
232
219#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 233#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
220 234
221#include <asm/asm.h> 235#include <asm/asm.h>
@@ -311,6 +325,7 @@ extern const char * const x86_power_flags[32];
311#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) 325#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
312#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) 326#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
313#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) 327#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
328#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
314#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) 329#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
315#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) 330#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
316#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) 331#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
@@ -401,6 +416,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
401#define static_cpu_has(bit) boot_cpu_has(bit) 416#define static_cpu_has(bit) boot_cpu_has(bit)
402#endif 417#endif
403 418
419#define cpu_has_bug(c, bit) cpu_has(c, (bit))
420#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
421#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit));
422
423#define static_cpu_has_bug(bit) static_cpu_has((bit))
424#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
425
404#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 426#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
405 427
406#endif /* _ASM_X86_CPUFEATURE_H */ 428#endif /* _ASM_X86_CPUFEATURE_H */
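The new X86_BUG_* bits are placed past NCAPINTS, so they travel in cpuinfo_x86 like ordinary feature bits and reuse the same accessors (cpu_has_bug/set_cpu_bug map onto cpu_has/set_cpu_cap above). Illustrative use only — the F00F condition and the handler name below are placeholders, not the real detection code:

	static void __cpuinit detect_f00f(struct cpuinfo_x86 *c)
	{
		/* placeholder condition: detection code marks affected CPUs */
		if (c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 5)
			set_cpu_bug(c, X86_BUG_F00F);
	}

	/* hot paths can then test the bit like any other capability */
	if (static_cpu_has_bug(X86_BUG_F00F))
		apply_f00f_workaround();	/* hypothetical handler */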
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 60c89f30c727..2fb5d5884e23 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
102extern void efi_unmap_memmap(void); 102extern void efi_unmap_memmap(void);
103extern void efi_memory_uc(u64 addr, unsigned long size); 103extern void efi_memory_uc(u64 addr, unsigned long size);
104 104
105struct efi_var_bootdata {
106 struct setup_data data;
107 u64 store_size;
108 u64 remaining_size;
109 u64 max_var_size;
110};
111
105#ifdef CONFIG_EFI 112#ifdef CONFIG_EFI
106 113
107static inline bool efi_is_native(void) 114static inline bool efi_is_native(void)
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index a09c28571064..0dc7d9e21c34 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -104,12 +104,7 @@ enum fixed_addresses {
104 FIX_LI_PCIA, /* Lithium PCI Bridge A */ 104 FIX_LI_PCIA, /* Lithium PCI Bridge A */
105 FIX_LI_PCIB, /* Lithium PCI Bridge B */ 105 FIX_LI_PCIB, /* Lithium PCI Bridge B */
106#endif 106#endif
107#ifdef CONFIG_X86_F00F_BUG 107 FIX_RO_IDT, /* Virtual mapping for read-only IDT */
108 FIX_F00F_IDT, /* Virtual mapping for IDT */
109#endif
110#ifdef CONFIG_X86_CYCLONE_TIMER
111 FIX_CYCLONE_TIMER, /*cyclone timer register*/
112#endif
113#ifdef CONFIG_X86_32 108#ifdef CONFIG_X86_32
114 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ 109 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
115 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, 110 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index bdd35dbd0605..a8091216963b 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
2#define _ASM_X86_HUGETLB_H 2#define _ASM_X86_HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
5 6
6 7
7static inline int is_hugepage_only_range(struct mm_struct *mm, 8static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 86095ed14135..2d4b5e6107cd 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,13 +20,11 @@
20#ifndef _ASM_X86_HYPERVISOR_H 20#ifndef _ASM_X86_HYPERVISOR_H
21#define _ASM_X86_HYPERVISOR_H 21#define _ASM_X86_HYPERVISOR_H
22 22
23#ifdef CONFIG_HYPERVISOR_GUEST
24
23#include <asm/kvm_para.h> 25#include <asm/kvm_para.h>
24#include <asm/xen/hypervisor.h> 26#include <asm/xen/hypervisor.h>
25 27
26extern void init_hypervisor(struct cpuinfo_x86 *c);
27extern void init_hypervisor_platform(void);
28extern bool hypervisor_x2apic_available(void);
29
30/* 28/*
31 * x86 hypervisor information 29 * x86 hypervisor information
32 */ 30 */
@@ -55,4 +53,12 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
55extern const struct hypervisor_x86 x86_hyper_xen_hvm; 53extern const struct hypervisor_x86 x86_hyper_xen_hvm;
56extern const struct hypervisor_x86 x86_hyper_kvm; 54extern const struct hypervisor_x86 x86_hyper_kvm;
57 55
58#endif 56extern void init_hypervisor(struct cpuinfo_x86 *c);
57extern void init_hypervisor_platform(void);
58extern bool hypervisor_x2apic_available(void);
59#else
60static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
61static inline void init_hypervisor_platform(void) { }
62static inline bool hypervisor_x2apic_available(void) { return false; }
63#endif /* CONFIG_HYPERVISOR_GUEST */
64#endif /* _ASM_X86_HYPERVISOR_H */
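
The reshuffled hypervisor.h follows the usual kernel pattern: real declarations under CONFIG_HYPERVISOR_GUEST, empty static inline stubs otherwise, so callers can invoke init_hypervisor_platform() unconditionally. A generic sketch of the same idiom, with a made-up config symbol and function names:

/* Illustrative only: the "declaration or inline stub" idiom. */
#ifdef CONFIG_EXAMPLE_GUEST
extern void example_guest_init(void);
extern bool example_guest_has_x2apic(void);
#else
static inline void example_guest_init(void) { }
static inline bool example_guest_has_x2apic(void) { return false; }
#endif

/* Callers need no #ifdef of their own: */
static void example_setup(void)
{
	example_guest_init();	/* compiles to nothing when the option is off */
	if (example_guest_has_x2apic())
		;		/* ... enable the x2APIC path ... */
}
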
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d3ddd17405d0..5a6d2873f80e 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -77,6 +77,7 @@ struct arch_specific_insn {
77 * a post_handler or break_handler). 77 * a post_handler or break_handler).
78 */ 78 */
79 int boostable; 79 int boostable;
80 bool if_modifier;
80}; 81};
81 82
82struct arch_optimized_insn { 83struct arch_optimized_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d22409..4979778cc7fb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
414 gpa_t time; 414 gpa_t time;
415 struct pvclock_vcpu_time_info hv_clock; 415 struct pvclock_vcpu_time_info hv_clock;
416 unsigned int hw_tsc_khz; 416 unsigned int hw_tsc_khz;
417 unsigned int time_offset; 417 struct gfn_to_hva_cache pv_time;
418 struct page *time_page; 418 bool pv_time_enabled;
419 /* set guest stopped flag in pvclock flags field */ 419 /* set guest stopped flag in pvclock flags field */
420 bool pvclock_set_guest_stopped_request; 420 bool pvclock_set_guest_stopped_request;
421 421
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f4076af1f4ed..fa5f71e021d5 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -146,13 +146,13 @@ DECLARE_PER_CPU(struct device *, mce_device);
146void mce_intel_feature_init(struct cpuinfo_x86 *c); 146void mce_intel_feature_init(struct cpuinfo_x86 *c);
147void cmci_clear(void); 147void cmci_clear(void);
148void cmci_reenable(void); 148void cmci_reenable(void);
149void cmci_rediscover(int dying); 149void cmci_rediscover(void);
150void cmci_recheck(void); 150void cmci_recheck(void);
151#else 151#else
152static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } 152static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
153static inline void cmci_clear(void) {} 153static inline void cmci_clear(void) {}
154static inline void cmci_reenable(void) {} 154static inline void cmci_reenable(void) {}
155static inline void cmci_rediscover(int dying) {} 155static inline void cmci_rediscover(void) {}
156static inline void cmci_recheck(void) {} 156static inline void cmci_recheck(void) {}
157#endif 157#endif
158 158
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9264802e2824..cb7502852acb 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
137 * pointer indirection), this allows gcc to optimize better 137 * pointer indirection), this allows gcc to optimize better
138 */ 138 */
139 139
140#define rdmsr(msr, val1, val2) \ 140#define rdmsr(msr, low, high) \
141do { \ 141do { \
142 u64 __val = native_read_msr((msr)); \ 142 u64 __val = native_read_msr((msr)); \
143 (void)((val1) = (u32)__val); \ 143 (void)((low) = (u32)__val); \
144 (void)((val2) = (u32)(__val >> 32)); \ 144 (void)((high) = (u32)(__val >> 32)); \
145} while (0) 145} while (0)
146 146
147static inline void wrmsr(unsigned msr, unsigned low, unsigned high) 147static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
162} 162}
163 163
164/* rdmsr with exception handling */ 164/* rdmsr with exception handling */
165#define rdmsr_safe(msr, p1, p2) \ 165#define rdmsr_safe(msr, low, high) \
166({ \ 166({ \
167 int __err; \ 167 int __err; \
168 u64 __val = native_read_msr_safe((msr), &__err); \ 168 u64 __val = native_read_msr_safe((msr), &__err); \
169 (*p1) = (u32)__val; \ 169 (*low) = (u32)__val; \
170 (*p2) = (u32)(__val >> 32); \ 170 (*high) = (u32)(__val >> 32); \
171 __err; \ 171 __err; \
172}) 172})
173 173
@@ -208,7 +208,7 @@ do { \
208#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ 208#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
209 (u32)((val) >> 32)) 209 (u32)((val) >> 32))
210 210
211#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2)) 211#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
212 212
213#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) 213#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
214 214
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8b491e66eaa8..6c896fbe21db 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -48,6 +48,5 @@
48 * arch/x86/kernel/head_64.S), and it is mapped here: 48 * arch/x86/kernel/head_64.S), and it is mapped here:
49 */ 49 */
50#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 50#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
51#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
52 51
53#endif /* _ASM_X86_PAGE_64_DEFS_H */ 52#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5edd1742cfd0..cfdc9ee4c900 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -262,10 +262,6 @@ static inline void set_ldt(const void *addr, unsigned entries)
262{ 262{
263 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); 263 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
264} 264}
265static inline void store_gdt(struct desc_ptr *dtr)
266{
267 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
268}
269static inline void store_idt(struct desc_ptr *dtr) 265static inline void store_idt(struct desc_ptr *dtr)
270{ 266{
271 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); 267 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
@@ -703,7 +699,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
703 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); 699 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
704} 700}
705 701
706void arch_flush_lazy_mmu_mode(void); 702static inline void arch_flush_lazy_mmu_mode(void)
703{
704 PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
705}
707 706
708static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, 707static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
709 phys_addr_t phys, pgprot_t flags) 708 phys_addr_t phys, pgprot_t flags)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236ed83af..0db1fcac668c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
91 /* Set deferred update mode, used for batching operations. */ 91 /* Set deferred update mode, used for batching operations. */
92 void (*enter)(void); 92 void (*enter)(void);
93 void (*leave)(void); 93 void (*leave)(void);
94 void (*flush)(void);
94}; 95};
95 96
96struct pv_time_ops { 97struct pv_time_ops {
@@ -122,7 +123,7 @@ struct pv_cpu_ops {
122 void (*load_tr_desc)(void); 123 void (*load_tr_desc)(void);
123 void (*load_gdt)(const struct desc_ptr *); 124 void (*load_gdt)(const struct desc_ptr *);
124 void (*load_idt)(const struct desc_ptr *); 125 void (*load_idt)(const struct desc_ptr *);
125 void (*store_gdt)(struct desc_ptr *); 126 /* store_gdt has been removed. */
126 void (*store_idt)(struct desc_ptr *); 127 void (*store_idt)(struct desc_ptr *);
127 void (*set_ldt)(const void *desc, unsigned entries); 128 void (*set_ldt)(const void *desc, unsigned entries);
128 unsigned long (*store_tr)(void); 129 unsigned long (*store_tr)(void);
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
679 680
680void paravirt_enter_lazy_mmu(void); 681void paravirt_enter_lazy_mmu(void);
681void paravirt_leave_lazy_mmu(void); 682void paravirt_leave_lazy_mmu(void);
683void paravirt_flush_lazy_mmu(void);
682 684
683void _paravirt_nop(void); 685void _paravirt_nop(void);
684u32 _paravirt_ident_32(u32); 686u32 _paravirt_ident_32(u32);
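
With pv_lazy_ops growing a .flush hook, arch_flush_lazy_mmu_mode() turns into a plain PVOP call instead of an out-of-line function. A hedged sketch of what a backend's flush implementation might look like (the function name below is illustrative; only the hook itself comes from this patch):

/* Illustrative backend hook: force out any batched MMU updates. */
static void example_flush_lazy_mmu(void)
{
	preempt_disable();

	/* If we are inside a lazy MMU section, leave it (which issues the
	 * queued page-table updates) and immediately re-enter lazy mode. */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}

/* Wired up by the backend via: pv_mmu_ops.lazy_mode.flush = example_flush_lazy_mmu; */
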
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 4f7e67e2345e..85e13ccf15c4 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -24,45 +24,45 @@
24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) 24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
25#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) 25#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
26 26
27#define P4_ESCR_EVENT_MASK 0x7e000000U 27#define P4_ESCR_EVENT_MASK 0x7e000000ULL
28#define P4_ESCR_EVENT_SHIFT 25 28#define P4_ESCR_EVENT_SHIFT 25
29#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U 29#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
30#define P4_ESCR_EVENTMASK_SHIFT 9 30#define P4_ESCR_EVENTMASK_SHIFT 9
31#define P4_ESCR_TAG_MASK 0x000001e0U 31#define P4_ESCR_TAG_MASK 0x000001e0ULL
32#define P4_ESCR_TAG_SHIFT 5 32#define P4_ESCR_TAG_SHIFT 5
33#define P4_ESCR_TAG_ENABLE 0x00000010U 33#define P4_ESCR_TAG_ENABLE 0x00000010ULL
34#define P4_ESCR_T0_OS 0x00000008U 34#define P4_ESCR_T0_OS 0x00000008ULL
35#define P4_ESCR_T0_USR 0x00000004U 35#define P4_ESCR_T0_USR 0x00000004ULL
36#define P4_ESCR_T1_OS 0x00000002U 36#define P4_ESCR_T1_OS 0x00000002ULL
37#define P4_ESCR_T1_USR 0x00000001U 37#define P4_ESCR_T1_USR 0x00000001ULL
38 38
39#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT) 39#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
40#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) 40#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
41#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) 41#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
42 42
43#define P4_CCCR_OVF 0x80000000U 43#define P4_CCCR_OVF 0x80000000ULL
44#define P4_CCCR_CASCADE 0x40000000U 44#define P4_CCCR_CASCADE 0x40000000ULL
45#define P4_CCCR_OVF_PMI_T0 0x04000000U 45#define P4_CCCR_OVF_PMI_T0 0x04000000ULL
46#define P4_CCCR_OVF_PMI_T1 0x08000000U 46#define P4_CCCR_OVF_PMI_T1 0x08000000ULL
47#define P4_CCCR_FORCE_OVF 0x02000000U 47#define P4_CCCR_FORCE_OVF 0x02000000ULL
48#define P4_CCCR_EDGE 0x01000000U 48#define P4_CCCR_EDGE 0x01000000ULL
49#define P4_CCCR_THRESHOLD_MASK 0x00f00000U 49#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
50#define P4_CCCR_THRESHOLD_SHIFT 20 50#define P4_CCCR_THRESHOLD_SHIFT 20
51#define P4_CCCR_COMPLEMENT 0x00080000U 51#define P4_CCCR_COMPLEMENT 0x00080000ULL
52#define P4_CCCR_COMPARE 0x00040000U 52#define P4_CCCR_COMPARE 0x00040000ULL
53#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U 53#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
54#define P4_CCCR_ESCR_SELECT_SHIFT 13 54#define P4_CCCR_ESCR_SELECT_SHIFT 13
55#define P4_CCCR_ENABLE 0x00001000U 55#define P4_CCCR_ENABLE 0x00001000ULL
56#define P4_CCCR_THREAD_SINGLE 0x00010000U 56#define P4_CCCR_THREAD_SINGLE 0x00010000ULL
57#define P4_CCCR_THREAD_BOTH 0x00020000U 57#define P4_CCCR_THREAD_BOTH 0x00020000ULL
58#define P4_CCCR_THREAD_ANY 0x00030000U 58#define P4_CCCR_THREAD_ANY 0x00030000ULL
59#define P4_CCCR_RESERVED 0x00000fffU 59#define P4_CCCR_RESERVED 0x00000fffULL
60 60
61#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) 61#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
62#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) 62#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
63 63
64#define P4_GEN_ESCR_EMASK(class, name, bit) \ 64#define P4_GEN_ESCR_EMASK(class, name, bit) \
65 class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) 65 class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
66#define P4_ESCR_EMASK_BIT(class, name) class##__##name 66#define P4_ESCR_EMASK_BIT(class, name) class##__##name
67 67
68/* 68/*
@@ -107,7 +107,7 @@
107 * P4_PEBS_CONFIG_MASK and related bits on 107 * P4_PEBS_CONFIG_MASK and related bits on
108 * modification.) 108 * modification.)
109 */ 109 */
110#define P4_CONFIG_ALIASABLE (1 << 9) 110#define P4_CONFIG_ALIASABLE (1ULL << 9)
111 111
112/* 112/*
113 * The bits we allow to pass for RAW events 113 * The bits we allow to pass for RAW events
@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS {
784 * Note we have UOP and PEBS bits reserved for now 784 * Note we have UOP and PEBS bits reserved for now
785 * just in case if we will need them once 785 * just in case if we will need them once
786 */ 786 */
787#define P4_PEBS_CONFIG_ENABLE (1 << 7) 787#define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
788#define P4_PEBS_CONFIG_UOP_TAG (1 << 8) 788#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
789#define P4_PEBS_CONFIG_METRIC_MASK 0x3f 789#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
790#define P4_PEBS_CONFIG_MASK 0xff 790#define P4_PEBS_CONFIG_MASK 0xFFLL
791 791
792/* 792/*
793 * mem: Only counters MSR_IQ_COUNTER4 (16) and 793 * mem: Only counters MSR_IQ_COUNTER4 (16) and
794 * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling 794 * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
795 */ 795 */
796#define P4_PEBS_ENABLE 0x02000000U 796#define P4_PEBS_ENABLE 0x02000000ULL
797#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U 797#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
798 798
799#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK) 799#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
800#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK) 800#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
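
The U-to-ULL widening of the P4 ESCR/CCCR constants matters because these masks end up inside 64-bit perf event config words. One hazard the ULL suffixes remove is a signed 32-bit intermediate with bit 31 set, which sign-extends when widened; a small standalone illustration of that effect (userspace C, for demonstration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A signed 32-bit value with bit 31 set (e.g. the result of an int
	 * shift that reaches bit 31) sign-extends when stored into a u64. */
	int32_t  narrow   = (int32_t)0x80000000u;	/* negative as an int */
	uint64_t widened  = (uint64_t)narrow;		/* 0xffffffff80000000 */
	uint64_t intended = 0x80000000ULL;		/* what the ULL form gives */

	printf("widened=%#llx intended=%#llx\n",
	       (unsigned long long)widened, (unsigned long long)intended);
	return 0;
}
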
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 567b5d0632b2..e6423002c10b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -351,7 +351,6 @@ static inline void update_page_count(int level, unsigned long pages) { }
351 * as a pte too. 351 * as a pte too.
352 */ 352 */
353extern pte_t *lookup_address(unsigned long address, unsigned int *level); 353extern pte_t *lookup_address(unsigned long address, unsigned int *level);
354extern int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase);
355extern phys_addr_t slow_virt_to_phys(void *__address); 354extern phys_addr_t slow_virt_to_phys(void *__address);
356 355
357#endif /* !__ASSEMBLY__ */ 356#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3270116b1488..22224b3b43bb 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,9 +91,6 @@ struct cpuinfo_x86 {
91 /* Problems on some 486Dx4's and old 386's: */ 91 /* Problems on some 486Dx4's and old 386's: */
92 char hard_math; 92 char hard_math;
93 char rfu; 93 char rfu;
94 char fdiv_bug;
95 char f00f_bug;
96 char coma_bug;
97 char pad0; 94 char pad0;
98#else 95#else
99 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ 96 /* Number of 4K pages in DTLB/ITLB combined(in pages): */
@@ -107,7 +104,7 @@ struct cpuinfo_x86 {
107 __u32 extended_cpuid_level; 104 __u32 extended_cpuid_level;
108 /* Maximum supported CPUID level, -1=no CPUID: */ 105 /* Maximum supported CPUID level, -1=no CPUID: */
109 int cpuid_level; 106 int cpuid_level;
110 __u32 x86_capability[NCAPINTS]; 107 __u32 x86_capability[NCAPINTS + NBUGINTS];
111 char x86_vendor_id[16]; 108 char x86_vendor_id[16];
112 char x86_model_id[64]; 109 char x86_model_id[64];
113 /* in KB - valid for CPUS which support this call: */ 110 /* in KB - valid for CPUS which support this call: */
@@ -973,26 +970,6 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
973 return ratio; 970 return ratio;
974} 971}
975 972
976/*
977 * AMD errata checking
978 */
979#ifdef CONFIG_CPU_SUP_AMD
980extern const int amd_erratum_383[];
981extern const int amd_erratum_400[];
982extern bool cpu_has_amd_erratum(const int *);
983
984#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
985#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
986#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
987 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
988#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
989#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
990#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
991
992#else
993#define cpu_has_amd_erratum(x) (false)
994#endif /* CONFIG_CPU_SUP_AMD */
995
996extern unsigned long arch_align_stack(unsigned long sp); 973extern unsigned long arch_align_stack(unsigned long sp);
997extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 974extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
998 975
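
Growing x86_capability[] from NCAPINTS to NCAPINTS + NBUGINTS words is what lets the X86_BUG(x) bits defined earlier reuse the normal capability bitmap. A short sketch of the word/bit arithmetic those helpers conceptually rely on (assuming the usual 32-bit-word split):

/* Sketch: mapping a capability or bug bit number into x86_capability[].
 * For instance X86_BUG(3) == NCAPINTS*32 + 3 lands in word NCAPINTS, bit 3. */
static inline unsigned int cap_word(unsigned int bit) { return bit >> 5; }  /* bit / 32 */
static inline unsigned int cap_bit(unsigned int bit)  { return bit & 31; }  /* bit % 32 */

static inline int example_test_cap(const u32 *caps, unsigned int bit)
{
	return (caps[cap_word(bit)] >> cap_bit(bit)) & 1;
}
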
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 487055c8c1aa..f6064b7385b0 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -15,7 +15,6 @@ struct saved_context {
15 unsigned long cr0, cr2, cr3, cr4; 15 unsigned long cr0, cr2, cr3, cr4;
16 u64 misc_enable; 16 u64 misc_enable;
17 bool misc_enable_saved; 17 bool misc_enable_saved;
18 struct desc_ptr gdt;
19 struct desc_ptr idt; 18 struct desc_ptr idt;
20 u16 ldt; 19 u16 ldt;
21 u16 tss; 20 u16 tss;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 09b0bf104156..97b84e08a211 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -25,9 +25,6 @@ struct saved_context {
25 u64 misc_enable; 25 u64 misc_enable;
26 bool misc_enable_saved; 26 bool misc_enable_saved;
27 unsigned long efer; 27 unsigned long efer;
28 u16 gdt_pad;
29 u16 gdt_limit;
30 unsigned long gdt_base;
31 u16 idt_pad; 28 u16 idt_pad;
32 u16 idt_limit; 29 u16 idt_limit;
33 unsigned long idt_base; 30 unsigned long idt_base;
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 1ace47b62592..2e188d68397c 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
29 */ 29 */
30static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 30static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
31{ 31{
32 return regs->orig_ax & __SYSCALL_MASK; 32 return regs->orig_ax;
33} 33}
34 34
35static inline void syscall_rollback(struct task_struct *task, 35static inline void syscall_rollback(struct task_struct *task,
36 struct pt_regs *regs) 36 struct pt_regs *regs)
37{ 37{
38 regs->ax = regs->orig_ax & __SYSCALL_MASK; 38 regs->ax = regs->orig_ax;
39} 39}
40 40
41static inline long syscall_get_error(struct task_struct *task, 41static inline long syscall_get_error(struct task_struct *task,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2cd056e3ada3..a1df6e84691f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -241,8 +241,6 @@ static inline struct thread_info *current_thread_info(void)
241 skip sending interrupt */ 241 skip sending interrupt */
242#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ 242#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
243 243
244#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
245
246#ifndef __ASSEMBLY__ 244#ifndef __ASSEMBLY__
247#define HAVE_SET_RESTORE_SIGMASK 1 245#define HAVE_SET_RESTORE_SIGMASK 1
248static inline void set_restore_sigmask(void) 246static inline void set_restore_sigmask(void)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 4fef20773b8f..c7797307fc2b 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
7 7
8#define tlb_flush(tlb) \ 8#define tlb_flush(tlb) \
9{ \ 9{ \
10 if (tlb->fullmm == 0) \ 10 if (!tlb->fullmm && !tlb->need_flush_all) \
11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ 11 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
12 else \ 12 else \
13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ 13 flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 8ff8be7835ab..6e5197910fd8 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -55,4 +55,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
55extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); 55extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
56extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 56extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
57extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 57extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
58extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
58#endif /* _ASM_UPROBES_H */ 59#endif /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index c20d1ce62dc6..e709884d0ef9 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
382 return _hypercall3(int, console_io, cmd, count, str); 382 return _hypercall3(int, console_io, cmd, count, str);
383} 383}
384 384
385extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); 385extern int __must_check xen_physdev_op_compat(int, void *);
386 386
387static inline int 387static inline int
388HYPERVISOR_physdev_op(int cmd, void *arg) 388HYPERVISOR_physdev_op(int cmd, void *arg)
389{ 389{
390 int rc = _hypercall2(int, physdev_op, cmd, arg); 390 int rc = _hypercall2(int, physdev_op, cmd, arg);
391 if (unlikely(rc == -ENOSYS)) 391 if (unlikely(rc == -ENOSYS))
392 rc = HYPERVISOR_physdev_op_compat(cmd, arg); 392 rc = xen_physdev_op_compat(cmd, arg);
393 return rc; 393 return rc;
394} 394}
395 395
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf90710..08744242b8d2 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,6 +6,7 @@
6#define SETUP_E820_EXT 1 6#define SETUP_E820_EXT 1
7#define SETUP_DTB 2 7#define SETUP_DTB 2
8#define SETUP_PCI 3 8#define SETUP_PCI 3
9#define SETUP_EFI_VARS 4
9 10
10/* ram_size flags */ 11/* ram_size flags */
11#define RAMDISK_IMAGE_START_MASK 0x07FF 12#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 892ce40a7470..b5757885d7a4 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -44,6 +44,7 @@
44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) 44#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) 45#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
46 46
47#define MSR_PLATFORM_INFO 0x000000ce
47#define MSR_MTRRcap 0x000000fe 48#define MSR_MTRRcap 0x000000fe
48#define MSR_IA32_BBL_CR_CTL 0x00000119 49#define MSR_IA32_BBL_CR_CTL 0x00000119
49#define MSR_IA32_BBL_CR_CTL3 0x0000011e 50#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -71,6 +72,7 @@
71#define MSR_IA32_PEBS_ENABLE 0x000003f1 72#define MSR_IA32_PEBS_ENABLE 0x000003f1
72#define MSR_IA32_DS_AREA 0x00000600 73#define MSR_IA32_DS_AREA 0x00000600
73#define MSR_IA32_PERF_CAPABILITIES 0x00000345 74#define MSR_IA32_PERF_CAPABILITIES 0x00000345
75#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
74 76
75#define MSR_MTRRfix64K_00000 0x00000250 77#define MSR_MTRRfix64K_00000 0x00000250
76#define MSR_MTRRfix16K_80000 0x00000258 78#define MSR_MTRRfix16K_80000 0x00000258
@@ -194,6 +196,10 @@
194#define MSR_AMD64_IBSBRTARGET 0xc001103b 196#define MSR_AMD64_IBSBRTARGET 0xc001103b
195#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ 197#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
196 198
199/* Fam 16h MSRs */
200#define MSR_F16H_L2I_PERF_CTL 0xc0010230
201#define MSR_F16H_L2I_PERF_CTR 0xc0010231
202
197/* Fam 15h MSRs */ 203/* Fam 15h MSRs */
198#define MSR_F15H_PERF_CTL 0xc0010200 204#define MSR_F15H_PERF_CTL 0xc0010200
199#define MSR_F15H_PERF_CTR 0xc0010201 205#define MSR_F15H_PERF_CTR 0xc0010201
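
The msr-index.h hunks only add constants (MSR_PLATFORM_INFO, MSR_PEBS_LD_LAT_THRESHOLD, and the Fam 16h L2I counter MSRs). As one hedged usage sketch, MSR_PLATFORM_INFO is often read to obtain the maximum non-turbo ratio; the bit layout used below is an assumption of this example, not something the patch states:

#include <asm/msr.h>

static unsigned int example_max_nonturbo_ratio(void)
{
	u64 plat_info;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);

	/* Assumption of this example: bits 15:8 carry the maximum
	 * non-turbo ratio on the Intel parts that expose this MSR. */
	return (plat_info >> 8) & 0xff;
}
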
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 0532f5d6e4ef..b44577bc9744 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -46,7 +46,7 @@ int acpi_suspend_lowlevel(void)
46 header->pmode_behavior = 0; 46 header->pmode_behavior = 0;
47 47
48#ifndef CONFIG_64BIT 48#ifndef CONFIG_64BIT
49 store_gdt((struct desc_ptr *)&header->pmode_gdt); 49 native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
50 50
51 if (!rdmsr_safe(MSR_EFER, 51 if (!rdmsr_safe(MSR_EFER,
52 &header->pmode_efer_low, 52 &header->pmode_efer_low,
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e3..d1daa66ab162 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -1,4 +1,4 @@
1 .section .text..page_aligned 1 .text
2#include <linux/linkage.h> 2#include <linux/linkage.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/page_types.h> 4#include <asm/page_types.h>
@@ -18,7 +18,6 @@ wakeup_pmode_return:
18 movw %ax, %gs 18 movw %ax, %gs
19 19
20 # reload the gdt, as we need the full 32 bit address 20 # reload the gdt, as we need the full 32 bit address
21 lgdt saved_gdt
22 lidt saved_idt 21 lidt saved_idt
23 lldt saved_ldt 22 lldt saved_ldt
24 ljmp $(__KERNEL_CS), $1f 23 ljmp $(__KERNEL_CS), $1f
@@ -44,7 +43,6 @@ bogus_magic:
44 43
45 44
46save_registers: 45save_registers:
47 sgdt saved_gdt
48 sidt saved_idt 46 sidt saved_idt
49 sldt saved_ldt 47 sldt saved_ldt
50 str saved_tss 48 str saved_tss
@@ -93,7 +91,6 @@ ENTRY(saved_magic) .long 0
93ENTRY(saved_eip) .long 0 91ENTRY(saved_eip) .long 0
94 92
95# saved registers 93# saved registers
96saved_gdt: .long 0,0
97saved_idt: .long 0,0 94saved_idt: .long 0,0
98saved_ldt: .long 0 95saved_ldt: .long 0
99saved_tss: .long 0 96saved_tss: .long 0
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index ef5ccca79a6c..c15cf9a25e27 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -271,7 +271,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
271 replacement = (u8 *)&a->repl_offset + a->repl_offset; 271 replacement = (u8 *)&a->repl_offset + a->repl_offset;
272 BUG_ON(a->replacementlen > a->instrlen); 272 BUG_ON(a->replacementlen > a->instrlen);
273 BUG_ON(a->instrlen > sizeof(insnbuf)); 273 BUG_ON(a->instrlen > sizeof(insnbuf));
274 BUG_ON(a->cpuid >= NCAPINTS*32); 274 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
275 if (!boot_cpu_has(a->cpuid)) 275 if (!boot_cpu_has(a->cpuid))
276 continue; 276 continue;
277 277
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index aadf3359e2a7..3048ded1b598 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,12 +20,14 @@ const struct pci_device_id amd_nb_misc_ids[] = {
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, 21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
22 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, 22 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
23 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
23 {} 24 {}
24}; 25};
25EXPORT_SYMBOL(amd_nb_misc_ids); 26EXPORT_SYMBOL(amd_nb_misc_ids);
26 27
27static struct pci_device_id amd_nb_link_ids[] = { 28static const struct pci_device_id amd_nb_link_ids[] = {
28 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, 29 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
30 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
29 {} 31 {}
30}; 32};
31 33
@@ -81,7 +83,6 @@ int amd_cache_northbridges(void)
81 next_northbridge(link, amd_nb_link_ids); 83 next_northbridge(link, amd_nb_link_ids);
82 } 84 }
83 85
84 /* some CPU families (e.g. family 0x11) do not support GART */
85 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 86 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
86 boot_cpu_data.x86 == 0x15) 87 boot_cpu_data.x86 == 0x15)
87 amd_northbridges.flags |= AMD_NB_GART; 88 amd_northbridges.flags |= AMD_NB_GART;
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index d5fd66f0d4cd..fd972a3e4cbb 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -87,7 +87,7 @@ static u32 __init allocate_aperture(void)
87 */ 87 */
88 addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, 88 addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
89 aper_size, aper_size); 89 aper_size, aper_size);
90 if (!addr || addr + aper_size > GART_MAX_ADDR) { 90 if (!addr) {
91 printk(KERN_ERR 91 printk(KERN_ERR
92 "Cannot allocate aperture memory hole (%lx,%uK)\n", 92 "Cannot allocate aperture memory hole (%lx,%uK)\n",
93 addr, aper_size>>10); 93 addr, aper_size>>10);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0e067d3d96c..b0684e4a73aa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,6 @@ CFLAGS_common.o := $(nostackp)
14 14
15obj-y := intel_cacheinfo.o scattered.o topology.o 15obj-y := intel_cacheinfo.o scattered.o topology.o
16obj-y += proc.o capflags.o powerflags.o common.o 16obj-y += proc.o capflags.o powerflags.o common.o
17obj-y += vmware.o hypervisor.o mshyperv.o
18obj-y += rdrand.o 17obj-y += rdrand.o
19obj-y += match.o 18obj-y += match.o
20 19
@@ -31,7 +30,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
31obj-$(CONFIG_PERF_EVENTS) += perf_event.o 30obj-$(CONFIG_PERF_EVENTS) += perf_event.o
32 31
33ifdef CONFIG_PERF_EVENTS 32ifdef CONFIG_PERF_EVENTS
34obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o 33obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o
35obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o 34obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
36obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o 35obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
37obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o 36obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
@@ -42,11 +41,13 @@ obj-$(CONFIG_MTRR) += mtrr/
42 41
43obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o 42obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
44 43
44obj-$(CONFIG_HYPERVISOR_GUEST) += vmware.o hypervisor.o mshyperv.o
45
45quiet_cmd_mkcapflags = MKCAP $@ 46quiet_cmd_mkcapflags = MKCAP $@
46 cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ 47 cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
47 48
48cpufeature = $(src)/../../include/asm/cpufeature.h 49cpufeature = $(src)/../../include/asm/cpufeature.h
49 50
50targets += capflags.c 51targets += capflags.c
51$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE 52$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
52 $(call if_changed,mkcapflags) 53 $(call if_changed,mkcapflags)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fa96eb0d02fb..5013a48d1aff 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -20,11 +20,11 @@
20 20
21static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) 21static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
22{ 22{
23 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
24 u32 gprs[8] = { 0 }; 23 u32 gprs[8] = { 0 };
25 int err; 24 int err;
26 25
27 WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); 26 WARN_ONCE((boot_cpu_data.x86 != 0xf),
27 "%s should only be used on K8!\n", __func__);
28 28
29 gprs[1] = msr; 29 gprs[1] = msr;
30 gprs[7] = 0x9c5a203a; 30 gprs[7] = 0x9c5a203a;
@@ -38,10 +38,10 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
38 38
39static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) 39static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
40{ 40{
41 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
42 u32 gprs[8] = { 0 }; 41 u32 gprs[8] = { 0 };
43 42
44 WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__); 43 WARN_ONCE((boot_cpu_data.x86 != 0xf),
44 "%s should only be used on K8!\n", __func__);
45 45
46 gprs[0] = (u32)val; 46 gprs[0] = (u32)val;
47 gprs[1] = msr; 47 gprs[1] = msr;
@@ -192,11 +192,11 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
192 /* Athlon 660/661 is valid. */ 192 /* Athlon 660/661 is valid. */
193 if ((c->x86_model == 6) && ((c->x86_mask == 0) || 193 if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
194 (c->x86_mask == 1))) 194 (c->x86_mask == 1)))
195 goto valid_k7; 195 return;
196 196
197 /* Duron 670 is valid */ 197 /* Duron 670 is valid */
198 if ((c->x86_model == 7) && (c->x86_mask == 0)) 198 if ((c->x86_model == 7) && (c->x86_mask == 0))
199 goto valid_k7; 199 return;
200 200
201 /* 201 /*
202 * Athlon 662, Duron 671, and Athlon >model 7 have capability 202 * Athlon 662, Duron 671, and Athlon >model 7 have capability
@@ -209,7 +209,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
209 ((c->x86_model == 7) && (c->x86_mask >= 1)) || 209 ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
210 (c->x86_model > 7)) 210 (c->x86_model > 7))
211 if (cpu_has_mp) 211 if (cpu_has_mp)
212 goto valid_k7; 212 return;
213 213
214 /* If we get here, not a certified SMP capable AMD system. */ 214 /* If we get here, not a certified SMP capable AMD system. */
215 215
@@ -220,9 +220,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
220 WARN_ONCE(1, "WARNING: This combination of AMD" 220 WARN_ONCE(1, "WARNING: This combination of AMD"
221 " processors is not suitable for SMP.\n"); 221 " processors is not suitable for SMP.\n");
222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); 222 add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
223
224valid_k7:
225 ;
226} 223}
227 224
228static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) 225static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
@@ -513,6 +510,10 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
513#endif 510#endif
514} 511}
515 512
513static const int amd_erratum_383[];
514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum);
516
516static void __cpuinit init_amd(struct cpuinfo_x86 *c) 517static void __cpuinit init_amd(struct cpuinfo_x86 *c)
517{ 518{
518 u32 dummy; 519 u32 dummy;
@@ -727,8 +728,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
727 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value); 728 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
728 value &= ~(1ULL << 24); 729 value &= ~(1ULL << 24);
729 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
731
732 if (cpu_has_amd_erratum(amd_erratum_383))
733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
730 } 734 }
731 735
736 if (cpu_has_amd_erratum(amd_erratum_400))
737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
738
732 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
733} 740}
734 741
@@ -847,8 +854,7 @@ cpu_dev_register(amd_cpu_dev);
847 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that 854 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
848 * have an OSVW id assigned, which it takes as first argument. Both take a 855 * have an OSVW id assigned, which it takes as first argument. Both take a
849 * variable number of family-specific model-stepping ranges created by 856 * variable number of family-specific model-stepping ranges created by
850 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const 857 * AMD_MODEL_RANGE().
851 * int[] in arch/x86/include/asm/processor.h.
852 * 858 *
853 * Example: 859 * Example:
854 * 860 *
@@ -858,16 +864,22 @@ cpu_dev_register(amd_cpu_dev);
858 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); 864 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
859 */ 865 */
860 866
861const int amd_erratum_400[] = 867#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
868#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
869#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
870 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
871#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
872#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
873#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
874
875static const int amd_erratum_400[] =
862 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), 876 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
863 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); 877 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
864EXPORT_SYMBOL_GPL(amd_erratum_400);
865 878
866const int amd_erratum_383[] = 879static const int amd_erratum_383[] =
867 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 880 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
868EXPORT_SYMBOL_GPL(amd_erratum_383);
869 881
870bool cpu_has_amd_erratum(const int *erratum) 882static bool cpu_has_amd_erratum(const int *erratum)
871{ 883{
872 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); 884 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
873 int osvw_id = *erratum++; 885 int osvw_id = *erratum++;
@@ -908,5 +920,3 @@ bool cpu_has_amd_erratum(const int *erratum)
908 920
909 return false; 921 return false;
910} 922}
911
912EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
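
The erratum tables and the AMD_MODEL_RANGE()/AMD_OSVW_ERRATUM() helpers now live privately in amd.c, with static forward declarations ahead of init_amd(). The range macro packs family, start model/stepping and end model/stepping into a single int; a worked example of the encoding for the first range of amd_erratum_400 above:

/* AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf) expands to:
 *   (0x10 << 24) = 0x10000000   family
 * | (0x02 << 16) = 0x00020000   start model
 * | (0x01 << 12) = 0x00001000   start stepping
 * | (0xff <<  4) = 0x00000ff0   end model
 * |  0x0f        = 0x0000000f   end stepping
 *   -------------------------
 *                  0x10021fff
 *
 * The accessors recover the pieces again:
 *   AMD_MODEL_RANGE_FAMILY(0x10021fff) == 0x10
 *   AMD_MODEL_RANGE_START(0x10021fff)  == 0x021  (model 0x2, stepping 0x1)
 *   AMD_MODEL_RANGE_END(0x10021fff)    == 0xfff  (model 0xff, stepping 0xf)
 */
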
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index af6455e3fcc9..4112be9a4659 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -59,7 +59,7 @@ static void __init check_fpu(void)
59 * trap_init() enabled FXSR and company _before_ testing for FP 59 * trap_init() enabled FXSR and company _before_ testing for FP
60 * problems here. 60 * problems here.
61 * 61 *
62 * Test for the divl bug.. 62 * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
63 */ 63 */
64 __asm__("fninit\n\t" 64 __asm__("fninit\n\t"
65 "fldl %1\n\t" 65 "fldl %1\n\t"
@@ -75,26 +75,12 @@ static void __init check_fpu(void)
75 75
76 kernel_fpu_end(); 76 kernel_fpu_end();
77 77
78 boot_cpu_data.fdiv_bug = fdiv_bug; 78 if (fdiv_bug) {
79 if (boot_cpu_data.fdiv_bug) 79 set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
80 pr_warn("Hmm, FPU with FDIV bug\n"); 80 pr_warn("Hmm, FPU with FDIV bug\n");
81 }
81} 82}
82 83
83/*
84 * Check whether we are able to run this kernel safely on SMP.
85 *
86 * - i386 is no longer supported.
87 * - In order to run on anything without a TSC, we need to be
88 * compiled for a i486.
89 */
90
91static void __init check_config(void)
92{
93 if (boot_cpu_data.x86 < 4)
94 panic("Kernel requires i486+ for 'invlpg' and other features");
95}
96
97
98void __init check_bugs(void) 84void __init check_bugs(void)
99{ 85{
100 identify_boot_cpu(); 86 identify_boot_cpu();
@@ -102,7 +88,17 @@ void __init check_bugs(void)
102 pr_info("CPU: "); 88 pr_info("CPU: ");
103 print_cpu_info(&boot_cpu_data); 89 print_cpu_info(&boot_cpu_data);
104#endif 90#endif
105 check_config(); 91
92 /*
93 * Check whether we are able to run this kernel safely on SMP.
94 *
95 * - i386 is no longer supported.
96 * - In order to run on anything without a TSC, we need to be
97 * compiled for a i486.
98 */
99 if (boot_cpu_data.x86 < 4)
100 panic("Kernel requires i486+ for 'invlpg' and other features");
101
106 init_utsname()->machine[1] = 102 init_utsname()->machine[1] =
107 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 103 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
108 alternative_instructions(); 104 alternative_instructions();
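
check_fpu() in the context above still runs the same FDIV probe; only the result now lands in X86_BUG_FDIV instead of the removed fdiv_bug field. As a hedged aside, the probe is the classic 4195835/3145727 identity; a plain userspace sketch of the idea (not the kernel's inline-asm sequence, and the exact non-zero residue depends on the FPU and precision mode):

#include <stdio.h>

int main(void)
{
	/* Classic FDIV check: for these constants the residue should be 0
	 * on a correct FPU and non-zero (famously about 256) on an
	 * affected Pentium. */
	volatile double x = 4195835.0, y = 3145727.0;
	double residue = x - (x / y) * y;

	printf("residue = %g -> %s\n", residue,
	       residue == 0.0 ? "FPU looks fine" : "FDIV bug suspected");
	return 0;
}
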
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d814772c5bed..22018f70a671 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -920,6 +920,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
920 /* AND the already accumulated flags with these */ 920 /* AND the already accumulated flags with these */
921 for (i = 0; i < NCAPINTS; i++) 921 for (i = 0; i < NCAPINTS; i++)
922 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 922 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
923
924 /* OR, i.e. replicate the bug flags */
925 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
926 c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
923 } 927 }
924 928
925 /* Init Machine Check Exception if available. */ 929 /* Init Machine Check Exception if available. */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 4fbd384fb645..d048d5ca43c1 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -249,7 +249,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
249 /* Emulate MTRRs using Cyrix's ARRs. */ 249 /* Emulate MTRRs using Cyrix's ARRs. */
250 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); 250 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
251 /* 6x86's contain this bug */ 251 /* 6x86's contain this bug */
252 c->coma_bug = 1; 252 set_cpu_bug(c, X86_BUG_COMA);
253 break; 253 break;
254 254
255 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 255 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
@@ -317,7 +317,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
317 /* Enable MMX extensions (App note 108) */ 317 /* Enable MMX extensions (App note 108) */
318 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); 318 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
319 } else { 319 } else {
320 c->coma_bug = 1; /* 6x86MX, it has the bug. */ 320 /* A 6x86MX - it has the bug. */
321 set_cpu_bug(c, X86_BUG_COMA);
321 } 322 }
322 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0; 323 tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
323 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7]; 324 Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1905ce98bee0..9b0c441c03f5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -96,6 +96,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
96 sched_clock_stable = 1; 96 sched_clock_stable = 1;
97 } 97 }
98 98
99 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
100 if (c->x86 == 6) {
101 switch (c->x86_model) {
102 case 0x27: /* Penwell */
103 case 0x35: /* Cloverview */
104 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
105 break;
106 default:
107 break;
108 }
109 }
110
99 /* 111 /*
100 * There is a known erratum on Pentium III and Core Solo 112 * There is a known erratum on Pentium III and Core Solo
101 * and Core Duo CPUs. 113 * and Core Duo CPUs.
@@ -164,20 +176,6 @@ int __cpuinit ppro_with_ram_bug(void)
164 return 0; 176 return 0;
165} 177}
166 178
167#ifdef CONFIG_X86_F00F_BUG
168static void __cpuinit trap_init_f00f_bug(void)
169{
170 __set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
171
172 /*
173 * Update the IDT descriptor and reload the IDT so that
174 * it uses the read-only mapped virtual address.
175 */
176 idt_descr.address = fix_to_virt(FIX_F00F_IDT);
177 load_idt(&idt_descr);
178}
179#endif
180
181static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) 179static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
182{ 180{
183 /* calling is from identify_secondary_cpu() ? */ 181 /* calling is from identify_secondary_cpu() ? */
@@ -206,16 +204,14 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
206 /* 204 /*
207 * All current models of Pentium and Pentium with MMX technology CPUs 205 * All current models of Pentium and Pentium with MMX technology CPUs
208 * have the F0 0F bug, which lets nonprivileged users lock up the 206 * have the F0 0F bug, which lets nonprivileged users lock up the
209 * system. 207 * system. Announce that the fault handler will be checking for it.
210 * Note that the workaround only should be initialized once...
211 */ 208 */
212 c->f00f_bug = 0; 209 clear_cpu_bug(c, X86_BUG_F00F);
213 if (!paravirt_enabled() && c->x86 == 5) { 210 if (!paravirt_enabled() && c->x86 == 5) {
214 static int f00f_workaround_enabled; 211 static int f00f_workaround_enabled;
215 212
216 c->f00f_bug = 1; 213 set_cpu_bug(c, X86_BUG_F00F);
217 if (!f00f_workaround_enabled) { 214 if (!f00f_workaround_enabled) {
218 trap_init_f00f_bug();
219 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); 215 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
220 f00f_workaround_enabled = 1; 216 f00f_workaround_enabled = 1;
221 } 217 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7bc126346ace..9239504b41cb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2358,7 +2358,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2358 2358
2359 if (action == CPU_POST_DEAD) { 2359 if (action == CPU_POST_DEAD) {
2360 /* intentionally ignoring frozen here */ 2360 /* intentionally ignoring frozen here */
2361 cmci_rediscover(cpu); 2361 cmci_rediscover();
2362 } 2362 }
2363 2363
2364 return NOTIFY_OK; 2364 return NOTIFY_OK;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1ac581f38dfa..9cb52767999a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -33,7 +33,6 @@
33#include <asm/mce.h> 33#include <asm/mce.h>
34#include <asm/msr.h> 34#include <asm/msr.h>
35 35
36#define NR_BANKS 6
37#define NR_BLOCKS 9 36#define NR_BLOCKS 9
38#define THRESHOLD_MAX 0xFFF 37#define THRESHOLD_MAX 0xFFF
39#define INT_TYPE_APIC 0x00020000 38#define INT_TYPE_APIC 0x00020000
@@ -57,12 +56,7 @@ static const char * const th_names[] = {
57 "execution_unit", 56 "execution_unit",
58}; 57};
59 58
60static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); 59static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
61
62static unsigned char shared_bank[NR_BANKS] = {
63 0, 0, 0, 0, 1
64};
65
66static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ 60static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
67 61
68static void amd_threshold_interrupt(void); 62static void amd_threshold_interrupt(void);
@@ -79,6 +73,12 @@ struct thresh_restart {
79 u16 old_limit; 73 u16 old_limit;
80}; 74};
81 75
76static inline bool is_shared_bank(int bank)
77{
78 /* Bank 4 is for northbridge reporting and is thus shared */
79 return (bank == 4);
80}
81
82static const char * const bank4_names(struct threshold_block *b) 82static const char * const bank4_names(struct threshold_block *b)
83{ 83{
84 switch (b->address) { 84 switch (b->address) {
@@ -214,7 +214,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
214 unsigned int bank, block; 214 unsigned int bank, block;
215 int offset = -1; 215 int offset = -1;
216 216
217 for (bank = 0; bank < NR_BANKS; ++bank) { 217 for (bank = 0; bank < mca_cfg.banks; ++bank) {
218 for (block = 0; block < NR_BLOCKS; ++block) { 218 for (block = 0; block < NR_BLOCKS; ++block) {
219 if (block == 0) 219 if (block == 0)
220 address = MSR_IA32_MC0_MISC + bank * 4; 220 address = MSR_IA32_MC0_MISC + bank * 4;
@@ -276,7 +276,7 @@ static void amd_threshold_interrupt(void)
276 mce_setup(&m); 276 mce_setup(&m);
277 277
278 /* assume first bank caused it */ 278 /* assume first bank caused it */
279 for (bank = 0; bank < NR_BANKS; ++bank) { 279 for (bank = 0; bank < mca_cfg.banks; ++bank) {
280 if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) 280 if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
281 continue; 281 continue;
282 for (block = 0; block < NR_BLOCKS; ++block) { 282 for (block = 0; block < NR_BLOCKS; ++block) {
@@ -467,7 +467,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
467 u32 low, high; 467 u32 low, high;
468 int err; 468 int err;
469 469
470 if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) 470 if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
471 return 0; 471 return 0;
472 472
473 if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) 473 if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
@@ -575,7 +575,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
575 const char *name = th_names[bank]; 575 const char *name = th_names[bank];
576 int err = 0; 576 int err = 0;
577 577
578 if (shared_bank[bank]) { 578 if (is_shared_bank(bank)) {
579 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 579 nb = node_to_amd_nb(amd_get_nb_id(cpu));
580 580
581 /* threshold descriptor already initialized on this node? */ 581 /* threshold descriptor already initialized on this node? */
@@ -609,7 +609,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
609 609
610 per_cpu(threshold_banks, cpu)[bank] = b; 610 per_cpu(threshold_banks, cpu)[bank] = b;
611 611
612 if (shared_bank[bank]) { 612 if (is_shared_bank(bank)) {
613 atomic_set(&b->cpus, 1); 613 atomic_set(&b->cpus, 1);
614 614
615 /* nb is already initialized, see above */ 615 /* nb is already initialized, see above */
@@ -635,9 +635,17 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
635static __cpuinit int threshold_create_device(unsigned int cpu) 635static __cpuinit int threshold_create_device(unsigned int cpu)
636{ 636{
637 unsigned int bank; 637 unsigned int bank;
638 struct threshold_bank **bp;
638 int err = 0; 639 int err = 0;
639 640
640 for (bank = 0; bank < NR_BANKS; ++bank) { 641 bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
642 GFP_KERNEL);
643 if (!bp)
644 return -ENOMEM;
645
646 per_cpu(threshold_banks, cpu) = bp;
647
648 for (bank = 0; bank < mca_cfg.banks; ++bank) {
641 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 649 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
642 continue; 650 continue;
643 err = threshold_create_bank(cpu, bank); 651 err = threshold_create_bank(cpu, bank);
@@ -691,7 +699,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
691 if (!b->blocks) 699 if (!b->blocks)
692 goto free_out; 700 goto free_out;
693 701
694 if (shared_bank[bank]) { 702 if (is_shared_bank(bank)) {
695 if (!atomic_dec_and_test(&b->cpus)) { 703 if (!atomic_dec_and_test(&b->cpus)) {
696 __threshold_remove_blocks(b); 704 __threshold_remove_blocks(b);
697 per_cpu(threshold_banks, cpu)[bank] = NULL; 705 per_cpu(threshold_banks, cpu)[bank] = NULL;
@@ -719,11 +727,12 @@ static void threshold_remove_device(unsigned int cpu)
719{ 727{
720 unsigned int bank; 728 unsigned int bank;
721 729
722 for (bank = 0; bank < NR_BANKS; ++bank) { 730 for (bank = 0; bank < mca_cfg.banks; ++bank) {
723 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 731 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
724 continue; 732 continue;
725 threshold_remove_bank(cpu, bank); 733 threshold_remove_bank(cpu, bank);
726 } 734 }
735 kfree(per_cpu(threshold_banks, cpu));
727} 736}
728 737
729/* get notified when a cpu comes on/off */ 738/* get notified when a cpu comes on/off */
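
Since the per-CPU threshold_banks array is now sized from mca_cfg.banks at runtime rather than the fixed NR_BANKS, threshold_create_device() allocates it up front and threshold_remove_device() frees it after the banks are torn down. A condensed sketch of that lifecycle as the patch sets it up (error paths trimmed):

/* Condensed sketch of the new per-CPU bank array lifecycle. */
static int example_create(unsigned int cpu)
{
	struct threshold_bank **bp;
	unsigned int bank;

	bp = kzalloc(sizeof(*bp) * mca_cfg.banks, GFP_KERNEL);
	if (!bp)
		return -ENOMEM;
	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		/* threshold_create_bank(cpu, bank) ... */
	}
	return 0;
}

static void example_remove(unsigned int cpu)
{
	/* individual banks were already removed above */
	kfree(per_cpu(threshold_banks, cpu));
}
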
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 402c454fbff0..ae1697c2afe3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -285,39 +285,24 @@ void cmci_clear(void)
285 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 285 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
286} 286}
287 287
288static long cmci_rediscover_work_func(void *arg) 288static void cmci_rediscover_work_func(void *arg)
289{ 289{
290 int banks; 290 int banks;
291 291
292 /* Recheck banks in case CPUs don't all have the same */ 292 /* Recheck banks in case CPUs don't all have the same */
293 if (cmci_supported(&banks)) 293 if (cmci_supported(&banks))
294 cmci_discover(banks); 294 cmci_discover(banks);
295
296 return 0;
297} 295}
298 296
299/* 297/* After a CPU went down cycle through all the others and rediscover */
300 * After a CPU went down cycle through all the others and rediscover 298void cmci_rediscover(void)
301 * Must run in process context.
302 */
303void cmci_rediscover(int dying)
304{ 299{
305 int cpu, banks; 300 int banks;
306 301
307 if (!cmci_supported(&banks)) 302 if (!cmci_supported(&banks))
308 return; 303 return;
309 304
310 for_each_online_cpu(cpu) { 305 on_each_cpu(cmci_rediscover_work_func, NULL, 1);
311 if (cpu == dying)
312 continue;
313
314 if (cpu == smp_processor_id()) {
315 cmci_rediscover_work_func(NULL);
316 continue;
317 }
318
319 work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
320 }
321} 306}
322 307
323/* 308/*
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
deleted file mode 100644
index 091972ef49de..000000000000
--- a/arch/x86/kernel/cpu/mkcapflags.pl
+++ /dev/null
@@ -1,48 +0,0 @@
1#!/usr/bin/perl -w
2#
3# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
4#
5
6($in, $out) = @ARGV;
7
8open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
9open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
10
11print OUT "#ifndef _ASM_X86_CPUFEATURE_H\n";
12print OUT "#include <asm/cpufeature.h>\n";
13print OUT "#endif\n";
14print OUT "\n";
15print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
16
17%features = ();
18$err = 0;
19
20while (defined($line = <IN>)) {
21 if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
22 $macro = $1;
23 $feature = "\L$2";
24 $tail = $3;
25 if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
26 $feature = "\L$1";
27 }
28
29 next if ($feature eq '');
30
31 if ($features{$feature}++) {
32 print STDERR "$in: duplicate feature name: $feature\n";
33 $err++;
34 }
35 printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
36 }
37}
38print OUT "};\n";
39
40close(IN);
41close(OUT);
42
43if ($err) {
44 unlink($out);
45 exit(1);
46}
47
48exit(0);
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
new file mode 100644
index 000000000000..2bf616505499
--- /dev/null
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -0,0 +1,41 @@
1#!/bin/sh
2#
3# Generate the x86_cap_flags[] array from include/asm/cpufeature.h
4#
5
6IN=$1
7OUT=$2
8
9TABS="$(printf '\t\t\t\t\t')"
10trap 'rm "$OUT"' EXIT
11
12(
13 echo "#ifndef _ASM_X86_CPUFEATURE_H"
14 echo "#include <asm/cpufeature.h>"
15 echo "#endif"
16 echo ""
17 echo "const char * const x86_cap_flags[NCAPINTS*32] = {"
18
19 # Iterate through any input lines starting with #define X86_FEATURE_
20 sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN |
21 while read i
22 do
23 # Name is everything up to the first whitespace
24 NAME="$(echo "$i" | sed 's/ .*//')"
25
26 # If the /* comment */ starts with a quote string, grab that.
27 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
28 [ -z "$VALUE" ] && VALUE="\"$NAME\""
29 [ "$VALUE" == '""' ] && continue
30
31 # Name is uppercase, VALUE is all lowercase
32 VALUE="$(echo "$VALUE" | tr A-Z a-z)"
33
34 TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 ))
35 printf "\t[%s]%.*s = %s,\n" \
36 "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE"
37 done
38 echo "};"
39) > $OUT
40
41trap - EXIT
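
For illustration, roughly what the generated capflags.c looks like after the script above runs over <asm/cpufeature.h>. The entries shown are a hand-picked sample, not literal output from this tree; the rule is that the lower-cased macro suffix becomes the string unless the quoted comment on the #define supplies a name (X86_FEATURE_XMM, for example, is published as "sse"), and features whose quoted name is empty are skipped.

#ifndef _ASM_X86_CPUFEATURE_H
#include <asm/cpufeature.h>
#endif

const char * const x86_cap_flags[NCAPINTS*32] = {
	[X86_FEATURE_FPU]		 = "fpu",
	[X86_FEATURE_PGE]		 = "pge",
	[X86_FEATURE_XMM]		 = "sse",
	[X86_FEATURE_HYPERVISOR]	 = "hypervisor",
};
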
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a7d26d83fb70..8f4be53ea04b 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
35 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 35 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
36 return false; 36 return false;
37 37
38 /*
39 * Xen emulates Hyper-V to support enlightened Windows.
40 * Check to see first if we are on a Xen Hypervisor.
41 */
42 if (xen_cpuid_base())
43 return false;
44
45 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 38 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
46 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); 39 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
47 40
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
82 75
83 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) 76 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
84 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); 77 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
85#if IS_ENABLED(CONFIG_HYPERV)
86 /*
87 * Setup the IDT for hypervisor callback.
88 */
89 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
90#endif
91} 78}
92 79
93const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 80const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
103 90
104void hv_register_vmbus_handler(int irq, irq_handler_t handler) 91void hv_register_vmbus_handler(int irq, irq_handler_t handler)
105{ 92{
93 /*
94 * Setup the IDT for hypervisor callback.
95 */
96 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
97
106 vmbus_irq = irq; 98 vmbus_irq = irq;
107 vmbus_isr = handler; 99 vmbus_isr = handler;
108} 100}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index bf0f01aea994..1025f3c99d20 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -180,8 +180,9 @@ static void release_pmc_hardware(void) {}
180 180
181static bool check_hw_exists(void) 181static bool check_hw_exists(void)
182{ 182{
183 u64 val, val_new = ~0; 183 u64 val, val_fail, val_new= ~0;
184 int i, reg, ret = 0; 184 int i, reg, reg_fail, ret = 0;
185 int bios_fail = 0;
185 186
186 /* 187 /*
187 * Check to see if the BIOS enabled any of the counters, if so 188 * Check to see if the BIOS enabled any of the counters, if so
@@ -192,8 +193,11 @@ static bool check_hw_exists(void)
192 ret = rdmsrl_safe(reg, &val); 193 ret = rdmsrl_safe(reg, &val);
193 if (ret) 194 if (ret)
194 goto msr_fail; 195 goto msr_fail;
195 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) 196 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
196 goto bios_fail; 197 bios_fail = 1;
198 val_fail = val;
199 reg_fail = reg;
200 }
197 } 201 }
198 202
199 if (x86_pmu.num_counters_fixed) { 203 if (x86_pmu.num_counters_fixed) {
@@ -202,8 +206,11 @@ static bool check_hw_exists(void)
202 if (ret) 206 if (ret)
203 goto msr_fail; 207 goto msr_fail;
204 for (i = 0; i < x86_pmu.num_counters_fixed; i++) { 208 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
205 if (val & (0x03 << i*4)) 209 if (val & (0x03 << i*4)) {
206 goto bios_fail; 210 bios_fail = 1;
211 val_fail = val;
212 reg_fail = reg;
213 }
207 } 214 }
208 } 215 }
209 216
@@ -221,14 +228,13 @@ static bool check_hw_exists(void)
221 if (ret || val != val_new) 228 if (ret || val != val_new)
222 goto msr_fail; 229 goto msr_fail;
223 230
224 return true;
225
226bios_fail:
227 /* 231 /*
228 * We still allow the PMU driver to operate: 232 * We still allow the PMU driver to operate:
229 */ 233 */
230 printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n"); 234 if (bios_fail) {
231 printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val); 235 printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
236 printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
237 }
232 238
233 return true; 239 return true;
234 240
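
The hunks above replace the early "goto bios_fail" with a bios_fail flag, so the probe notes the offending register and value but still runs the MSR read/write self-test before printing anything. A tiny user-space analogue of that control-flow change (invented values, no kernel APIs):

#include <stdio.h>

int main(void)
{
	/* pretend these are eventsel values the BIOS left behind */
	unsigned long long regs[] = { 0x0, 0x400000, 0x0 };
	unsigned long long val_fail = 0;
	int i, bios_fail = 0, reg_fail = 0;

	for (i = 0; i < 3; i++) {
		if (regs[i] & 0x400000ULL) {	/* enable bit set */
			bios_fail = 1;		/* remember, keep going */
			val_fail = regs[i];
			reg_fail = i;
		}
	}

	/* ... the read/write self-test would run here ... */

	if (bios_fail)
		printf("register %d already programmed (0x%llx)\n",
		       reg_fail, val_fail);
	return 0;
}
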
@@ -1316,9 +1322,16 @@ static struct attribute_group x86_pmu_format_group = {
1316 */ 1322 */
1317static void __init filter_events(struct attribute **attrs) 1323static void __init filter_events(struct attribute **attrs)
1318{ 1324{
1325 struct device_attribute *d;
1326 struct perf_pmu_events_attr *pmu_attr;
1319 int i, j; 1327 int i, j;
1320 1328
1321 for (i = 0; attrs[i]; i++) { 1329 for (i = 0; attrs[i]; i++) {
1330 d = (struct device_attribute *)attrs[i];
1331 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1332 /* str trumps id */
1333 if (pmu_attr->event_str)
1334 continue;
1322 if (x86_pmu.event_map(i)) 1335 if (x86_pmu.event_map(i))
1323 continue; 1336 continue;
1324 1337
@@ -1330,22 +1343,45 @@ static void __init filter_events(struct attribute **attrs)
1330 } 1343 }
1331} 1344}
1332 1345
1333static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, 1346/* Merge two pointer arrays */
1347static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
1348{
1349 struct attribute **new;
1350 int j, i;
1351
1352 for (j = 0; a[j]; j++)
1353 ;
1354 for (i = 0; b[i]; i++)
1355 j++;
1356 j++;
1357
1358 new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1359 if (!new)
1360 return NULL;
1361
1362 j = 0;
1363 for (i = 0; a[i]; i++)
1364 new[j++] = a[i];
1365 for (i = 0; b[i]; i++)
1366 new[j++] = b[i];
1367 new[j] = NULL;
1368
1369 return new;
1370}
1371
1372ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1334 char *page) 1373 char *page)
1335{ 1374{
1336 struct perf_pmu_events_attr *pmu_attr = \ 1375 struct perf_pmu_events_attr *pmu_attr = \
1337 container_of(attr, struct perf_pmu_events_attr, attr); 1376 container_of(attr, struct perf_pmu_events_attr, attr);
1338
1339 u64 config = x86_pmu.event_map(pmu_attr->id); 1377 u64 config = x86_pmu.event_map(pmu_attr->id);
1340 return x86_pmu.events_sysfs_show(page, config);
1341}
1342 1378
1343#define EVENT_VAR(_id) event_attr_##_id 1379 /* string trumps id */
1344#define EVENT_PTR(_id) &event_attr_##_id.attr.attr 1380 if (pmu_attr->event_str)
1381 return sprintf(page, "%s", pmu_attr->event_str);
1345 1382
1346#define EVENT_ATTR(_name, _id) \ 1383 return x86_pmu.events_sysfs_show(page, config);
1347 PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \ 1384}
1348 events_sysfs_show)
1349 1385
1350EVENT_ATTR(cpu-cycles, CPU_CYCLES ); 1386EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1351EVENT_ATTR(instructions, INSTRUCTIONS ); 1387EVENT_ATTR(instructions, INSTRUCTIONS );
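
merge_attr() in the hunk above simply concatenates two NULL-terminated pointer arrays into one freshly allocated, NULL-terminated array: the first walk counts a[], the second adds b[]'s entries, and the extra j++ reserves the terminator slot. A standalone illustration of the same walk with strings (hypothetical data, plain C rather than kernel allocators):

#include <stdio.h>
#include <stdlib.h>

static const char **merge(const char **a, const char **b)
{
	const char **new;
	int i, j;

	for (j = 0; a[j]; j++)
		;
	for (i = 0; b[i]; i++)
		j++;
	j++;			/* room for the NULL terminator */

	new = malloc(sizeof(*new) * j);
	if (!new)
		return NULL;

	j = 0;
	for (i = 0; a[i]; i++)
		new[j++] = a[i];
	for (i = 0; b[i]; i++)
		new[j++] = b[i];
	new[j] = NULL;
	return new;
}

int main(void)
{
	const char *a[] = { "cpu-cycles", "instructions", NULL };
	const char *b[] = { "mem-loads", "mem-stores", NULL };
	const char **all = merge(a, b);
	int i;

	for (i = 0; all && all[i]; i++)
		printf("%s\n", all[i]);
	free(all);
	return 0;
}
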
@@ -1459,16 +1495,27 @@ static int __init init_hw_perf_events(void)
1459 1495
1460 unconstrained = (struct event_constraint) 1496 unconstrained = (struct event_constraint)
1461 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 1497 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1462 0, x86_pmu.num_counters, 0); 1498 0, x86_pmu.num_counters, 0, 0);
1463 1499
1464 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1500 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1465 x86_pmu_format_group.attrs = x86_pmu.format_attrs; 1501 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1466 1502
1503 if (x86_pmu.event_attrs)
1504 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1505
1467 if (!x86_pmu.events_sysfs_show) 1506 if (!x86_pmu.events_sysfs_show)
1468 x86_pmu_events_group.attrs = &empty_attrs; 1507 x86_pmu_events_group.attrs = &empty_attrs;
1469 else 1508 else
1470 filter_events(x86_pmu_events_group.attrs); 1509 filter_events(x86_pmu_events_group.attrs);
1471 1510
1511 if (x86_pmu.cpu_events) {
1512 struct attribute **tmp;
1513
1514 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1515 if (!WARN_ON(!tmp))
1516 x86_pmu_events_group.attrs = tmp;
1517 }
1518
1472 pr_info("... version: %d\n", x86_pmu.version); 1519 pr_info("... version: %d\n", x86_pmu.version);
1473 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); 1520 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1474 pr_info("... generic registers: %d\n", x86_pmu.num_counters); 1521 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7f5c75c2afdd..ba9aadfa683b 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -46,6 +46,7 @@ enum extra_reg_type {
46 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ 46 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
47 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ 47 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
48 EXTRA_REG_LBR = 2, /* lbr_select */ 48 EXTRA_REG_LBR = 2, /* lbr_select */
49 EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */
49 50
50 EXTRA_REG_MAX /* number of entries needed */ 51 EXTRA_REG_MAX /* number of entries needed */
51}; 52};
@@ -59,7 +60,13 @@ struct event_constraint {
59 u64 cmask; 60 u64 cmask;
60 int weight; 61 int weight;
61 int overlap; 62 int overlap;
63 int flags;
62}; 64};
65/*
66 * struct event_constraint flags
67 */
68#define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */
69#define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */
63 70
64struct amd_nb { 71struct amd_nb {
65 int nb_id; /* NorthBridge id */ 72 int nb_id; /* NorthBridge id */
@@ -170,16 +177,17 @@ struct cpu_hw_events {
170 void *kfree_on_online; 177 void *kfree_on_online;
171}; 178};
172 179
173#define __EVENT_CONSTRAINT(c, n, m, w, o) {\ 180#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
174 { .idxmsk64 = (n) }, \ 181 { .idxmsk64 = (n) }, \
175 .code = (c), \ 182 .code = (c), \
176 .cmask = (m), \ 183 .cmask = (m), \
177 .weight = (w), \ 184 .weight = (w), \
178 .overlap = (o), \ 185 .overlap = (o), \
186 .flags = f, \
179} 187}
180 188
181#define EVENT_CONSTRAINT(c, n, m) \ 189#define EVENT_CONSTRAINT(c, n, m) \
182 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) 190 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
183 191
184/* 192/*
185 * The overlap flag marks event constraints with overlapping counter 193 * The overlap flag marks event constraints with overlapping counter
@@ -203,7 +211,7 @@ struct cpu_hw_events {
203 * and its counter masks must be kept at a minimum. 211 * and its counter masks must be kept at a minimum.
204 */ 212 */
205#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ 213#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
206 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) 214 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
207 215
208/* 216/*
209 * Constraint on the Event code. 217 * Constraint on the Event code.
@@ -231,6 +239,14 @@ struct cpu_hw_events {
231#define INTEL_UEVENT_CONSTRAINT(c, n) \ 239#define INTEL_UEVENT_CONSTRAINT(c, n) \
232 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) 240 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
233 241
242#define INTEL_PLD_CONSTRAINT(c, n) \
243 __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
244 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
245
246#define INTEL_PST_CONSTRAINT(c, n) \
247 __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
248 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
249
234#define EVENT_CONSTRAINT_END \ 250#define EVENT_CONSTRAINT_END \
235 EVENT_CONSTRAINT(0, 0, 0) 251 EVENT_CONSTRAINT(0, 0, 0)
236 252
@@ -260,12 +276,22 @@ struct extra_reg {
260 .msr = (ms), \ 276 .msr = (ms), \
261 .config_mask = (m), \ 277 .config_mask = (m), \
262 .valid_mask = (vm), \ 278 .valid_mask = (vm), \
263 .idx = EXTRA_REG_##i \ 279 .idx = EXTRA_REG_##i, \
264 } 280 }
265 281
266#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ 282#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
267 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) 283 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
268 284
285#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
286 EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
287 ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
288
289#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
290 INTEL_UEVENT_EXTRA_REG(c, \
291 MSR_PEBS_LD_LAT_THRESHOLD, \
292 0xffff, \
293 LDLAT)
294
269#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) 295#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
270 296
271union perf_capabilities { 297union perf_capabilities {
@@ -355,8 +381,10 @@ struct x86_pmu {
355 */ 381 */
356 int attr_rdpmc; 382 int attr_rdpmc;
357 struct attribute **format_attrs; 383 struct attribute **format_attrs;
384 struct attribute **event_attrs;
358 385
359 ssize_t (*events_sysfs_show)(char *page, u64 config); 386 ssize_t (*events_sysfs_show)(char *page, u64 config);
387 struct attribute **cpu_events;
360 388
361 /* 389 /*
362 * CPU Hotplug hooks 390 * CPU Hotplug hooks
@@ -421,6 +449,23 @@ do { \
421#define ERF_NO_HT_SHARING 1 449#define ERF_NO_HT_SHARING 1
422#define ERF_HAS_RSP_1 2 450#define ERF_HAS_RSP_1 2
423 451
452#define EVENT_VAR(_id) event_attr_##_id
453#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
454
455#define EVENT_ATTR(_name, _id) \
456static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
457 .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
458 .id = PERF_COUNT_HW_##_id, \
459 .event_str = NULL, \
460};
461
462#define EVENT_ATTR_STR(_name, v, str) \
463static struct perf_pmu_events_attr event_attr_##v = { \
464 .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
465 .id = 0, \
466 .event_str = str, \
467};
468
424extern struct x86_pmu x86_pmu __read_mostly; 469extern struct x86_pmu x86_pmu __read_mostly;
425 470
426DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); 471DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -628,6 +673,9 @@ int p6_pmu_init(void);
628 673
629int knc_pmu_init(void); 674int knc_pmu_init(void);
630 675
676ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
677 char *page);
678
631#else /* CONFIG_CPU_SUP_INTEL */ 679#else /* CONFIG_CPU_SUP_INTEL */
632 680
633static inline void reserve_ds_buffers(void) 681static inline void reserve_ds_buffers(void)
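
To make the sysfs plumbing concrete: hand-expanding one EVENT_ATTR_STR() instance from the Intel patch later in this series with the macro defined above should give roughly the following (modulo whitespace). EVENT_PTR(mem_ld_nhm) then evaluates to &event_attr_mem_ld_nhm.attr.attr, which is what the new cpu_events arrays carry, and events_sysfs_show() returns event_str verbatim because "string trumps id".

/* EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3") */
static struct perf_pmu_events_attr event_attr_mem_ld_nhm = {
	.attr		= __ATTR(mem-loads, 0444, events_sysfs_show, NULL),
	.id		= 0,
	.event_str	= "event=0x0b,umask=0x10,ldlat=3",
};

Reading /sys/bus/event_source/devices/cpu/events/mem-loads should therefore print that event string rather than a value derived from the hardware event map.
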
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dfdab42aed27..7e28d9467bb4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,14 +132,11 @@ static u64 amd_pmu_event_map(int hw_event)
132 return amd_perfmon_event_map[hw_event]; 132 return amd_perfmon_event_map[hw_event];
133} 133}
134 134
135static struct event_constraint *amd_nb_event_constraint;
136
137/* 135/*
138 * Previously calculated offsets 136 * Previously calculated offsets
139 */ 137 */
140static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; 138static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
141static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; 139static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
142static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
143 140
144/* 141/*
145 * Legacy CPUs: 142 * Legacy CPUs:
@@ -147,14 +144,10 @@ static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
147 * 144 *
148 * CPUs with core performance counter extensions: 145 * CPUs with core performance counter extensions:
149 * 6 counters starting at 0xc0010200 each offset by 2 146 * 6 counters starting at 0xc0010200 each offset by 2
150 *
151 * CPUs with north bridge performance counter extensions:
152 * 4 additional counters starting at 0xc0010240 each offset by 2
153 * (indexed right above either one of the above core counters)
154 */ 147 */
155static inline int amd_pmu_addr_offset(int index, bool eventsel) 148static inline int amd_pmu_addr_offset(int index, bool eventsel)
156{ 149{
157 int offset, first, base; 150 int offset;
158 151
159 if (!index) 152 if (!index)
160 return index; 153 return index;
@@ -167,23 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
167 if (offset) 160 if (offset)
168 return offset; 161 return offset;
169 162
170 if (amd_nb_event_constraint && 163 if (!cpu_has_perfctr_core)
171 test_bit(index, amd_nb_event_constraint->idxmsk)) {
172 /*
173 * calculate the offset of NB counters with respect to
174 * base eventsel or perfctr
175 */
176
177 first = find_first_bit(amd_nb_event_constraint->idxmsk,
178 X86_PMC_IDX_MAX);
179
180 if (eventsel)
181 base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
182 else
183 base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
184
185 offset = base + ((index - first) << 1);
186 } else if (!cpu_has_perfctr_core)
187 offset = index; 164 offset = index;
188 else 165 else
189 offset = index << 1; 166 offset = index << 1;
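
With the north bridge special case gone, the offset arithmetic above reduces to a two-way choice that is then cached in event_offsets[]/count_offsets[]. A sketch of the resulting MSR layout (the base MSR numbers are taken from the comments above and stated here as assumptions):

/* how a counter index maps onto an MSR offset after this patch */
static unsigned int addr_offset_sketch(int index, int has_perfctr_core)
{
	if (!index)
		return 0;
	return has_perfctr_core ? index << 1 : index;
}
/*
 * legacy:        MSR_K7_EVNTSEL0 (0xc0010000) + {0, 1, 2, 3}
 * perfctr_core:  MSR_F15H_PERF_CTL (0xc0010200) + {0, 2, 4, 6, 8, 10},
 *                i.e. six CTL/CTR pairs, each pair two MSRs apart
 */
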
@@ -196,36 +173,6 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
196 return offset; 173 return offset;
197} 174}
198 175
199static inline int amd_pmu_rdpmc_index(int index)
200{
201 int ret, first;
202
203 if (!index)
204 return index;
205
206 ret = rdpmc_indexes[index];
207
208 if (ret)
209 return ret;
210
211 if (amd_nb_event_constraint &&
212 test_bit(index, amd_nb_event_constraint->idxmsk)) {
213 /*
214 * according to the mnual, ECX value of the NB counters is
215 * the index of the NB counter (0, 1, 2 or 3) plus 6
216 */
217
218 first = find_first_bit(amd_nb_event_constraint->idxmsk,
219 X86_PMC_IDX_MAX);
220 ret = index - first + 6;
221 } else
222 ret = index;
223
224 rdpmc_indexes[index] = ret;
225
226 return ret;
227}
228
229static int amd_core_hw_config(struct perf_event *event) 176static int amd_core_hw_config(struct perf_event *event)
230{ 177{
231 if (event->attr.exclude_host && event->attr.exclude_guest) 178 if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -245,34 +192,6 @@ static int amd_core_hw_config(struct perf_event *event)
245} 192}
246 193
247/* 194/*
248 * NB counters do not support the following event select bits:
249 * Host/Guest only
250 * Counter mask
251 * Invert counter mask
252 * Edge detect
253 * OS/User mode
254 */
255static int amd_nb_hw_config(struct perf_event *event)
256{
257 /* for NB, we only allow system wide counting mode */
258 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
259 return -EINVAL;
260
261 if (event->attr.exclude_user || event->attr.exclude_kernel ||
262 event->attr.exclude_host || event->attr.exclude_guest)
263 return -EINVAL;
264
265 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
266 ARCH_PERFMON_EVENTSEL_OS);
267
268 if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
269 ARCH_PERFMON_EVENTSEL_INT))
270 return -EINVAL;
271
272 return 0;
273}
274
275/*
276 * AMD64 events are detected based on their event codes. 195 * AMD64 events are detected based on their event codes.
277 */ 196 */
278static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) 197static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
@@ -285,11 +204,6 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
285 return (hwc->config & 0xe0) == 0xe0; 204 return (hwc->config & 0xe0) == 0xe0;
286} 205}
287 206
288static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
289{
290 return amd_nb_event_constraint && amd_is_nb_event(hwc);
291}
292
293static inline int amd_has_nb(struct cpu_hw_events *cpuc) 207static inline int amd_has_nb(struct cpu_hw_events *cpuc)
294{ 208{
295 struct amd_nb *nb = cpuc->amd_nb; 209 struct amd_nb *nb = cpuc->amd_nb;
@@ -315,9 +229,6 @@ static int amd_pmu_hw_config(struct perf_event *event)
315 if (event->attr.type == PERF_TYPE_RAW) 229 if (event->attr.type == PERF_TYPE_RAW)
316 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; 230 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
317 231
318 if (amd_is_perfctr_nb_event(&event->hw))
319 return amd_nb_hw_config(event);
320
321 return amd_core_hw_config(event); 232 return amd_core_hw_config(event);
322} 233}
323 234
@@ -341,19 +252,6 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
341 } 252 }
342} 253}
343 254
344static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
345{
346 int core_id = cpu_data(smp_processor_id()).cpu_core_id;
347
348 /* deliver interrupts only to this core */
349 if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
350 hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
351 hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
352 hwc->config |= (u64)(core_id) <<
353 AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
354 }
355}
356
357 /* 255 /*
358 * AMD64 NorthBridge events need special treatment because 256 * AMD64 NorthBridge events need special treatment because
359 * counter access needs to be synchronized across all cores 257 * counter access needs to be synchronized across all cores
@@ -441,9 +339,6 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
441 if (new == -1) 339 if (new == -1)
442 return &emptyconstraint; 340 return &emptyconstraint;
443 341
444 if (amd_is_perfctr_nb_event(hwc))
445 amd_nb_interrupt_hw_config(hwc);
446
447 return &nb->event_constraints[new]; 342 return &nb->event_constraints[new];
448} 343}
449 344
@@ -543,8 +438,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
543 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) 438 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
544 return &unconstrained; 439 return &unconstrained;
545 440
546 return __amd_get_nb_event_constraints(cpuc, event, 441 return __amd_get_nb_event_constraints(cpuc, event, NULL);
547 amd_nb_event_constraint);
548} 442}
549 443
550static void amd_put_event_constraints(struct cpu_hw_events *cpuc, 444static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
@@ -643,9 +537,6 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
643static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); 537static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
644static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); 538static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
645 539
646static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
647static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
648
649static struct event_constraint * 540static struct event_constraint *
650amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) 541amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
651{ 542{
@@ -711,8 +602,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
711 return &amd_f15_PMC20; 602 return &amd_f15_PMC20;
712 } 603 }
713 case AMD_EVENT_NB: 604 case AMD_EVENT_NB:
714 return __amd_get_nb_event_constraints(cpuc, event, 605 /* moved to perf_event_amd_uncore.c */
715 amd_nb_event_constraint); 606 return &emptyconstraint;
716 default: 607 default:
717 return &emptyconstraint; 608 return &emptyconstraint;
718 } 609 }
@@ -738,7 +629,6 @@ static __initconst const struct x86_pmu amd_pmu = {
738 .eventsel = MSR_K7_EVNTSEL0, 629 .eventsel = MSR_K7_EVNTSEL0,
739 .perfctr = MSR_K7_PERFCTR0, 630 .perfctr = MSR_K7_PERFCTR0,
740 .addr_offset = amd_pmu_addr_offset, 631 .addr_offset = amd_pmu_addr_offset,
741 .rdpmc_index = amd_pmu_rdpmc_index,
742 .event_map = amd_pmu_event_map, 632 .event_map = amd_pmu_event_map,
743 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 633 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
744 .num_counters = AMD64_NUM_COUNTERS, 634 .num_counters = AMD64_NUM_COUNTERS,
@@ -790,23 +680,6 @@ static int setup_perfctr_core(void)
790 return 0; 680 return 0;
791} 681}
792 682
793static int setup_perfctr_nb(void)
794{
795 if (!cpu_has_perfctr_nb)
796 return -ENODEV;
797
798 x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
799
800 if (cpu_has_perfctr_core)
801 amd_nb_event_constraint = &amd_NBPMC96;
802 else
803 amd_nb_event_constraint = &amd_NBPMC74;
804
805 printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
806
807 return 0;
808}
809
810__init int amd_pmu_init(void) 683__init int amd_pmu_init(void)
811{ 684{
812 /* Performance-monitoring supported from K7 and later: */ 685 /* Performance-monitoring supported from K7 and later: */
@@ -817,7 +690,6 @@ __init int amd_pmu_init(void)
817 690
818 setup_event_constraints(); 691 setup_event_constraints();
819 setup_perfctr_core(); 692 setup_perfctr_core();
820 setup_perfctr_nb();
821 693
822 /* Events are common for all AMDs */ 694 /* Events are common for all AMDs */
823 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 695 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
new file mode 100644
index 000000000000..c0c661adf03e
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -0,0 +1,547 @@
1/*
2 * Copyright (C) 2013 Advanced Micro Devices, Inc.
3 *
4 * Author: Jacob Shin <jacob.shin@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/perf_event.h>
12#include <linux/percpu.h>
13#include <linux/types.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/cpu.h>
17#include <linux/cpumask.h>
18
19#include <asm/cpufeature.h>
20#include <asm/perf_event.h>
21#include <asm/msr.h>
22
23#define NUM_COUNTERS_NB 4
24#define NUM_COUNTERS_L2 4
25#define MAX_COUNTERS NUM_COUNTERS_NB
26
27#define RDPMC_BASE_NB 6
28#define RDPMC_BASE_L2 10
29
30#define COUNTER_SHIFT 16
31
32struct amd_uncore {
33 int id;
34 int refcnt;
35 int cpu;
36 int num_counters;
37 int rdpmc_base;
38 u32 msr_base;
39 cpumask_t *active_mask;
40 struct pmu *pmu;
41 struct perf_event *events[MAX_COUNTERS];
42 struct amd_uncore *free_when_cpu_online;
43};
44
45static struct amd_uncore * __percpu *amd_uncore_nb;
46static struct amd_uncore * __percpu *amd_uncore_l2;
47
48static struct pmu amd_nb_pmu;
49static struct pmu amd_l2_pmu;
50
51static cpumask_t amd_nb_active_mask;
52static cpumask_t amd_l2_active_mask;
53
54static bool is_nb_event(struct perf_event *event)
55{
56 return event->pmu->type == amd_nb_pmu.type;
57}
58
59static bool is_l2_event(struct perf_event *event)
60{
61 return event->pmu->type == amd_l2_pmu.type;
62}
63
64static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
65{
66 if (is_nb_event(event) && amd_uncore_nb)
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu);
68 else if (is_l2_event(event) && amd_uncore_l2)
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu);
70
71 return NULL;
72}
73
74static void amd_uncore_read(struct perf_event *event)
75{
76 struct hw_perf_event *hwc = &event->hw;
77 u64 prev, new;
78 s64 delta;
79
80 /*
81 * since we do not enable counter overflow interrupts,
82 * we do not have to worry about prev_count changing on us
83 */
84
85 prev = local64_read(&hwc->prev_count);
86 rdpmcl(hwc->event_base_rdpmc, new);
87 local64_set(&hwc->prev_count, new);
88 delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
89 delta >>= COUNTER_SHIFT;
90 local64_add(delta, &event->count);
91}
92
93static void amd_uncore_start(struct perf_event *event, int flags)
94{
95 struct hw_perf_event *hwc = &event->hw;
96
97 if (flags & PERF_EF_RELOAD)
98 wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
99
100 hwc->state = 0;
101 wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
102 perf_event_update_userpage(event);
103}
104
105static void amd_uncore_stop(struct perf_event *event, int flags)
106{
107 struct hw_perf_event *hwc = &event->hw;
108
109 wrmsrl(hwc->config_base, hwc->config);
110 hwc->state |= PERF_HES_STOPPED;
111
112 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
113 amd_uncore_read(event);
114 hwc->state |= PERF_HES_UPTODATE;
115 }
116}
117
118static int amd_uncore_add(struct perf_event *event, int flags)
119{
120 int i;
121 struct amd_uncore *uncore = event_to_amd_uncore(event);
122 struct hw_perf_event *hwc = &event->hw;
123
124 /* are we already assigned? */
125 if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
126 goto out;
127
128 for (i = 0; i < uncore->num_counters; i++) {
129 if (uncore->events[i] == event) {
130 hwc->idx = i;
131 goto out;
132 }
133 }
134
135 /* if not, take the first available counter */
136 hwc->idx = -1;
137 for (i = 0; i < uncore->num_counters; i++) {
138 if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
139 hwc->idx = i;
140 break;
141 }
142 }
143
144out:
145 if (hwc->idx == -1)
146 return -EBUSY;
147
148 hwc->config_base = uncore->msr_base + (2 * hwc->idx);
149 hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
150 hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
151 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
152
153 if (flags & PERF_EF_START)
154 amd_uncore_start(event, PERF_EF_RELOAD);
155
156 return 0;
157}
158
159static void amd_uncore_del(struct perf_event *event, int flags)
160{
161 int i;
162 struct amd_uncore *uncore = event_to_amd_uncore(event);
163 struct hw_perf_event *hwc = &event->hw;
164
165 amd_uncore_stop(event, PERF_EF_UPDATE);
166
167 for (i = 0; i < uncore->num_counters; i++) {
168 if (cmpxchg(&uncore->events[i], event, NULL) == event)
169 break;
170 }
171
172 hwc->idx = -1;
173}
174
175static int amd_uncore_event_init(struct perf_event *event)
176{
177 struct amd_uncore *uncore;
178 struct hw_perf_event *hwc = &event->hw;
179
180 if (event->attr.type != event->pmu->type)
181 return -ENOENT;
182
183 /*
184 * NB and L2 counters (MSRs) are shared across all cores that share the
185 * same NB / L2 cache. Interrupts can be directed to a single target
186 * core, however, event counts generated by processes running on other
187 * cores cannot be masked out. So we do not support sampling and
188 * per-thread events.
189 */
190 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
191 return -EINVAL;
192
193 /* NB and L2 counters do not have usr/os/guest/host bits */
194 if (event->attr.exclude_user || event->attr.exclude_kernel ||
195 event->attr.exclude_host || event->attr.exclude_guest)
196 return -EINVAL;
197
198 /* and we do not enable counter overflow interrupts */
199 hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
200 hwc->idx = -1;
201
202 if (event->cpu < 0)
203 return -EINVAL;
204
205 uncore = event_to_amd_uncore(event);
206 if (!uncore)
207 return -ENODEV;
208
209 /*
210 * since request can come in to any of the shared cores, we will remap
211 * to a single common cpu.
212 */
213 event->cpu = uncore->cpu;
214
215 return 0;
216}
217
218static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
219 struct device_attribute *attr,
220 char *buf)
221{
222 int n;
223 cpumask_t *active_mask;
224 struct pmu *pmu = dev_get_drvdata(dev);
225
226 if (pmu->type == amd_nb_pmu.type)
227 active_mask = &amd_nb_active_mask;
228 else if (pmu->type == amd_l2_pmu.type)
229 active_mask = &amd_l2_active_mask;
230 else
231 return 0;
232
233 n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
234 buf[n++] = '\n';
235 buf[n] = '\0';
236 return n;
237}
238static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
239
240static struct attribute *amd_uncore_attrs[] = {
241 &dev_attr_cpumask.attr,
242 NULL,
243};
244
245static struct attribute_group amd_uncore_attr_group = {
246 .attrs = amd_uncore_attrs,
247};
248
249PMU_FORMAT_ATTR(event, "config:0-7,32-35");
250PMU_FORMAT_ATTR(umask, "config:8-15");
251
252static struct attribute *amd_uncore_format_attr[] = {
253 &format_attr_event.attr,
254 &format_attr_umask.attr,
255 NULL,
256};
257
258static struct attribute_group amd_uncore_format_group = {
259 .name = "format",
260 .attrs = amd_uncore_format_attr,
261};
262
263static const struct attribute_group *amd_uncore_attr_groups[] = {
264 &amd_uncore_attr_group,
265 &amd_uncore_format_group,
266 NULL,
267};
268
269static struct pmu amd_nb_pmu = {
270 .attr_groups = amd_uncore_attr_groups,
271 .name = "amd_nb",
272 .event_init = amd_uncore_event_init,
273 .add = amd_uncore_add,
274 .del = amd_uncore_del,
275 .start = amd_uncore_start,
276 .stop = amd_uncore_stop,
277 .read = amd_uncore_read,
278};
279
280static struct pmu amd_l2_pmu = {
281 .attr_groups = amd_uncore_attr_groups,
282 .name = "amd_l2",
283 .event_init = amd_uncore_event_init,
284 .add = amd_uncore_add,
285 .del = amd_uncore_del,
286 .start = amd_uncore_start,
287 .stop = amd_uncore_stop,
288 .read = amd_uncore_read,
289};
290
291static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
292{
293 return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
294 cpu_to_node(cpu));
295}
296
297static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
298{
299 struct amd_uncore *uncore;
300
301 if (amd_uncore_nb) {
302 uncore = amd_uncore_alloc(cpu);
303 uncore->cpu = cpu;
304 uncore->num_counters = NUM_COUNTERS_NB;
305 uncore->rdpmc_base = RDPMC_BASE_NB;
306 uncore->msr_base = MSR_F15H_NB_PERF_CTL;
307 uncore->active_mask = &amd_nb_active_mask;
308 uncore->pmu = &amd_nb_pmu;
309 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
310 }
311
312 if (amd_uncore_l2) {
313 uncore = amd_uncore_alloc(cpu);
314 uncore->cpu = cpu;
315 uncore->num_counters = NUM_COUNTERS_L2;
316 uncore->rdpmc_base = RDPMC_BASE_L2;
317 uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
318 uncore->active_mask = &amd_l2_active_mask;
319 uncore->pmu = &amd_l2_pmu;
320 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
321 }
322}
323
324static struct amd_uncore *
325__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
326 struct amd_uncore * __percpu *uncores)
327{
328 unsigned int cpu;
329 struct amd_uncore *that;
330
331 for_each_online_cpu(cpu) {
332 that = *per_cpu_ptr(uncores, cpu);
333
334 if (!that)
335 continue;
336
337 if (this == that)
338 continue;
339
340 if (this->id == that->id) {
341 that->free_when_cpu_online = this;
342 this = that;
343 break;
344 }
345 }
346
347 this->refcnt++;
348 return this;
349}
350
351static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
352{
353 unsigned int eax, ebx, ecx, edx;
354 struct amd_uncore *uncore;
355
356 if (amd_uncore_nb) {
357 uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
358 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
359 uncore->id = ecx & 0xff;
360
361 uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
362 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
363 }
364
365 if (amd_uncore_l2) {
366 unsigned int apicid = cpu_data(cpu).apicid;
367 unsigned int nshared;
368
369 uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
370 cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
371 nshared = ((eax >> 14) & 0xfff) + 1;
372 uncore->id = apicid - (apicid % nshared);
373
374 uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
375 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
376 }
377}
378
379static void __cpuinit uncore_online(unsigned int cpu,
380 struct amd_uncore * __percpu *uncores)
381{
382 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
383
384 kfree(uncore->free_when_cpu_online);
385 uncore->free_when_cpu_online = NULL;
386
387 if (cpu == uncore->cpu)
388 cpumask_set_cpu(cpu, uncore->active_mask);
389}
390
391static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
392{
393 if (amd_uncore_nb)
394 uncore_online(cpu, amd_uncore_nb);
395
396 if (amd_uncore_l2)
397 uncore_online(cpu, amd_uncore_l2);
398}
399
400static void __cpuinit uncore_down_prepare(unsigned int cpu,
401 struct amd_uncore * __percpu *uncores)
402{
403 unsigned int i;
404 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
405
406 if (this->cpu != cpu)
407 return;
408
409 /* this cpu is going down, migrate to a shared sibling if possible */
410 for_each_online_cpu(i) {
411 struct amd_uncore *that = *per_cpu_ptr(uncores, i);
412
413 if (cpu == i)
414 continue;
415
416 if (this == that) {
417 perf_pmu_migrate_context(this->pmu, cpu, i);
418 cpumask_clear_cpu(cpu, that->active_mask);
419 cpumask_set_cpu(i, that->active_mask);
420 that->cpu = i;
421 break;
422 }
423 }
424}
425
426static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
427{
428 if (amd_uncore_nb)
429 uncore_down_prepare(cpu, amd_uncore_nb);
430
431 if (amd_uncore_l2)
432 uncore_down_prepare(cpu, amd_uncore_l2);
433}
434
435static void __cpuinit uncore_dead(unsigned int cpu,
436 struct amd_uncore * __percpu *uncores)
437{
438 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
439
440 if (cpu == uncore->cpu)
441 cpumask_clear_cpu(cpu, uncore->active_mask);
442
443 if (!--uncore->refcnt)
444 kfree(uncore);
445 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
446}
447
448static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
449{
450 if (amd_uncore_nb)
451 uncore_dead(cpu, amd_uncore_nb);
452
453 if (amd_uncore_l2)
454 uncore_dead(cpu, amd_uncore_l2);
455}
456
457static int __cpuinit
458amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
459 void *hcpu)
460{
461 unsigned int cpu = (long)hcpu;
462
463 switch (action & ~CPU_TASKS_FROZEN) {
464 case CPU_UP_PREPARE:
465 amd_uncore_cpu_up_prepare(cpu);
466 break;
467
468 case CPU_STARTING:
469 amd_uncore_cpu_starting(cpu);
470 break;
471
472 case CPU_ONLINE:
473 amd_uncore_cpu_online(cpu);
474 break;
475
476 case CPU_DOWN_PREPARE:
477 amd_uncore_cpu_down_prepare(cpu);
478 break;
479
480 case CPU_UP_CANCELED:
481 case CPU_DEAD:
482 amd_uncore_cpu_dead(cpu);
483 break;
484
485 default:
486 break;
487 }
488
489 return NOTIFY_OK;
490}
491
492static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
493 .notifier_call = amd_uncore_cpu_notifier,
494 .priority = CPU_PRI_PERF + 1,
495};
496
497static void __init init_cpu_already_online(void *dummy)
498{
499 unsigned int cpu = smp_processor_id();
500
501 amd_uncore_cpu_starting(cpu);
502 amd_uncore_cpu_online(cpu);
503}
504
505static int __init amd_uncore_init(void)
506{
507 unsigned int cpu;
508 int ret = -ENODEV;
509
510 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
511 return -ENODEV;
512
513 if (!cpu_has_topoext)
514 return -ENODEV;
515
516 if (cpu_has_perfctr_nb) {
517 amd_uncore_nb = alloc_percpu(struct amd_uncore *);
518 perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
519
520 printk(KERN_INFO "perf: AMD NB counters detected\n");
521 ret = 0;
522 }
523
524 if (cpu_has_perfctr_l2) {
525 amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
526 perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
527
528 printk(KERN_INFO "perf: AMD L2I counters detected\n");
529 ret = 0;
530 }
531
532 if (ret)
533 return -ENODEV;
534
535 get_online_cpus();
536 /* init cpus already online before registering for hotplug notifier */
537 for_each_online_cpu(cpu) {
538 amd_uncore_cpu_up_prepare(cpu);
539 smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
540 }
541
542 register_cpu_notifier(&amd_uncore_cpu_notifier_block);
543 put_online_cpus();
544
545 return 0;
546}
547device_initcall(amd_uncore_init);
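
The delta computation in amd_uncore_read() above shifts both counts left by COUNTER_SHIFT (16) before subtracting and shifts the signed result back down, so the difference of two 48-bit counter reads is effectively sign-extended and a single counter wrap still yields the correct positive delta. A standalone demonstration of that arithmetic (values invented):

#include <stdio.h>

#define COUNTER_SHIFT 16

static long long delta48(unsigned long long prev, unsigned long long new)
{
	long long d = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	return d >> COUNTER_SHIFT;
}

int main(void)
{
	/* the 48-bit counter wrapped from near its limit to a small value */
	unsigned long long prev = 0xffffffffff00ULL;	/* 2^48 - 256 */
	unsigned long long new  = 0x40ULL;

	printf("%lld\n", delta48(prev, new));	/* 320, not a huge negative */
	return 0;
}
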
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 529c8931fc02..ffd6050a1de4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -81,6 +81,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
81static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = 81static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
82{ 82{
83 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 83 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
84 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
84 EVENT_EXTRA_END 85 EVENT_EXTRA_END
85}; 86};
86 87
@@ -101,9 +102,15 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
101 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 102 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
102 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 103 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
103 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 104 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
105 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
106 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
107 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
108 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
104 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ 109 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
105 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 110 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
106 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 111 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
112 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
113 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
107 EVENT_CONSTRAINT_END 114 EVENT_CONSTRAINT_END
108}; 115};
109 116
@@ -132,6 +139,7 @@ static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
132{ 139{
133 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 140 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
134 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), 141 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
142 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
135 EVENT_EXTRA_END 143 EVENT_EXTRA_END
136}; 144};
137 145
@@ -149,11 +157,34 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
149}; 157};
150 158
151static struct extra_reg intel_snb_extra_regs[] __read_mostly = { 159static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
152 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), 160 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
153 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), 161 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
162 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
163 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
154 EVENT_EXTRA_END 164 EVENT_EXTRA_END
155}; 165};
156 166
167static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
168 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
169 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
170 EVENT_EXTRA_END
171};
172
173EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
174EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
175EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
176
177struct attribute *nhm_events_attrs[] = {
178 EVENT_PTR(mem_ld_nhm),
179 NULL,
180};
181
182struct attribute *snb_events_attrs[] = {
183 EVENT_PTR(mem_ld_snb),
184 EVENT_PTR(mem_st_snb),
185 NULL,
186};
187
157static u64 intel_pmu_event_map(int hw_event) 188static u64 intel_pmu_event_map(int hw_event)
158{ 189{
159 return intel_perfmon_event_map[hw_event]; 190 return intel_perfmon_event_map[hw_event];
@@ -1388,8 +1419,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1388 1419
1389 if (x86_pmu.event_constraints) { 1420 if (x86_pmu.event_constraints) {
1390 for_each_event_constraint(c, x86_pmu.event_constraints) { 1421 for_each_event_constraint(c, x86_pmu.event_constraints) {
1391 if ((event->hw.config & c->cmask) == c->code) 1422 if ((event->hw.config & c->cmask) == c->code) {
1423 /* hw.flags zeroed at initialization */
1424 event->hw.flags |= c->flags;
1392 return c; 1425 return c;
1426 }
1393 } 1427 }
1394 } 1428 }
1395 1429
@@ -1434,6 +1468,7 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1434static void intel_put_event_constraints(struct cpu_hw_events *cpuc, 1468static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1435 struct perf_event *event) 1469 struct perf_event *event)
1436{ 1470{
1471 event->hw.flags = 0;
1437 intel_put_shared_regs_event_constraints(cpuc, event); 1472 intel_put_shared_regs_event_constraints(cpuc, event);
1438} 1473}
1439 1474
@@ -1757,6 +1792,8 @@ static void intel_pmu_flush_branch_stack(void)
1757 1792
1758PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); 1793PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1759 1794
1795PMU_FORMAT_ATTR(ldlat, "config1:0-15");
1796
1760static struct attribute *intel_arch3_formats_attr[] = { 1797static struct attribute *intel_arch3_formats_attr[] = {
1761 &format_attr_event.attr, 1798 &format_attr_event.attr,
1762 &format_attr_umask.attr, 1799 &format_attr_umask.attr,
@@ -1767,6 +1804,7 @@ static struct attribute *intel_arch3_formats_attr[] = {
1767 &format_attr_cmask.attr, 1804 &format_attr_cmask.attr,
1768 1805
1769 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ 1806 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
1807 &format_attr_ldlat.attr, /* PEBS load latency */
1770 NULL, 1808 NULL,
1771}; 1809};
1772 1810
@@ -2027,6 +2065,8 @@ __init int intel_pmu_init(void)
2027 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 2065 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2028 x86_pmu.extra_regs = intel_nehalem_extra_regs; 2066 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2029 2067
2068 x86_pmu.cpu_events = nhm_events_attrs;
2069
2030 /* UOPS_ISSUED.STALLED_CYCLES */ 2070 /* UOPS_ISSUED.STALLED_CYCLES */
2031 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2071 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2032 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2072 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2070,6 +2110,8 @@ __init int intel_pmu_init(void)
2070 x86_pmu.extra_regs = intel_westmere_extra_regs; 2110 x86_pmu.extra_regs = intel_westmere_extra_regs;
2071 x86_pmu.er_flags |= ERF_HAS_RSP_1; 2111 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2072 2112
2113 x86_pmu.cpu_events = nhm_events_attrs;
2114
2073 /* UOPS_ISSUED.STALLED_CYCLES */ 2115 /* UOPS_ISSUED.STALLED_CYCLES */
2074 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2116 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2075 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2117 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2093,11 +2135,16 @@ __init int intel_pmu_init(void)
2093 x86_pmu.event_constraints = intel_snb_event_constraints; 2135 x86_pmu.event_constraints = intel_snb_event_constraints;
2094 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 2136 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
2095 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 2137 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2096 x86_pmu.extra_regs = intel_snb_extra_regs; 2138 if (boot_cpu_data.x86_model == 45)
2139 x86_pmu.extra_regs = intel_snbep_extra_regs;
2140 else
2141 x86_pmu.extra_regs = intel_snb_extra_regs;
2097 /* all extra regs are per-cpu when HT is on */ 2142 /* all extra regs are per-cpu when HT is on */
2098 x86_pmu.er_flags |= ERF_HAS_RSP_1; 2143 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2099 x86_pmu.er_flags |= ERF_NO_HT_SHARING; 2144 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2100 2145
2146 x86_pmu.cpu_events = snb_events_attrs;
2147
2101 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 2148 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2102 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2149 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2103 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2150 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2119,11 +2166,16 @@ __init int intel_pmu_init(void)
2119 x86_pmu.event_constraints = intel_ivb_event_constraints; 2166 x86_pmu.event_constraints = intel_ivb_event_constraints;
2120 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; 2167 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2121 x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 2168 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2122 x86_pmu.extra_regs = intel_snb_extra_regs; 2169 if (boot_cpu_data.x86_model == 62)
2170 x86_pmu.extra_regs = intel_snbep_extra_regs;
2171 else
2172 x86_pmu.extra_regs = intel_snb_extra_regs;
2123 /* all extra regs are per-cpu when HT is on */ 2173 /* all extra regs are per-cpu when HT is on */
2124 x86_pmu.er_flags |= ERF_HAS_RSP_1; 2174 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2125 x86_pmu.er_flags |= ERF_NO_HT_SHARING; 2175 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2126 2176
2177 x86_pmu.cpu_events = snb_events_attrs;
2178
2127 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ 2179 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2128 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 2180 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2129 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); 2181 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
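
A hedged sketch of how the new pieces fit together from user space: the mem-loads alias above encodes event=0x0b,umask=0x10 with a default ldlat of 3, and the ldlat format attribute maps to config1:0-15, which is forwarded to MSR_PEBS_LD_LAT_THRESHOLD through the new extra_reg entries. The perf_event_attr setup below is illustrative only; the raw values come from those strings, the rest is an assumption about how a tool would use them.

#include <linux/perf_event.h>
#include <string.h>

static void setup_mem_loads_nhm(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type	    = PERF_TYPE_RAW;
	attr->size	    = sizeof(*attr);
	attr->config	    = 0x100b;	/* event=0x0b, umask=0x10 */
	attr->config1	    = 3;	/* ldlat threshold, config1:0-15 */
	attr->sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
	attr->sample_period = 1000;
	attr->precise_ip    = 2;	/* request PEBS */
}
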
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 826054a4f2ee..60250f687052 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -24,6 +24,130 @@ struct pebs_record_32 {
24 24
25 */ 25 */
26 26
27union intel_x86_pebs_dse {
28 u64 val;
29 struct {
30 unsigned int ld_dse:4;
31 unsigned int ld_stlb_miss:1;
32 unsigned int ld_locked:1;
33 unsigned int ld_reserved:26;
34 };
35 struct {
36 unsigned int st_l1d_hit:1;
37 unsigned int st_reserved1:3;
38 unsigned int st_stlb_miss:1;
39 unsigned int st_locked:1;
40 unsigned int st_reserved2:26;
41 };
42};
43
44
45/*
46 * Map PEBS Load Latency Data Source encodings to generic
47 * memory data source information
48 */
49#define P(a, b) PERF_MEM_S(a, b)
50#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
51#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
52
53static const u64 pebs_data_source[] = {
54 P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
55 OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
56 OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
57 OP_LH | P(LVL, L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
58 OP_LH | P(LVL, L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
59 OP_LH | P(LVL, L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
60 OP_LH | P(LVL, L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
61 OP_LH | P(LVL, L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
62 OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
63 OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
64 OP_LH | P(LVL, LOC_RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
65 OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
66 OP_LH | P(LVL, LOC_RAM) | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
67 OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
68 OP_LH | P(LVL, IO) | P(SNOOP, NONE), /* 0x0e: I/O */
69 OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
70};
71
72static u64 precise_store_data(u64 status)
73{
74 union intel_x86_pebs_dse dse;
75 u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
76
77 dse.val = status;
78
79 /*
80 * bit 4: TLB access
81 * 1 = stored missed 2nd level TLB
82 *
83 * so it either hit the walker or the OS
84 * otherwise hit 2nd level TLB
85 */
86 if (dse.st_stlb_miss)
87 val |= P(TLB, MISS);
88 else
89 val |= P(TLB, HIT);
90
91 /*
92 * bit 0: hit L1 data cache
93 * if not set, then all we know is that
94 * it missed L1D
95 */
96 if (dse.st_l1d_hit)
97 val |= P(LVL, HIT);
98 else
99 val |= P(LVL, MISS);
100
101 /*
102 * bit 5: Locked prefix
103 */
104 if (dse.st_locked)
105 val |= P(LOCK, LOCKED);
106
107 return val;
108}
109
110static u64 load_latency_data(u64 status)
111{
112 union intel_x86_pebs_dse dse;
113 u64 val;
114 int model = boot_cpu_data.x86_model;
115 int fam = boot_cpu_data.x86;
116
117 dse.val = status;
118
119 /*
120 * use the mapping table for bit 0-3
121 */
122 val = pebs_data_source[dse.ld_dse];
123
124 /*
125 * Nehalem models do not support TLB, Lock infos
126 */
127 if (fam == 0x6 && (model == 26 || model == 30
128 || model == 31 || model == 46)) {
129 val |= P(TLB, NA) | P(LOCK, NA);
130 return val;
131 }
132 /*
133 * bit 4: TLB access
134 * 0 = did not miss 2nd level TLB
135 * 1 = missed 2nd level TLB
136 */
137 if (dse.ld_stlb_miss)
138 val |= P(TLB, MISS) | P(TLB, L2);
139 else
140 val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
141
142 /*
143 * bit 5: locked prefix
144 */
145 if (dse.ld_locked)
146 val |= P(LOCK, LOCKED);
147
148 return val;
149}
150
27struct pebs_record_core { 151struct pebs_record_core {
28 u64 flags, ip; 152 u64 flags, ip;
29 u64 ax, bx, cx, dx; 153 u64 ax, bx, cx, dx;
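
As a worked decoding of the mapping above, take a hypothetical load-latency status of 0x25 on a non-Nehalem core (so the TLB and lock bits are honoured):

/*
 * status = 0x25  ->  bits 3..0 = 0x5, bit 4 = 0, bit 5 = 1
 *
 *   pebs_data_source[0x5] = OP_LH | P(LVL, L3) | P(SNOOP, MISS)
 *   ld_stlb_miss == 0     -> val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2)
 *   ld_locked    == 1     -> val |= P(LOCK, LOCKED)
 *
 * i.e. a locked load that hit L3 (snoop missed) and hit the TLB.
 */
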
@@ -314,10 +438,11 @@ int intel_pmu_drain_bts_buffer(void)
314 if (top <= at) 438 if (top <= at)
315 return 0; 439 return 0;
316 440
441 memset(&regs, 0, sizeof(regs));
442
317 ds->bts_index = ds->bts_buffer_base; 443 ds->bts_index = ds->bts_buffer_base;
318 444
319 perf_sample_data_init(&data, 0, event->hw.last_period); 445 perf_sample_data_init(&data, 0, event->hw.last_period);
320 regs.ip = 0;
321 446
322 /* 447 /*
323 * Prepare a generic sample, i.e. fill in the invariant fields. 448 * Prepare a generic sample, i.e. fill in the invariant fields.
@@ -364,7 +489,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
364}; 489};
365 490
366struct event_constraint intel_nehalem_pebs_event_constraints[] = { 491struct event_constraint intel_nehalem_pebs_event_constraints[] = {
367 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 492 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
368 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 493 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
369 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 494 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
370 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */ 495 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
@@ -379,7 +504,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
379}; 504};
380 505
381struct event_constraint intel_westmere_pebs_event_constraints[] = { 506struct event_constraint intel_westmere_pebs_event_constraints[] = {
382 INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */ 507 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
383 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 508 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
384 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */ 509 INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
385 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */ 510 INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
@@ -399,7 +524,8 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
399 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ 524 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
400 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 525 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
401 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 526 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
402 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 527 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
528 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
403 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 529 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
404 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 530 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
405 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 531 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -413,7 +539,8 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
413 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ 539 INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
414 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 540 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
415 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 541 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
416 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 542 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
543 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
417 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 544 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
418 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 545 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
419 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 546 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -430,8 +557,10 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
430 557
431 if (x86_pmu.pebs_constraints) { 558 if (x86_pmu.pebs_constraints) {
432 for_each_event_constraint(c, x86_pmu.pebs_constraints) { 559 for_each_event_constraint(c, x86_pmu.pebs_constraints) {
433 if ((event->hw.config & c->cmask) == c->code) 560 if ((event->hw.config & c->cmask) == c->code) {
561 event->hw.flags |= c->flags;
434 return c; 562 return c;
563 }
435 } 564 }
436 } 565 }
437 566
@@ -446,6 +575,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
446 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; 575 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
447 576
448 cpuc->pebs_enabled |= 1ULL << hwc->idx; 577 cpuc->pebs_enabled |= 1ULL << hwc->idx;
578
579 if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
580 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
581 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
582 cpuc->pebs_enabled |= 1ULL << 63;
449} 583}
450 584
451void intel_pmu_pebs_disable(struct perf_event *event) 585void intel_pmu_pebs_disable(struct perf_event *event)
@@ -558,20 +692,51 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
558 struct pt_regs *iregs, void *__pebs) 692 struct pt_regs *iregs, void *__pebs)
559{ 693{
560 /* 694 /*
561 * We cast to pebs_record_core since that is a subset of 695 * We cast to pebs_record_nhm to get the load latency data
562 * both formats and we don't use the other fields in this 696 * if extra_reg MSR_PEBS_LD_LAT_THRESHOLD used
563 * routine.
564 */ 697 */
565 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 698 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
566 struct pebs_record_core *pebs = __pebs; 699 struct pebs_record_nhm *pebs = __pebs;
567 struct perf_sample_data data; 700 struct perf_sample_data data;
568 struct pt_regs regs; 701 struct pt_regs regs;
702 u64 sample_type;
703 int fll, fst;
569 704
570 if (!intel_pmu_save_and_restart(event)) 705 if (!intel_pmu_save_and_restart(event))
571 return; 706 return;
572 707
708 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
709 fst = event->hw.flags & PERF_X86_EVENT_PEBS_ST;
710
573 perf_sample_data_init(&data, 0, event->hw.last_period); 711 perf_sample_data_init(&data, 0, event->hw.last_period);
574 712
713 data.period = event->hw.last_period;
714 sample_type = event->attr.sample_type;
715
716 /*
717 * if PEBS-LL or PreciseStore
718 */
719 if (fll || fst) {
720 if (sample_type & PERF_SAMPLE_ADDR)
721 data.addr = pebs->dla;
722
723 /*
724 * Use latency for weight (only avail with PEBS-LL)
725 */
726 if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
727 data.weight = pebs->lat;
728
729 /*
730 * data.data_src encodes the data source
731 */
732 if (sample_type & PERF_SAMPLE_DATA_SRC) {
733 if (fll)
734 data.data_src.val = load_latency_data(pebs->dse);
735 else
736 data.data_src.val = precise_store_data(pebs->dse);
737 }
738 }
739
575 /* 740 /*
576 * We use the interrupt regs as a base because the PEBS record 741 * We use the interrupt regs as a base because the PEBS record
577 * does not contain a full regs set, specifically it seems to 742 * does not contain a full regs set, specifically it seems to
@@ -729,3 +894,13 @@ void intel_ds_init(void)
729 } 894 }
730 } 895 }
731} 896}
897
898void perf_restore_debug_store(void)
899{
900 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
901
902 if (!x86_pmu.bts && !x86_pmu.pebs)
903 return;
904
905 wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
906}
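[PEBS enable bits, illustrative note] The intel_pmu_pebs_enable() hunk above ends up programming three kinds of bits into cpuc->pebs_enabled: bit idx arms PEBS for counter idx, bit idx+32 additionally requests load-latency data for that counter, and bit 63 requests precise-store data. The stand-alone C sketch below only mirrors that bit arithmetic; the flag values are made-up placeholders, not the kernel's PERF_X86_EVENT_* definitions.

/* Stand-alone sketch, not kernel code: mirrors the shifts used in
 * intel_pmu_pebs_enable() above. Flag values are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PEBS_LDLAT 0x1	/* placeholder for PERF_X86_EVENT_PEBS_LDLAT */
#define FAKE_PEBS_ST    0x2	/* placeholder for PERF_X86_EVENT_PEBS_ST */

static uint64_t pebs_enable_bits(int idx, unsigned int flags)
{
	uint64_t val = 1ULL << idx;		/* base PEBS enable for counter idx */

	if (flags & FAKE_PEBS_LDLAT)
		val |= 1ULL << (idx + 32);	/* load-latency companion bit */
	else if (flags & FAKE_PEBS_ST)
		val |= 1ULL << 63;		/* precise-store bit */

	return val;
}

int main(void)
{
	printf("counter 1, load latency : %#018llx\n",
	       (unsigned long long)pebs_enable_bits(1, FAKE_PEBS_LDLAT));
	printf("counter 2, precise store: %#018llx\n",
	       (unsigned long long)pebs_enable_bits(2, FAKE_PEBS_ST));
	return 0;
}

Built with any C compiler, the two prints come out as 0x0000000200000002 and 0x8000000000000004, which is the shape of the enable mask the hunk writes for a load-latency event and a precise-store event respectively.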
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200dbfe7e..d0f9e5aa2151 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -17,6 +17,9 @@ static struct event_constraint constraint_fixed =
17static struct event_constraint constraint_empty = 17static struct event_constraint constraint_empty =
18 EVENT_CONSTRAINT(0, 0, 0); 18 EVENT_CONSTRAINT(0, 0, 0);
19 19
20#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
21 ((1ULL << (n)) - 1)))
22
20DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 23DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
21DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); 24DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
22DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 25DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
@@ -31,9 +34,13 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
31DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); 34DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
32DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); 35DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
33DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); 36DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
37DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
34DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); 38DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
39DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
35DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); 40DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
41DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
36DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); 42DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
43DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
37DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); 44DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
38DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); 45DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
39DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); 46DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
@@ -110,6 +117,21 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even
110 reg1->alloc = 0; 117 reg1->alloc = 0;
111} 118}
112 119
120static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
121{
122 struct intel_uncore_extra_reg *er;
123 unsigned long flags;
124 u64 config;
125
126 er = &box->shared_regs[idx];
127
128 raw_spin_lock_irqsave(&er->lock, flags);
129 config = er->config;
130 raw_spin_unlock_irqrestore(&er->lock, flags);
131
132 return config;
133}
134
113/* Sandy Bridge-EP uncore support */ 135/* Sandy Bridge-EP uncore support */
114static struct intel_uncore_type snbep_uncore_cbox; 136static struct intel_uncore_type snbep_uncore_cbox;
115static struct intel_uncore_type snbep_uncore_pcu; 137static struct intel_uncore_type snbep_uncore_pcu;
@@ -205,7 +227,7 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
205 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 227 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
206 228
207 if (reg1->idx != EXTRA_REG_NONE) 229 if (reg1->idx != EXTRA_REG_NONE)
208 wrmsrl(reg1->reg, reg1->config); 230 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
209 231
210 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 232 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
211} 233}
@@ -226,29 +248,6 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
226 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT); 248 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
227} 249}
228 250
229static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
230{
231 struct hw_perf_event *hwc = &event->hw;
232 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
233
234 if (box->pmu->type == &snbep_uncore_cbox) {
235 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
236 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
237 reg1->config = event->attr.config1 &
238 SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
239 } else {
240 if (box->pmu->type == &snbep_uncore_pcu) {
241 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
242 reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
243 } else {
244 return 0;
245 }
246 }
247 reg1->idx = 0;
248
249 return 0;
250}
251
252static struct attribute *snbep_uncore_formats_attr[] = { 251static struct attribute *snbep_uncore_formats_attr[] = {
253 &format_attr_event.attr, 252 &format_attr_event.attr,
254 &format_attr_umask.attr, 253 &format_attr_umask.attr,
@@ -345,16 +344,16 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
345 .attrs = snbep_uncore_qpi_formats_attr, 344 .attrs = snbep_uncore_qpi_formats_attr,
346}; 345};
347 346
347#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
348 .init_box = snbep_uncore_msr_init_box, \
349 .disable_box = snbep_uncore_msr_disable_box, \
350 .enable_box = snbep_uncore_msr_enable_box, \
351 .disable_event = snbep_uncore_msr_disable_event, \
352 .enable_event = snbep_uncore_msr_enable_event, \
353 .read_counter = uncore_msr_read_counter
354
348static struct intel_uncore_ops snbep_uncore_msr_ops = { 355static struct intel_uncore_ops snbep_uncore_msr_ops = {
349 .init_box = snbep_uncore_msr_init_box, 356 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
350 .disable_box = snbep_uncore_msr_disable_box,
351 .enable_box = snbep_uncore_msr_enable_box,
352 .disable_event = snbep_uncore_msr_disable_event,
353 .enable_event = snbep_uncore_msr_enable_event,
354 .read_counter = uncore_msr_read_counter,
355 .get_constraint = uncore_get_constraint,
356 .put_constraint = uncore_put_constraint,
357 .hw_config = snbep_uncore_hw_config,
358}; 357};
359 358
360static struct intel_uncore_ops snbep_uncore_pci_ops = { 359static struct intel_uncore_ops snbep_uncore_pci_ops = {
@@ -372,6 +371,7 @@ static struct event_constraint snbep_uncore_cbox_constraints[] = {
372 UNCORE_EVENT_CONSTRAINT(0x04, 0x3), 371 UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
373 UNCORE_EVENT_CONSTRAINT(0x05, 0x3), 372 UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
374 UNCORE_EVENT_CONSTRAINT(0x07, 0x3), 373 UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
374 UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
375 UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 375 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
376 UNCORE_EVENT_CONSTRAINT(0x12, 0x3), 376 UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
377 UNCORE_EVENT_CONSTRAINT(0x13, 0x3), 377 UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
@@ -421,6 +421,14 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
421 UNCORE_EVENT_CONSTRAINT(0x24, 0x3), 421 UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
422 UNCORE_EVENT_CONSTRAINT(0x25, 0x3), 422 UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
423 UNCORE_EVENT_CONSTRAINT(0x26, 0x3), 423 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
427 UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
428 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
429 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
430 UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
431 UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
424 UNCORE_EVENT_CONSTRAINT(0x30, 0x3), 432 UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
425 UNCORE_EVENT_CONSTRAINT(0x31, 0x3), 433 UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
426 UNCORE_EVENT_CONSTRAINT(0x32, 0x3), 434 UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
@@ -428,6 +436,8 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
428 UNCORE_EVENT_CONSTRAINT(0x34, 0x3), 436 UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
429 UNCORE_EVENT_CONSTRAINT(0x36, 0x3), 437 UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
430 UNCORE_EVENT_CONSTRAINT(0x37, 0x3), 438 UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
439 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
440 UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
431 EVENT_CONSTRAINT_END 441 EVENT_CONSTRAINT_END
432}; 442};
433 443
@@ -446,6 +456,145 @@ static struct intel_uncore_type snbep_uncore_ubox = {
446 .format_group = &snbep_uncore_ubox_format_group, 456 .format_group = &snbep_uncore_ubox_format_group,
447}; 457};
448 458
459static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
460 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
461 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
462 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
463 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
464 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
465 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
466 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
467 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
468 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
469 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
470 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
471 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
472 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
473 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
474 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
475 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
476 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
477 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
478 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
479 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
480 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
481 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
482 EVENT_EXTRA_END
483};
484
485static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
486{
487 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
488 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
489 int i;
490
491 if (uncore_box_is_fake(box))
492 return;
493
494 for (i = 0; i < 5; i++) {
495 if (reg1->alloc & (0x1 << i))
496 atomic_sub(1 << (i * 6), &er->ref);
497 }
498 reg1->alloc = 0;
499}
500
501static struct event_constraint *
502__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
503 u64 (*cbox_filter_mask)(int fields))
504{
505 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
506 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
507 int i, alloc = 0;
508 unsigned long flags;
509 u64 mask;
510
511 if (reg1->idx == EXTRA_REG_NONE)
512 return NULL;
513
514 raw_spin_lock_irqsave(&er->lock, flags);
515 for (i = 0; i < 5; i++) {
516 if (!(reg1->idx & (0x1 << i)))
517 continue;
518 if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
519 continue;
520
521 mask = cbox_filter_mask(0x1 << i);
522 if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
523 !((reg1->config ^ er->config) & mask)) {
524 atomic_add(1 << (i * 6), &er->ref);
525 er->config &= ~mask;
526 er->config |= reg1->config & mask;
527 alloc |= (0x1 << i);
528 } else {
529 break;
530 }
531 }
532 raw_spin_unlock_irqrestore(&er->lock, flags);
533 if (i < 5)
534 goto fail;
535
536 if (!uncore_box_is_fake(box))
537 reg1->alloc |= alloc;
538
539 return 0;
540fail:
541 for (; i >= 0; i--) {
542 if (alloc & (0x1 << i))
543 atomic_sub(1 << (i * 6), &er->ref);
544 }
545 return &constraint_empty;
546}
547
548static u64 snbep_cbox_filter_mask(int fields)
549{
550 u64 mask = 0;
551
552 if (fields & 0x1)
553 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
554 if (fields & 0x2)
555 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
556 if (fields & 0x4)
557 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
558 if (fields & 0x8)
559 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
560
561 return mask;
562}
563
564static struct event_constraint *
565snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
566{
567 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
568}
569
570static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
571{
572 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
573 struct extra_reg *er;
574 int idx = 0;
575
576 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
577 if (er->event != (event->hw.config & er->config_mask))
578 continue;
579 idx |= er->idx;
580 }
581
582 if (idx) {
583 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
584 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
585 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
586 reg1->idx = idx;
587 }
588 return 0;
589}
590
591static struct intel_uncore_ops snbep_uncore_cbox_ops = {
592 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
593 .hw_config = snbep_cbox_hw_config,
594 .get_constraint = snbep_cbox_get_constraint,
595 .put_constraint = snbep_cbox_put_constraint,
596};
597
449static struct intel_uncore_type snbep_uncore_cbox = { 598static struct intel_uncore_type snbep_uncore_cbox = {
450 .name = "cbox", 599 .name = "cbox",
451 .num_counters = 4, 600 .num_counters = 4,
@@ -458,10 +607,104 @@ static struct intel_uncore_type snbep_uncore_cbox = {
458 .msr_offset = SNBEP_CBO_MSR_OFFSET, 607 .msr_offset = SNBEP_CBO_MSR_OFFSET,
459 .num_shared_regs = 1, 608 .num_shared_regs = 1,
460 .constraints = snbep_uncore_cbox_constraints, 609 .constraints = snbep_uncore_cbox_constraints,
461 .ops = &snbep_uncore_msr_ops, 610 .ops = &snbep_uncore_cbox_ops,
462 .format_group = &snbep_uncore_cbox_format_group, 611 .format_group = &snbep_uncore_cbox_format_group,
463}; 612};
464 613
614static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
615{
616 struct hw_perf_event *hwc = &event->hw;
617 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
618 u64 config = reg1->config;
619
620 if (new_idx > reg1->idx)
621 config <<= 8 * (new_idx - reg1->idx);
622 else
623 config >>= 8 * (reg1->idx - new_idx);
624
625 if (modify) {
626 hwc->config += new_idx - reg1->idx;
627 reg1->config = config;
628 reg1->idx = new_idx;
629 }
630 return config;
631}
632
633static struct event_constraint *
634snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
635{
636 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
637 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
638 unsigned long flags;
639 int idx = reg1->idx;
640 u64 mask, config1 = reg1->config;
641 bool ok = false;
642
643 if (reg1->idx == EXTRA_REG_NONE ||
644 (!uncore_box_is_fake(box) && reg1->alloc))
645 return NULL;
646again:
647 mask = 0xff << (idx * 8);
648 raw_spin_lock_irqsave(&er->lock, flags);
649 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
650 !((config1 ^ er->config) & mask)) {
651 atomic_add(1 << (idx * 8), &er->ref);
652 er->config &= ~mask;
653 er->config |= config1 & mask;
654 ok = true;
655 }
656 raw_spin_unlock_irqrestore(&er->lock, flags);
657
658 if (!ok) {
659 idx = (idx + 1) % 4;
660 if (idx != reg1->idx) {
661 config1 = snbep_pcu_alter_er(event, idx, false);
662 goto again;
663 }
664 return &constraint_empty;
665 }
666
667 if (!uncore_box_is_fake(box)) {
668 if (idx != reg1->idx)
669 snbep_pcu_alter_er(event, idx, true);
670 reg1->alloc = 1;
671 }
672 return NULL;
673}
674
675static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
676{
677 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
678 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
679
680 if (uncore_box_is_fake(box) || !reg1->alloc)
681 return;
682
683 atomic_sub(1 << (reg1->idx * 8), &er->ref);
684 reg1->alloc = 0;
685}
686
687static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
688{
689 struct hw_perf_event *hwc = &event->hw;
690 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
691 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
692
693 if (ev_sel >= 0xb && ev_sel <= 0xe) {
694 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
695 reg1->idx = ev_sel - 0xb;
696 reg1->config = event->attr.config1 & (0xff << reg1->idx);
697 }
698 return 0;
699}
700
701static struct intel_uncore_ops snbep_uncore_pcu_ops = {
702 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
703 .hw_config = snbep_pcu_hw_config,
704 .get_constraint = snbep_pcu_get_constraint,
705 .put_constraint = snbep_pcu_put_constraint,
706};
707
465static struct intel_uncore_type snbep_uncore_pcu = { 708static struct intel_uncore_type snbep_uncore_pcu = {
466 .name = "pcu", 709 .name = "pcu",
467 .num_counters = 4, 710 .num_counters = 4,
@@ -472,7 +715,7 @@ static struct intel_uncore_type snbep_uncore_pcu = {
472 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 715 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
473 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL, 716 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
474 .num_shared_regs = 1, 717 .num_shared_regs = 1,
475 .ops = &snbep_uncore_msr_ops, 718 .ops = &snbep_uncore_pcu_ops,
476 .format_group = &snbep_uncore_pcu_format_group, 719 .format_group = &snbep_uncore_pcu_format_group,
477}; 720};
478 721
@@ -544,55 +787,63 @@ static struct intel_uncore_type snbep_uncore_r3qpi = {
544 SNBEP_UNCORE_PCI_COMMON_INIT(), 787 SNBEP_UNCORE_PCI_COMMON_INIT(),
545}; 788};
546 789
790enum {
791 SNBEP_PCI_UNCORE_HA,
792 SNBEP_PCI_UNCORE_IMC,
793 SNBEP_PCI_UNCORE_QPI,
794 SNBEP_PCI_UNCORE_R2PCIE,
795 SNBEP_PCI_UNCORE_R3QPI,
796};
797
547static struct intel_uncore_type *snbep_pci_uncores[] = { 798static struct intel_uncore_type *snbep_pci_uncores[] = {
548 &snbep_uncore_ha, 799 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
549 &snbep_uncore_imc, 800 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
550 &snbep_uncore_qpi, 801 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
551 &snbep_uncore_r2pcie, 802 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
552 &snbep_uncore_r3qpi, 803 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
553 NULL, 804 NULL,
554}; 805};
555 806
556static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { 807static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
557 { /* Home Agent */ 808 { /* Home Agent */
558 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), 809 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
559 .driver_data = (unsigned long)&snbep_uncore_ha, 810 .driver_data = SNBEP_PCI_UNCORE_HA,
560 }, 811 },
561 { /* MC Channel 0 */ 812 { /* MC Channel 0 */
562 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), 813 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
563 .driver_data = (unsigned long)&snbep_uncore_imc, 814 .driver_data = SNBEP_PCI_UNCORE_IMC,
564 }, 815 },
565 { /* MC Channel 1 */ 816 { /* MC Channel 1 */
566 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), 817 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
567 .driver_data = (unsigned long)&snbep_uncore_imc, 818 .driver_data = SNBEP_PCI_UNCORE_IMC,
568 }, 819 },
569 { /* MC Channel 2 */ 820 { /* MC Channel 2 */
570 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), 821 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
571 .driver_data = (unsigned long)&snbep_uncore_imc, 822 .driver_data = SNBEP_PCI_UNCORE_IMC,
572 }, 823 },
573 { /* MC Channel 3 */ 824 { /* MC Channel 3 */
574 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), 825 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
575 .driver_data = (unsigned long)&snbep_uncore_imc, 826 .driver_data = SNBEP_PCI_UNCORE_IMC,
576 }, 827 },
577 { /* QPI Port 0 */ 828 { /* QPI Port 0 */
578 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), 829 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
579 .driver_data = (unsigned long)&snbep_uncore_qpi, 830 .driver_data = SNBEP_PCI_UNCORE_QPI,
580 }, 831 },
581 { /* QPI Port 1 */ 832 { /* QPI Port 1 */
582 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), 833 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
583 .driver_data = (unsigned long)&snbep_uncore_qpi, 834 .driver_data = SNBEP_PCI_UNCORE_QPI,
584 }, 835 },
585 { /* P2PCIe */ 836 { /* R2PCIe */
586 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), 837 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
587 .driver_data = (unsigned long)&snbep_uncore_r2pcie, 838 .driver_data = SNBEP_PCI_UNCORE_R2PCIE,
588 }, 839 },
589 { /* R3QPI Link 0 */ 840 { /* R3QPI Link 0 */
590 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), 841 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
591 .driver_data = (unsigned long)&snbep_uncore_r3qpi, 842 .driver_data = SNBEP_PCI_UNCORE_R3QPI,
592 }, 843 },
593 { /* R3QPI Link 1 */ 844 { /* R3QPI Link 1 */
594 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), 845 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
595 .driver_data = (unsigned long)&snbep_uncore_r3qpi, 846 .driver_data = SNBEP_PCI_UNCORE_R3QPI,
596 }, 847 },
597 { /* end: all zeroes */ } 848 { /* end: all zeroes */ }
598}; 849};
@@ -605,7 +856,7 @@ static struct pci_driver snbep_uncore_pci_driver = {
605/* 856/*
606 * build pci bus to socket mapping 857 * build pci bus to socket mapping
607 */ 858 */
608static int snbep_pci2phy_map_init(void) 859static int snbep_pci2phy_map_init(int devid)
609{ 860{
610 struct pci_dev *ubox_dev = NULL; 861 struct pci_dev *ubox_dev = NULL;
611 int i, bus, nodeid; 862 int i, bus, nodeid;
@@ -614,9 +865,7 @@ static int snbep_pci2phy_map_init(void)
614 865
615 while (1) { 866 while (1) {
616 /* find the UBOX device */ 867 /* find the UBOX device */
617 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 868 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
618 PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
619 ubox_dev);
620 if (!ubox_dev) 869 if (!ubox_dev)
621 break; 870 break;
622 bus = ubox_dev->bus->number; 871 bus = ubox_dev->bus->number;
@@ -639,7 +888,7 @@ static int snbep_pci2phy_map_init(void)
639 break; 888 break;
640 } 889 }
641 } 890 }
642 }; 891 }
643 892
644 if (ubox_dev) 893 if (ubox_dev)
645 pci_dev_put(ubox_dev); 894 pci_dev_put(ubox_dev);
@@ -648,6 +897,440 @@ static int snbep_pci2phy_map_init(void)
648} 897}
649/* end of Sandy Bridge-EP uncore support */ 898/* end of Sandy Bridge-EP uncore support */
650 899
900/* IvyTown uncore support */
901static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
902{
903 unsigned msr = uncore_msr_box_ctl(box);
904 if (msr)
905 wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
906}
907
908static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
909{
910 struct pci_dev *pdev = box->pci_dev;
911
912 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
913}
914
915#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
916 .init_box = ivt_uncore_msr_init_box, \
917 .disable_box = snbep_uncore_msr_disable_box, \
918 .enable_box = snbep_uncore_msr_enable_box, \
919 .disable_event = snbep_uncore_msr_disable_event, \
920 .enable_event = snbep_uncore_msr_enable_event, \
921 .read_counter = uncore_msr_read_counter
922
923static struct intel_uncore_ops ivt_uncore_msr_ops = {
924 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
925};
926
927static struct intel_uncore_ops ivt_uncore_pci_ops = {
928 .init_box = ivt_uncore_pci_init_box,
929 .disable_box = snbep_uncore_pci_disable_box,
930 .enable_box = snbep_uncore_pci_enable_box,
931 .disable_event = snbep_uncore_pci_disable_event,
932 .enable_event = snbep_uncore_pci_enable_event,
933 .read_counter = snbep_uncore_pci_read_counter,
934};
935
936#define IVT_UNCORE_PCI_COMMON_INIT() \
937 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
938 .event_ctl = SNBEP_PCI_PMON_CTL0, \
939 .event_mask = IVT_PMON_RAW_EVENT_MASK, \
940 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
941 .ops = &ivt_uncore_pci_ops, \
942 .format_group = &ivt_uncore_format_group
943
944static struct attribute *ivt_uncore_formats_attr[] = {
945 &format_attr_event.attr,
946 &format_attr_umask.attr,
947 &format_attr_edge.attr,
948 &format_attr_inv.attr,
949 &format_attr_thresh8.attr,
950 NULL,
951};
952
953static struct attribute *ivt_uncore_ubox_formats_attr[] = {
954 &format_attr_event.attr,
955 &format_attr_umask.attr,
956 &format_attr_edge.attr,
957 &format_attr_inv.attr,
958 &format_attr_thresh5.attr,
959 NULL,
960};
961
962static struct attribute *ivt_uncore_cbox_formats_attr[] = {
963 &format_attr_event.attr,
964 &format_attr_umask.attr,
965 &format_attr_edge.attr,
966 &format_attr_tid_en.attr,
967 &format_attr_thresh8.attr,
968 &format_attr_filter_tid.attr,
969 &format_attr_filter_link.attr,
970 &format_attr_filter_state2.attr,
971 &format_attr_filter_nid2.attr,
972 &format_attr_filter_opc2.attr,
973 NULL,
974};
975
976static struct attribute *ivt_uncore_pcu_formats_attr[] = {
977 &format_attr_event_ext.attr,
978 &format_attr_occ_sel.attr,
979 &format_attr_edge.attr,
980 &format_attr_thresh5.attr,
981 &format_attr_occ_invert.attr,
982 &format_attr_occ_edge.attr,
983 &format_attr_filter_band0.attr,
984 &format_attr_filter_band1.attr,
985 &format_attr_filter_band2.attr,
986 &format_attr_filter_band3.attr,
987 NULL,
988};
989
990static struct attribute *ivt_uncore_qpi_formats_attr[] = {
991 &format_attr_event_ext.attr,
992 &format_attr_umask.attr,
993 &format_attr_edge.attr,
994 &format_attr_thresh8.attr,
995 NULL,
996};
997
998static struct attribute_group ivt_uncore_format_group = {
999 .name = "format",
1000 .attrs = ivt_uncore_formats_attr,
1001};
1002
1003static struct attribute_group ivt_uncore_ubox_format_group = {
1004 .name = "format",
1005 .attrs = ivt_uncore_ubox_formats_attr,
1006};
1007
1008static struct attribute_group ivt_uncore_cbox_format_group = {
1009 .name = "format",
1010 .attrs = ivt_uncore_cbox_formats_attr,
1011};
1012
1013static struct attribute_group ivt_uncore_pcu_format_group = {
1014 .name = "format",
1015 .attrs = ivt_uncore_pcu_formats_attr,
1016};
1017
1018static struct attribute_group ivt_uncore_qpi_format_group = {
1019 .name = "format",
1020 .attrs = ivt_uncore_qpi_formats_attr,
1021};
1022
1023static struct intel_uncore_type ivt_uncore_ubox = {
1024 .name = "ubox",
1025 .num_counters = 2,
1026 .num_boxes = 1,
1027 .perf_ctr_bits = 44,
1028 .fixed_ctr_bits = 48,
1029 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1030 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
1031 .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
1032 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1033 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1034 .ops = &ivt_uncore_msr_ops,
1035 .format_group = &ivt_uncore_ubox_format_group,
1036};
1037
1038static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1039 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1040 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1041 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1042 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1043 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1044 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1045 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1046 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1047 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1048 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1049 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1050 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1051 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1052 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1053 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1054 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1055 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1056 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1057 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1058 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1059 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1060 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1061 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1062 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1063 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1064 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1065 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1066 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1067 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1068 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1069 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1070 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1071 EVENT_EXTRA_END
1072};
1073
1074static u64 ivt_cbox_filter_mask(int fields)
1075{
1076 u64 mask = 0;
1077
1078 if (fields & 0x1)
1079 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
1080 if (fields & 0x2)
1081 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
1082 if (fields & 0x4)
1083 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
1084 if (fields & 0x8)
1085 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
1086 if (fields & 0x10)
1087 mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
1088
1089 return mask;
1090}
1091
1092static struct event_constraint *
1093ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1094{
1095 return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
1096}
1097
1098static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1099{
1100 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1101 struct extra_reg *er;
1102 int idx = 0;
1103
1104 for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
1105 if (er->event != (event->hw.config & er->config_mask))
1106 continue;
1107 idx |= er->idx;
1108 }
1109
1110 if (idx) {
1111 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1112 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1113 reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
1114 reg1->idx = idx;
1115 }
1116 return 0;
1117}
1118
1119static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1120{
1121 struct hw_perf_event *hwc = &event->hw;
1122 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1123
1124 if (reg1->idx != EXTRA_REG_NONE) {
1125 u64 filter = uncore_shared_reg_config(box, 0);
1126 wrmsrl(reg1->reg, filter & 0xffffffff);
1127 wrmsrl(reg1->reg + 6, filter >> 32);
1128 }
1129
1130 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1131}
1132
1133static struct intel_uncore_ops ivt_uncore_cbox_ops = {
1134 .init_box = ivt_uncore_msr_init_box,
1135 .disable_box = snbep_uncore_msr_disable_box,
1136 .enable_box = snbep_uncore_msr_enable_box,
1137 .disable_event = snbep_uncore_msr_disable_event,
1138 .enable_event = ivt_cbox_enable_event,
1139 .read_counter = uncore_msr_read_counter,
1140 .hw_config = ivt_cbox_hw_config,
1141 .get_constraint = ivt_cbox_get_constraint,
1142 .put_constraint = snbep_cbox_put_constraint,
1143};
1144
1145static struct intel_uncore_type ivt_uncore_cbox = {
1146 .name = "cbox",
1147 .num_counters = 4,
1148 .num_boxes = 15,
1149 .perf_ctr_bits = 44,
1150 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1151 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
1152 .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
1153 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1154 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1155 .num_shared_regs = 1,
1156 .constraints = snbep_uncore_cbox_constraints,
1157 .ops = &ivt_uncore_cbox_ops,
1158 .format_group = &ivt_uncore_cbox_format_group,
1159};
1160
1161static struct intel_uncore_ops ivt_uncore_pcu_ops = {
1162 IVT_UNCORE_MSR_OPS_COMMON_INIT(),
1163 .hw_config = snbep_pcu_hw_config,
1164 .get_constraint = snbep_pcu_get_constraint,
1165 .put_constraint = snbep_pcu_put_constraint,
1166};
1167
1168static struct intel_uncore_type ivt_uncore_pcu = {
1169 .name = "pcu",
1170 .num_counters = 4,
1171 .num_boxes = 1,
1172 .perf_ctr_bits = 48,
1173 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1174 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1175 .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
1176 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1177 .num_shared_regs = 1,
1178 .ops = &ivt_uncore_pcu_ops,
1179 .format_group = &ivt_uncore_pcu_format_group,
1180};
1181
1182static struct intel_uncore_type *ivt_msr_uncores[] = {
1183 &ivt_uncore_ubox,
1184 &ivt_uncore_cbox,
1185 &ivt_uncore_pcu,
1186 NULL,
1187};
1188
1189static struct intel_uncore_type ivt_uncore_ha = {
1190 .name = "ha",
1191 .num_counters = 4,
1192 .num_boxes = 2,
1193 .perf_ctr_bits = 48,
1194 IVT_UNCORE_PCI_COMMON_INIT(),
1195};
1196
1197static struct intel_uncore_type ivt_uncore_imc = {
1198 .name = "imc",
1199 .num_counters = 4,
1200 .num_boxes = 8,
1201 .perf_ctr_bits = 48,
1202 .fixed_ctr_bits = 48,
1203 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1204 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1205 IVT_UNCORE_PCI_COMMON_INIT(),
1206};
1207
1208static struct intel_uncore_type ivt_uncore_qpi = {
1209 .name = "qpi",
1210 .num_counters = 4,
1211 .num_boxes = 3,
1212 .perf_ctr_bits = 48,
1213 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1214 .event_ctl = SNBEP_PCI_PMON_CTL0,
1215 .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
1216 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1217 .ops = &ivt_uncore_pci_ops,
1218 .format_group = &ivt_uncore_qpi_format_group,
1219};
1220
1221static struct intel_uncore_type ivt_uncore_r2pcie = {
1222 .name = "r2pcie",
1223 .num_counters = 4,
1224 .num_boxes = 1,
1225 .perf_ctr_bits = 44,
1226 .constraints = snbep_uncore_r2pcie_constraints,
1227 IVT_UNCORE_PCI_COMMON_INIT(),
1228};
1229
1230static struct intel_uncore_type ivt_uncore_r3qpi = {
1231 .name = "r3qpi",
1232 .num_counters = 3,
1233 .num_boxes = 2,
1234 .perf_ctr_bits = 44,
1235 .constraints = snbep_uncore_r3qpi_constraints,
1236 IVT_UNCORE_PCI_COMMON_INIT(),
1237};
1238
1239enum {
1240 IVT_PCI_UNCORE_HA,
1241 IVT_PCI_UNCORE_IMC,
1242 IVT_PCI_UNCORE_QPI,
1243 IVT_PCI_UNCORE_R2PCIE,
1244 IVT_PCI_UNCORE_R3QPI,
1245};
1246
1247static struct intel_uncore_type *ivt_pci_uncores[] = {
1248 [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
1249 [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
1250 [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
1251 [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
1252 [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
1253 NULL,
1254};
1255
1256static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1257 { /* Home Agent 0 */
1258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1259 .driver_data = IVT_PCI_UNCORE_HA,
1260 },
1261 { /* Home Agent 1 */
1262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1263 .driver_data = IVT_PCI_UNCORE_HA,
1264 },
1265 { /* MC0 Channel 0 */
1266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1267 .driver_data = IVT_PCI_UNCORE_IMC,
1268 },
1269 { /* MC0 Channel 1 */
1270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1271 .driver_data = IVT_PCI_UNCORE_IMC,
1272 },
1273 { /* MC0 Channel 3 */
1274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1275 .driver_data = IVT_PCI_UNCORE_IMC,
1276 },
1277 { /* MC0 Channel 4 */
1278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1279 .driver_data = IVT_PCI_UNCORE_IMC,
1280 },
1281 { /* MC1 Channel 0 */
1282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1283 .driver_data = IVT_PCI_UNCORE_IMC,
1284 },
1285 { /* MC1 Channel 1 */
1286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1287 .driver_data = IVT_PCI_UNCORE_IMC,
1288 },
1289 { /* MC1 Channel 3 */
1290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1291 .driver_data = IVT_PCI_UNCORE_IMC,
1292 },
1293 { /* MC1 Channel 4 */
1294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1295 .driver_data = IVT_PCI_UNCORE_IMC,
1296 },
1297 { /* QPI0 Port 0 */
1298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1299 .driver_data = IVT_PCI_UNCORE_QPI,
1300 },
1301 { /* QPI0 Port 1 */
1302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1303 .driver_data = IVT_PCI_UNCORE_QPI,
1304 },
1305 { /* QPI1 Port 2 */
1306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1307 .driver_data = IVT_PCI_UNCORE_QPI,
1308 },
1309 { /* R2PCIe */
1310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1311 .driver_data = IVT_PCI_UNCORE_R2PCIE,
1312 },
1313 { /* R3QPI0 Link 0 */
1314 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1315 .driver_data = IVT_PCI_UNCORE_R3QPI,
1316 },
1317 { /* R3QPI0 Link 1 */
1318 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1319 .driver_data = IVT_PCI_UNCORE_R3QPI,
1320 },
1321 { /* R3QPI1 Link 2 */
1322 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1323 .driver_data = IVT_PCI_UNCORE_R3QPI,
1324 },
1325 { /* end: all zeroes */ }
1326};
1327
1328static struct pci_driver ivt_uncore_pci_driver = {
1329 .name = "ivt_uncore",
1330 .id_table = ivt_uncore_pci_ids,
1331};
1332/* end of IvyTown uncore support */
1333
651/* Sandy Bridge uncore support */ 1334/* Sandy Bridge uncore support */
652static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) 1335static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
653{ 1336{
@@ -808,9 +1491,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
808/* end of Nehalem uncore support */ 1491/* end of Nehalem uncore support */
809 1492
810/* Nehalem-EX uncore support */ 1493/* Nehalem-EX uncore support */
811#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
812 ((1ULL << (n)) - 1)))
813
814DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); 1494DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
815DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); 1495DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
816DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); 1496DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
@@ -1161,7 +1841,7 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
1161}; 1841};
1162 1842
1163/* Nehalem-EX or Westmere-EX ? */ 1843/* Nehalem-EX or Westmere-EX ? */
1164bool uncore_nhmex; 1844static bool uncore_nhmex;
1165 1845
1166static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) 1846static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
1167{ 1847{
@@ -1239,7 +1919,7 @@ static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
1239 atomic_sub(1 << (idx * 8), &er->ref); 1919 atomic_sub(1 << (idx * 8), &er->ref);
1240} 1920}
1241 1921
1242u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) 1922static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
1243{ 1923{
1244 struct hw_perf_event *hwc = &event->hw; 1924 struct hw_perf_event *hwc = &event->hw;
1245 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 1925 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1554,7 +2234,7 @@ static struct intel_uncore_type nhmex_uncore_mbox = {
1554 .format_group = &nhmex_uncore_mbox_format_group, 2234 .format_group = &nhmex_uncore_mbox_format_group,
1555}; 2235};
1556 2236
1557void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) 2237static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
1558{ 2238{
1559 struct hw_perf_event *hwc = &event->hw; 2239 struct hw_perf_event *hwc = &event->hw;
1560 struct hw_perf_event_extra *reg1 = &hwc->extra_reg; 2240 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1724,21 +2404,6 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
1724 return 0; 2404 return 0;
1725} 2405}
1726 2406
1727static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
1728{
1729 struct intel_uncore_extra_reg *er;
1730 unsigned long flags;
1731 u64 config;
1732
1733 er = &box->shared_regs[idx];
1734
1735 raw_spin_lock_irqsave(&er->lock, flags);
1736 config = er->config;
1737 raw_spin_unlock_irqrestore(&er->lock, flags);
1738
1739 return config;
1740}
1741
1742static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) 2407static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1743{ 2408{
1744 struct hw_perf_event *hwc = &event->hw; 2409 struct hw_perf_event *hwc = &event->hw;
@@ -1759,7 +2424,7 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
1759 case 2: 2424 case 2:
1760 case 3: 2425 case 3:
1761 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), 2426 wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
1762 nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5)); 2427 uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
1763 break; 2428 break;
1764 case 4: 2429 case 4:
1765 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), 2430 wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
@@ -2285,7 +2950,7 @@ out:
2285 return ret; 2950 return ret;
2286} 2951}
2287 2952
2288int uncore_pmu_event_init(struct perf_event *event) 2953static int uncore_pmu_event_init(struct perf_event *event)
2289{ 2954{
2290 struct intel_uncore_pmu *pmu; 2955 struct intel_uncore_pmu *pmu;
2291 struct intel_uncore_box *box; 2956 struct intel_uncore_box *box;
@@ -2438,7 +3103,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
2438 3103
2439 type->unconstrainted = (struct event_constraint) 3104 type->unconstrainted = (struct event_constraint)
2440 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, 3105 __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
2441 0, type->num_counters, 0); 3106 0, type->num_counters, 0, 0);
2442 3107
2443 for (i = 0; i < type->num_boxes; i++) { 3108 for (i = 0; i < type->num_boxes; i++) {
2444 pmus[i].func_id = -1; 3109 pmus[i].func_id = -1;
@@ -2556,6 +3221,8 @@ static void uncore_pci_remove(struct pci_dev *pdev)
2556 if (WARN_ON_ONCE(phys_id != box->phys_id)) 3221 if (WARN_ON_ONCE(phys_id != box->phys_id))
2557 return; 3222 return;
2558 3223
3224 pci_set_drvdata(pdev, NULL);
3225
2559 raw_spin_lock(&uncore_box_lock); 3226 raw_spin_lock(&uncore_box_lock);
2560 list_del(&box->list); 3227 list_del(&box->list);
2561 raw_spin_unlock(&uncore_box_lock); 3228 raw_spin_unlock(&uncore_box_lock);
@@ -2574,11 +3241,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
2574static int uncore_pci_probe(struct pci_dev *pdev, 3241static int uncore_pci_probe(struct pci_dev *pdev,
2575 const struct pci_device_id *id) 3242 const struct pci_device_id *id)
2576{ 3243{
2577 struct intel_uncore_type *type; 3244 return uncore_pci_add(pci_uncores[id->driver_data], pdev);
2578
2579 type = (struct intel_uncore_type *)id->driver_data;
2580
2581 return uncore_pci_add(type, pdev);
2582} 3245}
2583 3246
2584static int __init uncore_pci_init(void) 3247static int __init uncore_pci_init(void)
@@ -2587,12 +3250,19 @@ static int __init uncore_pci_init(void)
2587 3250
2588 switch (boot_cpu_data.x86_model) { 3251 switch (boot_cpu_data.x86_model) {
2589 case 45: /* Sandy Bridge-EP */ 3252 case 45: /* Sandy Bridge-EP */
2590 ret = snbep_pci2phy_map_init(); 3253 ret = snbep_pci2phy_map_init(0x3ce0);
2591 if (ret) 3254 if (ret)
2592 return ret; 3255 return ret;
2593 pci_uncores = snbep_pci_uncores; 3256 pci_uncores = snbep_pci_uncores;
2594 uncore_pci_driver = &snbep_uncore_pci_driver; 3257 uncore_pci_driver = &snbep_uncore_pci_driver;
2595 break; 3258 break;
3259 case 62: /* IvyTown */
3260 ret = snbep_pci2phy_map_init(0x0e1e);
3261 if (ret)
3262 return ret;
3263 pci_uncores = ivt_pci_uncores;
3264 uncore_pci_driver = &ivt_uncore_pci_driver;
3265 break;
2596 default: 3266 default:
2597 return 0; 3267 return 0;
2598 } 3268 }
@@ -2622,6 +3292,21 @@ static void __init uncore_pci_exit(void)
2622 } 3292 }
2623} 3293}
2624 3294
3295/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3296static LIST_HEAD(boxes_to_free);
3297
3298static void __cpuinit uncore_kfree_boxes(void)
3299{
3300 struct intel_uncore_box *box;
3301
3302 while (!list_empty(&boxes_to_free)) {
3303 box = list_entry(boxes_to_free.next,
3304 struct intel_uncore_box, list);
3305 list_del(&box->list);
3306 kfree(box);
3307 }
3308}
3309
2625static void __cpuinit uncore_cpu_dying(int cpu) 3310static void __cpuinit uncore_cpu_dying(int cpu)
2626{ 3311{
2627 struct intel_uncore_type *type; 3312 struct intel_uncore_type *type;
@@ -2636,7 +3321,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
2636 box = *per_cpu_ptr(pmu->box, cpu); 3321 box = *per_cpu_ptr(pmu->box, cpu);
2637 *per_cpu_ptr(pmu->box, cpu) = NULL; 3322 *per_cpu_ptr(pmu->box, cpu) = NULL;
2638 if (box && atomic_dec_and_test(&box->refcnt)) 3323 if (box && atomic_dec_and_test(&box->refcnt))
2639 kfree(box); 3324 list_add(&box->list, &boxes_to_free);
2640 } 3325 }
2641 } 3326 }
2642} 3327}
@@ -2666,8 +3351,11 @@ static int __cpuinit uncore_cpu_starting(int cpu)
2666 if (exist && exist->phys_id == phys_id) { 3351 if (exist && exist->phys_id == phys_id) {
2667 atomic_inc(&exist->refcnt); 3352 atomic_inc(&exist->refcnt);
2668 *per_cpu_ptr(pmu->box, cpu) = exist; 3353 *per_cpu_ptr(pmu->box, cpu) = exist;
2669 kfree(box); 3354 if (box) {
2670 box = NULL; 3355 list_add(&box->list,
3356 &boxes_to_free);
3357 box = NULL;
3358 }
2671 break; 3359 break;
2672 } 3360 }
2673 } 3361 }
@@ -2806,6 +3494,10 @@ static int
2806 case CPU_DYING: 3494 case CPU_DYING:
2807 uncore_cpu_dying(cpu); 3495 uncore_cpu_dying(cpu);
2808 break; 3496 break;
3497 case CPU_ONLINE:
3498 case CPU_DEAD:
3499 uncore_kfree_boxes();
3500 break;
2809 default: 3501 default:
2810 break; 3502 break;
2811 } 3503 }
@@ -2871,6 +3563,12 @@ static int __init uncore_cpu_init(void)
2871 nhmex_uncore_cbox.num_boxes = max_cores; 3563 nhmex_uncore_cbox.num_boxes = max_cores;
2872 msr_uncores = nhmex_msr_uncores; 3564 msr_uncores = nhmex_msr_uncores;
2873 break; 3565 break;
3566 case 62: /* IvyTown */
3567 if (ivt_uncore_cbox.num_boxes > max_cores)
3568 ivt_uncore_cbox.num_boxes = max_cores;
3569 msr_uncores = ivt_msr_uncores;
3570 break;
3571
2874 default: 3572 default:
2875 return 0; 3573 return 0;
2876 } 3574 }
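[__BITS_VALUE and shared-register refcounting, illustrative note] Several hunks above rely on the __BITS_VALUE() macro now defined at the top of perf_event_intel_uncore.c: the Cbox filter code keeps one small reference count per filter field packed into a single atomic_t (6 bits per field, bumped with atomic_add(1 << (i * 6))), and the PCU code does the same with 8-bit fields. The stand-alone sketch below copies the macro verbatim and uses a plain unsigned int instead of an atomic_t, purely to show how the packed counts read back out; it is illustrative only.

/* Stand-alone sketch, not kernel code: packed per-field reference
 * counts as used by the Cbox filter allocation above. */
#include <stdio.h>

#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

int main(void)
{
	unsigned int ref = 0;
	int i;

	ref += 1 << (0 * 6);	/* one user of filter field 0 */
	ref += 1 << (3 * 6);	/* first user of filter field 3 */
	ref += 1 << (3 * 6);	/* second user of filter field 3 */

	for (i = 0; i < 5; i++)
		printf("field %d refcount: %u\n", i,
		       __BITS_VALUE(ref, i, 6));
	return 0;
}

Compiled with gcc (typeof is a GNU extension, as in the kernel), this prints 1 for field 0, 2 for field 3 and 0 elsewhere, which is the bookkeeping snbep_cbox_get_constraint()/snbep_cbox_put_constraint() perform on er->ref.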
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index e68a4550e952..f9528917f6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -76,7 +76,7 @@
76#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00 76#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
77#define SNBEP_PMON_CTL_RST (1 << 17) 77#define SNBEP_PMON_CTL_RST (1 << 17)
78#define SNBEP_PMON_CTL_EDGE_DET (1 << 18) 78#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
79#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */ 79#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
80#define SNBEP_PMON_CTL_EN (1 << 22) 80#define SNBEP_PMON_CTL_EN (1 << 22)
81#define SNBEP_PMON_CTL_INVERT (1 << 23) 81#define SNBEP_PMON_CTL_INVERT (1 << 23)
82#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000 82#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
@@ -148,9 +148,20 @@
148#define SNBEP_C0_MSR_PMON_CTL0 0xd10 148#define SNBEP_C0_MSR_PMON_CTL0 0xd10
149#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04 149#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
150#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14 150#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
151#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
152#define SNBEP_CBO_MSR_OFFSET 0x20 151#define SNBEP_CBO_MSR_OFFSET 0x20
153 152
153#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
154#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
155#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
156#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
157
158#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
159 .event = (e), \
160 .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
161 .config_mask = (m), \
162 .idx = (i) \
163}
164
154/* SNB-EP PCU register */ 165/* SNB-EP PCU register */
155#define SNBEP_PCU_MSR_PMON_CTR0 0xc36 166#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
156#define SNBEP_PCU_MSR_PMON_CTL0 0xc30 167#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
@@ -160,6 +171,55 @@
160#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc 171#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
161#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd 172#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
162 173
174/* IVT event control */
175#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
176 SNBEP_PMON_BOX_CTL_RST_CTRS)
177#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
178 SNBEP_PMON_CTL_UMASK_MASK | \
179 SNBEP_PMON_CTL_EDGE_DET | \
180 SNBEP_PMON_CTL_TRESH_MASK)
181/* IVT Ubox */
182#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00
183#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
184#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
185
186#define IVT_U_MSR_PMON_RAW_EVENT_MASK \
187 (SNBEP_PMON_CTL_EV_SEL_MASK | \
188 SNBEP_PMON_CTL_UMASK_MASK | \
189 SNBEP_PMON_CTL_EDGE_DET | \
190 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
191/* IVT Cbo */
192#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \
193 SNBEP_CBO_PMON_CTL_TID_EN)
194
195#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
196#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
197#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
198#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
199#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
200#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
201#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
202#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63)
203
204/* IVT home agent */
205#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
206#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \
207 (IVT_PMON_RAW_EVENT_MASK | \
208 IVT_HA_PCI_PMON_CTL_Q_OCC_RST)
209/* IVT PCU */
210#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \
211 (SNBEP_PMON_CTL_EV_SEL_MASK | \
212 SNBEP_PMON_CTL_EV_SEL_EXT | \
213 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
214 SNBEP_PMON_CTL_EDGE_DET | \
215 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
216 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
217 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
218/* IVT QPI */
219#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \
220 (IVT_PMON_RAW_EVENT_MASK | \
221 SNBEP_PMON_CTL_EV_SEL_EXT)
222
163/* NHM-EX event control */ 223/* NHM-EX event control */
164#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff 224#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
165#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00 225#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
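[IVT Cbox filter layout, illustrative note] The new IVT_CB0_MSR_PMON_BOX_FILTER_* defines above carve the 64-bit Cbox filter register into non-overlapping fields: TID in bits 0-4, LINK in 5-8, STATE in 17-22, NID in 32-47, OPC in 52-60, with the C6/NC/IOSC flags at bits 61-63. The stand-alone check below copies the five field masks from the header hunk and verifies they are disjoint; it is a sanity sketch, not kernel code, and the single-bit flags are left out for brevity.

/* Stand-alone sketch, not kernel code: the IVT Cbox filter fields
 * defined above, checked for overlap. */
#include <stdint.h>
#include <stdio.h>

#define IVT_CB0_MSR_PMON_BOX_FILTER_TID   (0x1fULL << 0)
#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK  (0xfULL << 5)
#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
#define IVT_CB0_MSR_PMON_BOX_FILTER_NID   (0xffffULL << 32)
#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC   (0x1ffULL << 52)

int main(void)
{
	const uint64_t masks[] = {
		IVT_CB0_MSR_PMON_BOX_FILTER_TID,
		IVT_CB0_MSR_PMON_BOX_FILTER_LINK,
		IVT_CB0_MSR_PMON_BOX_FILTER_STATE,
		IVT_CB0_MSR_PMON_BOX_FILTER_NID,
		IVT_CB0_MSR_PMON_BOX_FILTER_OPC,
	};
	uint64_t seen = 0;
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		if (seen & masks[i])
			printf("field %u overlaps an earlier one\n", i);
		seen |= masks[i];
	}
	printf("combined filter mask: %#018llx\n", (unsigned long long)seen);
	return 0;
}

The combined mask prints as 0x1ff0ffff007e01ff, and the corresponding config1 bit ranges are what the filter_tid/filter_link/filter_state2/filter_nid2/filter_opc2 format attributes added in perf_event_intel_uncore.c expose to userspace.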
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 4b7731bf23a8..838fa8772c62 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -17,7 +17,7 @@ static const u64 knc_perfmon_event_map[] =
17 [PERF_COUNT_HW_BRANCH_MISSES] = 0x002b, 17 [PERF_COUNT_HW_BRANCH_MISSES] = 0x002b,
18}; 18};
19 19
20static __initconst u64 knc_hw_cache_event_ids 20static const u64 __initconst knc_hw_cache_event_ids
21 [PERF_COUNT_HW_CACHE_MAX] 21 [PERF_COUNT_HW_CACHE_MAX]
22 [PERF_COUNT_HW_CACHE_OP_MAX] 22 [PERF_COUNT_HW_CACHE_OP_MAX]
23 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 23 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -284,7 +284,7 @@ static struct attribute *intel_knc_formats_attr[] = {
284 NULL, 284 NULL,
285}; 285};
286 286
287static __initconst struct x86_pmu knc_pmu = { 287static const struct x86_pmu knc_pmu __initconst = {
288 .name = "knc", 288 .name = "knc",
289 .handle_irq = knc_pmu_handle_irq, 289 .handle_irq = knc_pmu_handle_irq,
290 .disable_all = knc_pmu_disable_all, 290 .disable_all = knc_pmu_disable_all,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 92c7e39a079f..3486e6660357 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
895 * So at moment let leave metrics turned on forever -- it's 895 * So at moment let leave metrics turned on forever -- it's
896 * ok for now but need to be revisited! 896 * ok for now but need to be revisited!
897 * 897 *
898 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0); 898 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
899 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0); 899 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
900 */ 900 */
901} 901}
902 902
@@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
910 * asserted again and again 910 * asserted again and again
911 */ 911 */
912 (void)wrmsrl_safe(hwc->config_base, 912 (void)wrmsrl_safe(hwc->config_base,
913 (u64)(p4_config_unpack_cccr(hwc->config)) & 913 p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
914 ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
915} 914}
916 915
917static void p4_pmu_disable_all(void) 916static void p4_pmu_disable_all(void)
@@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
957 u64 escr_addr, cccr; 956 u64 escr_addr, cccr;
958 957
959 bind = &p4_event_bind_map[idx]; 958 bind = &p4_event_bind_map[idx];
960 escr_addr = (u64)bind->escr_msr[thread]; 959 escr_addr = bind->escr_msr[thread];
961 960
962 /* 961 /*
963 * - we dont support cascaded counters yet 962 * - we dont support cascaded counters yet
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 4820c232a0b9..b1e2fe115323 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
19 19
20}; 20};
21 21
22static u64 p6_hw_cache_event_ids 22static const u64 __initconst p6_hw_cache_event_ids
23 [PERF_COUNT_HW_CACHE_MAX] 23 [PERF_COUNT_HW_CACHE_MAX]
24 [PERF_COUNT_HW_CACHE_OP_MAX] 24 [PERF_COUNT_HW_CACHE_OP_MAX]
25 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e280253f6f94..37a198bd48c8 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -34,9 +34,9 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
34 "fpu_exception\t: %s\n" 34 "fpu_exception\t: %s\n"
35 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
36 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
37 c->fdiv_bug ? "yes" : "no", 37 static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
38 c->f00f_bug ? "yes" : "no", 38 static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
39 c->coma_bug ? "yes" : "no", 39 static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
40 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
41 c->hard_math ? "yes" : "no", 41 c->hard_math ? "yes" : "no",
42 c->cpuid_level, 42 c->cpuid_level,
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index 37250fe490b1..155a13f33ed8 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -20,7 +20,7 @@ static void doublefault_fn(void)
20 struct desc_ptr gdt_desc = {0, 0}; 20 struct desc_ptr gdt_desc = {0, 0};
21 unsigned long gdt, tss; 21 unsigned long gdt, tss;
22 22
23 store_gdt(&gdt_desc); 23 native_store_gdt(&gdt_desc);
24 gdt = gdt_desc.address; 24 gdt = gdt_desc.address;
25 25
26 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); 26 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 9b9f18b49918..d15f575a861b 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -169,25 +169,9 @@ static struct console early_serial_console = {
169 .index = -1, 169 .index = -1,
170}; 170};
171 171
172/* Direct interface for emergencies */
173static struct console *early_console = &early_vga_console;
174static int __initdata early_console_initialized;
175
176asmlinkage void early_printk(const char *fmt, ...)
177{
178 char buf[512];
179 int n;
180 va_list ap;
181
182 va_start(ap, fmt);
183 n = vscnprintf(buf, sizeof(buf), fmt, ap);
184 early_console->write(early_console, buf, n);
185 va_end(ap);
186}
187
188static inline void early_console_register(struct console *con, int keep_early) 172static inline void early_console_register(struct console *con, int keep_early)
189{ 173{
190 if (early_console->index != -1) { 174 if (con->index != -1) {
191 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", 175 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
192 con->name); 176 con->name);
193 return; 177 return;
@@ -207,9 +191,8 @@ static int __init setup_early_printk(char *buf)
207 if (!buf) 191 if (!buf)
208 return 0; 192 return 0;
209 193
210 if (early_console_initialized) 194 if (early_console)
211 return 0; 195 return 0;
212 early_console_initialized = 1;
213 196
214 keep = (strstr(buf, "keep") != NULL); 197 keep = (strstr(buf, "keep") != NULL);
215 198
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index c5e403f6d869..101ac1a9263e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
144 * Build-time sanity checks on the kernel image and module 144 * Build-time sanity checks on the kernel image and module
145 * area mappings. (these are purely build-time and produce no code) 145 * area mappings. (these are purely build-time and produce no code)
146 */ 146 */
147 BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START); 147 BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
148 BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE); 148 BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
149 BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE); 149 BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
150 BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0); 150 BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
151 BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0); 151 BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
152 BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); 152 BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
153 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 153 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f06e6149981..9895a9a41380 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -353,7 +353,11 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
353 * have given. 353 * have given.
354 */ 354 */
355 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest; 355 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
356 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ 356 if ((s64) (s32) newdisp != newdisp) {
357 pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
358 pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
359 return 0;
360 }
357 disp = (u8 *) dest + insn_offset_displacement(&insn); 361 disp = (u8 *) dest + insn_offset_displacement(&insn);
358 *(s32 *) disp = (s32) newdisp; 362 *(s32 *) disp = (s32) newdisp;
359 } 363 }
@@ -375,6 +379,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
375 else 379 else
376 p->ainsn.boostable = -1; 380 p->ainsn.boostable = -1;
377 381
382 /* Check whether the instruction modifies Interrupt Flag or not */
383 p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
384
378 /* Also, displacement change doesn't affect the first byte */ 385 /* Also, displacement change doesn't affect the first byte */
379 p->opcode = p->ainsn.insn[0]; 386 p->opcode = p->ainsn.insn[0];
380} 387}
@@ -434,7 +441,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
434 __this_cpu_write(current_kprobe, p); 441 __this_cpu_write(current_kprobe, p);
435 kcb->kprobe_saved_flags = kcb->kprobe_old_flags 442 kcb->kprobe_saved_flags = kcb->kprobe_old_flags
436 = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); 443 = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
437 if (is_IF_modifier(p->ainsn.insn)) 444 if (p->ainsn.if_modifier)
438 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; 445 kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
439} 446}
440 447
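
The __copy_instruction() change above replaces a hard BUG_ON() with a graceful bail-out when the recomputed RIP-relative displacement no longer fits the instruction's signed 32-bit field. A standalone sketch of that range check, with disp_fits_s32() as a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper: true if a 64-bit displacement can still be
 * encoded in the signed 32-bit field of an x86 RIP-relative insn.
 */
static int disp_fits_s32(int64_t newdisp)
{
        return (int64_t)(int32_t)newdisp == newdisp;
}

int main(void)
{
        printf("%d\n", disp_fits_s32(0x7fffffffLL));  /* 1: fits */
        printf("%d\n", disp_fits_s32(0x80000000LL));  /* 0: out of range */
        return 0;
}
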
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b686a904d7c3..cd6d9a5a42f6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -20,6 +20,7 @@
20 * Authors: Anthony Liguori <aliguori@us.ibm.com> 20 * Authors: Anthony Liguori <aliguori@us.ibm.com>
21 */ 21 */
22 22
23#include <linux/context_tracking.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/kvm_para.h> 26#include <linux/kvm_para.h>
@@ -43,7 +44,6 @@
43#include <asm/apicdef.h> 44#include <asm/apicdef.h>
44#include <asm/hypervisor.h> 45#include <asm/hypervisor.h>
45#include <asm/kvm_guest.h> 46#include <asm/kvm_guest.h>
46#include <asm/context_tracking.h>
47 47
48static int kvmapf = 1; 48static int kvmapf = 1;
49 49
@@ -254,16 +254,18 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
254dotraplinkage void __kprobes 254dotraplinkage void __kprobes
255do_async_page_fault(struct pt_regs *regs, unsigned long error_code) 255do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
256{ 256{
257 enum ctx_state prev_state;
258
257 switch (kvm_read_and_reset_pf_reason()) { 259 switch (kvm_read_and_reset_pf_reason()) {
258 default: 260 default:
259 do_page_fault(regs, error_code); 261 do_page_fault(regs, error_code);
260 break; 262 break;
261 case KVM_PV_REASON_PAGE_NOT_PRESENT: 263 case KVM_PV_REASON_PAGE_NOT_PRESENT:
262 /* page is swapped out by the host. */ 264 /* page is swapped out by the host. */
263 exception_enter(regs); 265 prev_state = exception_enter();
264 exit_idle(); 266 exit_idle();
265 kvm_async_pf_task_wait((u32)read_cr2()); 267 kvm_async_pf_task_wait((u32)read_cr2());
266 exception_exit(regs); 268 exception_exit(prev_state);
267 break; 269 break;
268 case KVM_PV_REASON_PAGE_READY: 270 case KVM_PV_REASON_PAGE_READY:
269 rcu_irq_enter(); 271 rcu_irq_enter();
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 577db8417d15..833d51d6ee06 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
45 u32 eax = 0x00000000; 45 u32 eax = 0x00000000;
46 u32 ebx, ecx = 0, edx; 46 u32 ebx, ecx = 0, edx;
47 47
48 if (!have_cpuid_p())
49 return X86_VENDOR_UNKNOWN;
50
51 native_cpuid(&eax, &ebx, &ecx, &edx); 48 native_cpuid(&eax, &ebx, &ecx, &edx);
52 49
53 if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) 50 if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
59 return X86_VENDOR_UNKNOWN; 56 return X86_VENDOR_UNKNOWN;
60} 57}
61 58
59static int __cpuinit x86_family(void)
60{
61 u32 eax = 0x00000001;
62 u32 ebx, ecx = 0, edx;
63 int x86;
64
65 native_cpuid(&eax, &ebx, &ecx, &edx);
66
67 x86 = (eax >> 8) & 0xf;
68 if (x86 == 15)
69 x86 += (eax >> 20) & 0xff;
70
71 return x86;
72}
73
62void __init load_ucode_bsp(void) 74void __init load_ucode_bsp(void)
63{ 75{
64 int vendor = x86_vendor(); 76 int vendor, x86;
77
78 if (!have_cpuid_p())
79 return;
65 80
66 if (vendor == X86_VENDOR_INTEL) 81 vendor = x86_vendor();
82 x86 = x86_family();
83
84 if (vendor == X86_VENDOR_INTEL && x86 >= 6)
67 load_ucode_intel_bsp(); 85 load_ucode_intel_bsp();
68} 86}
69 87
70void __cpuinit load_ucode_ap(void) 88void __cpuinit load_ucode_ap(void)
71{ 89{
72 int vendor = x86_vendor(); 90 int vendor, x86;
91
92 if (!have_cpuid_p())
93 return;
94
95 vendor = x86_vendor();
96 x86 = x86_family();
73 97
74 if (vendor == X86_VENDOR_INTEL) 98 if (vendor == X86_VENDOR_INTEL && x86 >= 6)
75 load_ucode_intel_ap(); 99 load_ucode_intel_ap();
76} 100}
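
load_ucode_bsp() and load_ucode_ap() now gate the Intel path on family >= 6, using the new x86_family() helper that folds the extended family field into the base family. A minimal sketch of that decoding against a raw CPUID leaf-1 EAX value; the sample EAX values below are assumed, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Decode the x86 family from CPUID leaf-1 EAX: bits 11:8 are the base
 * family, and the extended family (bits 27:20) is added when the base
 * family is 0xF, as x86_family() does in the hunk above.
 */
static int x86_family_from_eax(uint32_t eax)
{
        int x86 = (eax >> 8) & 0xf;

        if (x86 == 15)
                x86 += (eax >> 20) & 0xff;
        return x86;
}

int main(void)
{
        printf("family %d\n", x86_family_from_eax(0x000306a9u)); /* -> 6  */
        printf("family %d\n", x86_family_from_eax(0x00100f43u)); /* -> 16 */
        return 0;
}
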
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index 7890bc838952..d893e8ed8ac9 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
90 struct microcode_intel ***mc_saved; 90 struct microcode_intel ***mc_saved;
91 91
92 mc_saved = (struct microcode_intel ***) 92 mc_saved = (struct microcode_intel ***)
93 __pa_symbol(&mc_saved_data->mc_saved); 93 __pa_nodebug(&mc_saved_data->mc_saved);
94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) { 94 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
95 struct microcode_intel *p; 95 struct microcode_intel *p;
96 96
97 p = *(struct microcode_intel **) 97 p = *(struct microcode_intel **)
98 __pa(mc_saved_data->mc_saved + i); 98 __pa_nodebug(mc_saved_data->mc_saved + i);
99 mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); 99 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
100 } 100 }
101} 101}
102#endif 102#endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
562 struct cpio_data cd; 562 struct cpio_data cd;
563 long offset = 0; 563 long offset = 0;
564#ifdef CONFIG_X86_32 564#ifdef CONFIG_X86_32
565 char *p = (char *)__pa_symbol(ucode_name); 565 char *p = (char *)__pa_nodebug(ucode_name);
566#else 566#else
567 char *p = ucode_name; 567 char *p = ucode_name;
568#endif 568#endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
630 if (mc_intel == NULL) 630 if (mc_intel == NULL)
631 return; 631 return;
632 632
633 delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); 633 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
634 current_mc_date_p = (int *)__pa_symbol(&current_mc_date); 634 current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
635 635
636 *delay_ucode_info_p = 1; 636 *delay_ucode_info_p = 1;
637 *current_mc_date_p = mc_intel->hdr.date; 637 *current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
659} 659}
660#endif 660#endif
661 661
662static int apply_microcode_early(struct mc_saved_data *mc_saved_data, 662static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
663 struct ucode_cpu_info *uci) 663 struct ucode_cpu_info *uci)
664{ 664{
665 struct microcode_intel *mc_intel; 665 struct microcode_intel *mc_intel;
666 unsigned int val[2]; 666 unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
741#ifdef CONFIG_X86_32 741#ifdef CONFIG_X86_32
742 struct boot_params *boot_params_p; 742 struct boot_params *boot_params_p;
743 743
744 boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); 744 boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
745 ramdisk_image = boot_params_p->hdr.ramdisk_image; 745 ramdisk_image = boot_params_p->hdr.ramdisk_image;
746 ramdisk_size = boot_params_p->hdr.ramdisk_size; 746 ramdisk_size = boot_params_p->hdr.ramdisk_size;
747 initrd_start_early = ramdisk_image; 747 initrd_start_early = ramdisk_image;
748 initrd_end_early = initrd_start_early + ramdisk_size; 748 initrd_end_early = initrd_start_early + ramdisk_size;
749 749
750 _load_ucode_intel_bsp( 750 _load_ucode_intel_bsp(
751 (struct mc_saved_data *)__pa_symbol(&mc_saved_data), 751 (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
752 (unsigned long *)__pa_symbol(&mc_saved_in_initrd), 752 (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
753 initrd_start_early, initrd_end_early, &uci); 753 initrd_start_early, initrd_end_early, &uci);
754#else 754#else
755 ramdisk_image = boot_params.hdr.ramdisk_image; 755 ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
772 unsigned long *initrd_start_p; 772 unsigned long *initrd_start_p;
773 773
774 mc_saved_in_initrd_p = 774 mc_saved_in_initrd_p =
775 (unsigned long *)__pa_symbol(mc_saved_in_initrd); 775 (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
776 mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); 776 mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
777 initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); 777 initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
778 initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); 778 initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
779#else 779#else
780 mc_saved_data_p = &mc_saved_data; 780 mc_saved_data_p = &mc_saved_data;
781 mc_saved_in_initrd_p = mc_saved_in_initrd; 781 mc_saved_in_initrd_p = mc_saved_in_initrd;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 17fff18a1031..cd6de64cc480 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
263 leave_lazy(PARAVIRT_LAZY_MMU); 263 leave_lazy(PARAVIRT_LAZY_MMU);
264} 264}
265 265
266void paravirt_flush_lazy_mmu(void)
267{
268 preempt_disable();
269
270 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
271 arch_leave_lazy_mmu_mode();
272 arch_enter_lazy_mmu_mode();
273 }
274
275 preempt_enable();
276}
277
266void paravirt_start_context_switch(struct task_struct *prev) 278void paravirt_start_context_switch(struct task_struct *prev)
267{ 279{
268 BUG_ON(preemptible()); 280 BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
292 return this_cpu_read(paravirt_lazy_mode); 304 return this_cpu_read(paravirt_lazy_mode);
293} 305}
294 306
295void arch_flush_lazy_mmu_mode(void)
296{
297 preempt_disable();
298
299 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
300 arch_leave_lazy_mmu_mode();
301 arch_enter_lazy_mmu_mode();
302 }
303
304 preempt_enable();
305}
306
307struct pv_info pv_info = { 307struct pv_info pv_info = {
308 .name = "bare hardware", 308 .name = "bare hardware",
309 .paravirt_enabled = 0, 309 .paravirt_enabled = 0,
@@ -360,7 +360,6 @@ struct pv_cpu_ops pv_cpu_ops = {
360 .set_ldt = native_set_ldt, 360 .set_ldt = native_set_ldt,
361 .load_gdt = native_load_gdt, 361 .load_gdt = native_load_gdt,
362 .load_idt = native_load_idt, 362 .load_idt = native_load_idt,
363 .store_gdt = native_store_gdt,
364 .store_idt = native_store_idt, 363 .store_idt = native_store_idt,
365 .store_tr = native_store_tr, 364 .store_tr = native_store_tr,
366 .load_tls = native_load_tls, 365 .load_tls = native_load_tls,
@@ -475,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
475 .lazy_mode = { 474 .lazy_mode = {
476 .enter = paravirt_nop, 475 .enter = paravirt_nop,
477 .leave = paravirt_nop, 476 .leave = paravirt_nop,
477 .flush = paravirt_nop,
478 }, 478 },
479 479
480 .set_fixmap = native_set_fixmap, 480 .set_fixmap = native_set_fixmap,
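
The paravirt change turns the old arch_flush_lazy_mmu_mode() into a pv_mmu_ops.lazy_mode.flush hook: flushing is just leave+enter, and only while lazy MMU mode is active. A standalone sketch of that enter/leave/flush ops layout (all names hypothetical):

#include <stdio.h>

enum lazy_mode { LAZY_NONE, LAZY_MMU };

static enum lazy_mode cur_mode = LAZY_NONE;

static void demo_enter(void) { cur_mode = LAZY_MMU;  puts("enter lazy mmu"); }
static void demo_leave(void) { cur_mode = LAZY_NONE; puts("leave lazy mmu"); }

/* Flush == leave + enter, and only while lazy mode is active. */
static void demo_flush(void)
{
        if (cur_mode == LAZY_MMU) {
                demo_leave();
                demo_enter();
        }
}

struct lazy_ops {
        void (*enter)(void);
        void (*leave)(void);
        void (*flush)(void);
};

static const struct lazy_ops ops = {
        .enter = demo_enter,
        .leave = demo_leave,
        .flush = demo_flush,
};

int main(void)
{
        ops.enter();
        ops.flush();            /* pushes queued updates out, stays lazy */
        ops.leave();
        return 0;
}
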
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14ae10031ff0..14fcf55a5c5b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,13 +301,7 @@ void exit_idle(void)
301} 301}
302#endif 302#endif
303 303
304/* 304void arch_cpu_idle_prepare(void)
305 * The idle thread. There's no useful work to be
306 * done, so just try to conserve power and have a
307 * low exit latency (ie sit in a loop waiting for
308 * somebody to say that they'd like to reschedule)
309 */
310void cpu_idle(void)
311{ 305{
312 /* 306 /*
313 * If we're the non-boot CPU, nothing set the stack canary up 307 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
317 * canaries already on the stack wont ever trigger). 311 * canaries already on the stack wont ever trigger).
318 */ 312 */
319 boot_init_stack_canary(); 313 boot_init_stack_canary();
320 current_thread_info()->status |= TS_POLLING; 314}
321
322 while (1) {
323 tick_nohz_idle_enter();
324
325 while (!need_resched()) {
326 rmb();
327
328 if (cpu_is_offline(smp_processor_id()))
329 play_dead();
330
331 /*
332 * Idle routines should keep interrupts disabled
333 * from here on, until they go to idle.
334 * Otherwise, idle callbacks can misfire.
335 */
336 local_touch_nmi();
337 local_irq_disable();
338
339 enter_idle();
340
341 /* Don't trace irqs off for idle */
342 stop_critical_timings();
343
344 /* enter_idle() needs rcu for notifiers */
345 rcu_idle_enter();
346 315
347 if (cpuidle_idle_call()) 316void arch_cpu_idle_enter(void)
348 x86_idle(); 317{
318 local_touch_nmi();
319 enter_idle();
320}
349 321
350 rcu_idle_exit(); 322void arch_cpu_idle_exit(void)
351 start_critical_timings(); 323{
324 __exit_idle();
325}
352 326
353 /* In many cases the interrupt that ended idle 327void arch_cpu_idle_dead(void)
354 has already called exit_idle. But some idle 328{
355 loops can be woken up without interrupt. */ 329 play_dead();
356 __exit_idle(); 330}
357 }
358 331
359 tick_nohz_idle_exit(); 332/*
360 preempt_enable_no_resched(); 333 * Called from the generic idle code.
361 schedule(); 334 */
362 preempt_disable(); 335void arch_cpu_idle(void)
363 } 336{
337 if (cpuidle_idle_call())
338 x86_idle();
364} 339}
365 340
366/* 341/*
367 * We use this if we don't have any better 342 * We use this if we don't have any better idle routine..
368 * idle routine..
369 */ 343 */
370void default_idle(void) 344void default_idle(void)
371{ 345{
372 trace_cpu_idle_rcuidle(1, smp_processor_id()); 346 trace_cpu_idle_rcuidle(1, smp_processor_id());
373 current_thread_info()->status &= ~TS_POLLING; 347 safe_halt();
374 /*
375 * TS_POLLING-cleared state must be visible before we
376 * test NEED_RESCHED:
377 */
378 smp_mb();
379
380 if (!need_resched())
381 safe_halt(); /* enables interrupts racelessly */
382 else
383 local_irq_enable();
384 current_thread_info()->status |= TS_POLLING;
385 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 348 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
386} 349}
387#ifdef CONFIG_APM_MODULE 350#ifdef CONFIG_APM_MODULE
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
411 halt(); 374 halt();
412} 375}
413 376
414/*
415 * On SMP it's slightly faster (but much more power-consuming!)
416 * to poll the ->work.need_resched flag instead of waiting for the
417 * cross-CPU IPI to arrive. Use this option with caution.
418 */
419static void poll_idle(void)
420{
421 trace_cpu_idle_rcuidle(0, smp_processor_id());
422 local_irq_enable();
423 while (!need_resched())
424 cpu_relax();
425 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
426}
427
428bool amd_e400_c1e_detected; 377bool amd_e400_c1e_detected;
429EXPORT_SYMBOL(amd_e400_c1e_detected); 378EXPORT_SYMBOL(amd_e400_c1e_detected);
430 379
@@ -489,13 +438,13 @@ static void amd_e400_idle(void)
489void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 438void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
490{ 439{
491#ifdef CONFIG_SMP 440#ifdef CONFIG_SMP
492 if (x86_idle == poll_idle && smp_num_siblings > 1) 441 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
493 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); 442 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
494#endif 443#endif
495 if (x86_idle) 444 if (x86_idle || boot_option_idle_override == IDLE_POLL)
496 return; 445 return;
497 446
498 if (cpu_has_amd_erratum(amd_erratum_400)) { 447 if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
499 /* E400: APIC timer interrupt does not wake up CPU from C1e */ 448 /* E400: APIC timer interrupt does not wake up CPU from C1e */
500 pr_info("using AMD E400 aware idle routine\n"); 449 pr_info("using AMD E400 aware idle routine\n");
501 x86_idle = amd_e400_idle; 450 x86_idle = amd_e400_idle;
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
517 466
518 if (!strcmp(str, "poll")) { 467 if (!strcmp(str, "poll")) {
519 pr_info("using polling idle threads\n"); 468 pr_info("using polling idle threads\n");
520 x86_idle = poll_idle;
521 boot_option_idle_override = IDLE_POLL; 469 boot_option_idle_override = IDLE_POLL;
470 cpu_idle_poll_ctrl(true);
522 } else if (!strcmp(str, "halt")) { 471 } else if (!strcmp(str, "halt")) {
523 /* 472 /*
524 * When the boot option of idle=halt is added, halt is 473 * When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 26ee48a33dc4..04ee1e2e4c02 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void)
354 354
355static u32 ati_ixp4x0_rev(struct pci_dev *dev) 355static u32 ati_ixp4x0_rev(struct pci_dev *dev)
356{ 356{
357 u32 d; 357 int err = 0;
358 u8 b; 358 u32 d = 0;
359 u8 b = 0;
359 360
360 pci_read_config_byte(dev, 0xac, &b); 361 err = pci_read_config_byte(dev, 0xac, &b);
361 b &= ~(1<<5); 362 b &= ~(1<<5);
362 pci_write_config_byte(dev, 0xac, b); 363 err |= pci_write_config_byte(dev, 0xac, b);
363 pci_read_config_dword(dev, 0x70, &d); 364 err |= pci_read_config_dword(dev, 0x70, &d);
364 d |= 1<<8; 365 d |= 1<<8;
365 pci_write_config_dword(dev, 0x70, d); 366 err |= pci_write_config_dword(dev, 0x70, d);
366 pci_read_config_dword(dev, 0x8, &d); 367 err |= pci_read_config_dword(dev, 0x8, &d);
367 d &= 0xff; 368 d &= 0xff;
368 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d); 369 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
370
371 WARN_ON_ONCE(err);
372
369 return d; 373 return d;
370} 374}
371 375
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 2e8f3d3b5641..198eb201ed3b 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -13,6 +13,7 @@
13#include <asm/x86_init.h> 13#include <asm/x86_init.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/mrst.h> 15#include <asm/mrst.h>
16#include <asm/rtc.h>
16 17
17#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
18/* 19/*
@@ -36,70 +37,24 @@ EXPORT_SYMBOL(rtc_lock);
36 * nowtime is written into the registers of the CMOS clock, it will 37 * nowtime is written into the registers of the CMOS clock, it will
37 * jump to the next second precisely 500 ms later. Check the Motorola 38 * jump to the next second precisely 500 ms later. Check the Motorola
38 * MC146818A or Dallas DS12887 data sheet for details. 39 * MC146818A or Dallas DS12887 data sheet for details.
39 *
40 * BUG: This routine does not handle hour overflow properly; it just
41 * sets the minutes. Usually you'll only notice that after reboot!
42 */ 40 */
43int mach_set_rtc_mmss(unsigned long nowtime) 41int mach_set_rtc_mmss(unsigned long nowtime)
44{ 42{
45 int real_seconds, real_minutes, cmos_minutes; 43 struct rtc_time tm;
46 unsigned char save_control, save_freq_select;
47 unsigned long flags;
48 int retval = 0; 44 int retval = 0;
49 45
50 spin_lock_irqsave(&rtc_lock, flags); 46 rtc_time_to_tm(nowtime, &tm);
51 47 if (!rtc_valid_tm(&tm)) {
52 /* tell the clock it's being set */ 48 retval = set_rtc_time(&tm);
53 save_control = CMOS_READ(RTC_CONTROL); 49 if (retval)
54 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); 50 printk(KERN_ERR "%s: RTC write failed with error %d\n",
55 51 __FUNCTION__, retval);
56 /* stop and reset prescaler */
57 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
58 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
59
60 cmos_minutes = CMOS_READ(RTC_MINUTES);
61 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
62 cmos_minutes = bcd2bin(cmos_minutes);
63
64 /*
65 * since we're only adjusting minutes and seconds,
66 * don't interfere with hour overflow. This avoids
67 * messing with unknown time zones but requires your
68 * RTC not to be off by more than 15 minutes
69 */
70 real_seconds = nowtime % 60;
71 real_minutes = nowtime / 60;
72 /* correct for half hour time zone */
73 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
74 real_minutes += 30;
75 real_minutes %= 60;
76
77 if (abs(real_minutes - cmos_minutes) < 30) {
78 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
79 real_seconds = bin2bcd(real_seconds);
80 real_minutes = bin2bcd(real_minutes);
81 }
82 CMOS_WRITE(real_seconds, RTC_SECONDS);
83 CMOS_WRITE(real_minutes, RTC_MINUTES);
84 } else { 52 } else {
85 printk_once(KERN_NOTICE 53 printk(KERN_ERR
86 "set_rtc_mmss: can't update from %d to %d\n", 54 "%s: Invalid RTC value: write of %lx to RTC failed\n",
87 cmos_minutes, real_minutes); 55 __FUNCTION__, nowtime);
88 retval = -1; 56 retval = -EINVAL;
89 } 57 }
90
91 /* The following flags have to be released exactly in this order,
92 * otherwise the DS12887 (popular MC146818A clone with integrated
93 * battery and quartz) will not reset the oscillator and will not
94 * update precisely 500 ms later. You won't find this mentioned in
95 * the Dallas Semiconductor data sheets, but who believes data
96 * sheets anyway ... -- Markus Kuhn
97 */
98 CMOS_WRITE(save_control, RTC_CONTROL);
99 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
100
101 spin_unlock_irqrestore(&rtc_lock, flags);
102
103 return retval; 58 return retval;
104} 59}
105 60
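
mach_set_rtc_mmss() now converts the wall-clock seconds with rtc_time_to_tm() and programs the CMOS only when rtc_valid_tm() accepts the result, instead of hand-editing the minutes and seconds registers. A rough userspace analogue using the standard C library, with gmtime() standing in for rtc_time_to_tm() and a deliberately simplified validity check:

#include <stdio.h>
#include <time.h>

/* Deliberately simplified stand-in for rtc_valid_tm(). */
static int tm_is_sane(const struct tm *tm)
{
        return tm->tm_sec <= 60 && tm->tm_min < 60 && tm->tm_hour < 24 &&
               tm->tm_mday >= 1 && tm->tm_mday <= 31 && tm->tm_mon < 12;
}

static int set_rtc_like(time_t nowtime)
{
        struct tm *tm = gmtime(&nowtime);  /* analogue of rtc_time_to_tm() */

        if (!tm || !tm_is_sane(tm)) {
                fprintf(stderr, "invalid RTC value %ld\n", (long)nowtime);
                return -1;
        }
        printf("would program RTC to %04d-%02d-%02d %02d:%02d:%02d\n",
               tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
               tm->tm_hour, tm->tm_min, tm->tm_sec);
        return 0;
}

int main(void)
{
        return set_rtc_like(time(NULL));
}
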
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 90d8cc930f5e..4689855c2f8a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -82,7 +82,6 @@
82#include <asm/timer.h> 82#include <asm/timer.h>
83#include <asm/i8259.h> 83#include <asm/i8259.h>
84#include <asm/sections.h> 84#include <asm/sections.h>
85#include <asm/dmi.h>
86#include <asm/io_apic.h> 85#include <asm/io_apic.h>
87#include <asm/ist.h> 86#include <asm/ist.h>
88#include <asm/setup_arch.h> 87#include <asm/setup_arch.h>
@@ -173,12 +172,10 @@ static struct resource bss_resource = {
173/* cpu data as detected by the assembly code in head.S */ 172/* cpu data as detected by the assembly code in head.S */
174struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 173struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
175 .wp_works_ok = -1, 174 .wp_works_ok = -1,
176 .fdiv_bug = -1,
177}; 175};
178/* common cpu data for all cpus */ 176/* common cpu data for all cpus */
179struct cpuinfo_x86 boot_cpu_data __read_mostly = { 177struct cpuinfo_x86 boot_cpu_data __read_mostly = {
180 .wp_works_ok = -1, 178 .wp_works_ok = -1,
181 .fdiv_bug = -1,
182}; 179};
183EXPORT_SYMBOL(boot_cpu_data); 180EXPORT_SYMBOL(boot_cpu_data);
184 181
@@ -507,11 +504,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
507/* 504/*
508 * Keep the crash kernel below this limit. On 32 bits earlier kernels 505 * Keep the crash kernel below this limit. On 32 bits earlier kernels
509 * would limit the kernel to the low 512 MiB due to mapping restrictions. 506 * would limit the kernel to the low 512 MiB due to mapping restrictions.
507 * On 64bit, old kexec-tools need to under 896MiB.
510 */ 508 */
511#ifdef CONFIG_X86_32 509#ifdef CONFIG_X86_32
512# define CRASH_KERNEL_ADDR_MAX (512 << 20) 510# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
511# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
513#else 512#else
514# define CRASH_KERNEL_ADDR_MAX MAXMEM 513# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
514# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
515#endif 515#endif
516 516
517static void __init reserve_crashkernel_low(void) 517static void __init reserve_crashkernel_low(void)
@@ -521,19 +521,35 @@ static void __init reserve_crashkernel_low(void)
521 unsigned long long low_base = 0, low_size = 0; 521 unsigned long long low_base = 0, low_size = 0;
522 unsigned long total_low_mem; 522 unsigned long total_low_mem;
523 unsigned long long base; 523 unsigned long long base;
524 bool auto_set = false;
524 int ret; 525 int ret;
525 526
526 total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); 527 total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
528 /* crashkernel=Y,low */
527 ret = parse_crashkernel_low(boot_command_line, total_low_mem, 529 ret = parse_crashkernel_low(boot_command_line, total_low_mem,
528 &low_size, &base); 530 &low_size, &base);
529 if (ret != 0 || low_size <= 0) 531 if (ret != 0) {
530 return; 532 /*
533 * two parts from lib/swiotlb.c:
534 * swiotlb size: user specified with swiotlb= or default.
535 * swiotlb overflow buffer: now is hardcoded to 32k.
536 * We round it to 8M for other buffers that
537 * may need to stay low too.
538 */
539 low_size = swiotlb_size_or_default() + (8UL<<20);
540 auto_set = true;
541 } else {
542 /* passed with crashkernel=0,low ? */
543 if (!low_size)
544 return;
545 }
531 546
532 low_base = memblock_find_in_range(low_size, (1ULL<<32), 547 low_base = memblock_find_in_range(low_size, (1ULL<<32),
533 low_size, alignment); 548 low_size, alignment);
534 549
535 if (!low_base) { 550 if (!low_base) {
536 pr_info("crashkernel low reservation failed - No suitable area found.\n"); 551 if (!auto_set)
552 pr_info("crashkernel low reservation failed - No suitable area found.\n");
537 553
538 return; 554 return;
539 } 555 }
@@ -554,14 +570,22 @@ static void __init reserve_crashkernel(void)
554 const unsigned long long alignment = 16<<20; /* 16M */ 570 const unsigned long long alignment = 16<<20; /* 16M */
555 unsigned long long total_mem; 571 unsigned long long total_mem;
556 unsigned long long crash_size, crash_base; 572 unsigned long long crash_size, crash_base;
573 bool high = false;
557 int ret; 574 int ret;
558 575
559 total_mem = memblock_phys_mem_size(); 576 total_mem = memblock_phys_mem_size();
560 577
578 /* crashkernel=XM */
561 ret = parse_crashkernel(boot_command_line, total_mem, 579 ret = parse_crashkernel(boot_command_line, total_mem,
562 &crash_size, &crash_base); 580 &crash_size, &crash_base);
563 if (ret != 0 || crash_size <= 0) 581 if (ret != 0 || crash_size <= 0) {
564 return; 582 /* crashkernel=X,high */
583 ret = parse_crashkernel_high(boot_command_line, total_mem,
584 &crash_size, &crash_base);
585 if (ret != 0 || crash_size <= 0)
586 return;
587 high = true;
588 }
565 589
566 /* 0 means: find the address automatically */ 590 /* 0 means: find the address automatically */
567 if (crash_base <= 0) { 591 if (crash_base <= 0) {
@@ -569,7 +593,9 @@ static void __init reserve_crashkernel(void)
569 * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX 593 * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
570 */ 594 */
571 crash_base = memblock_find_in_range(alignment, 595 crash_base = memblock_find_in_range(alignment,
572 CRASH_KERNEL_ADDR_MAX, crash_size, alignment); 596 high ? CRASH_KERNEL_ADDR_HIGH_MAX :
597 CRASH_KERNEL_ADDR_LOW_MAX,
598 crash_size, alignment);
573 599
574 if (!crash_base) { 600 if (!crash_base) {
575 pr_info("crashkernel reservation failed - No suitable area found.\n"); 601 pr_info("crashkernel reservation failed - No suitable area found.\n");
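
reserve_crashkernel() now has two ceilings: plain crashkernel=X stays below 896 MiB on 64-bit so older kexec-tools can still load the image, while crashkernel=X,high may use the full addressable range. A tiny sketch of that selection; the all-ones value only stands in for MAXMEM:

#include <stdio.h>

#define DEMO_ADDR_LOW_MAX       (896UL << 20)   /* limit for old kexec-tools */
#define DEMO_ADDR_HIGH_MAX      (~0UL)          /* stand-in for MAXMEM */

static unsigned long crash_search_limit(int high)
{
        return high ? DEMO_ADDR_HIGH_MAX : DEMO_ADDR_LOW_MAX;
}

int main(void)
{
        printf("crashkernel=X      -> search below %#lx\n", crash_search_limit(0));
        printf("crashkernel=X,high -> search below %#lx\n", crash_search_limit(1));
        return 0;
}
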
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9f190a2a00e9..9c73b51817e4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
284 x86_cpuinit.setup_percpu_clockev(); 284 x86_cpuinit.setup_percpu_clockev();
285 285
286 wmb(); 286 wmb();
287 cpu_idle(); 287 cpu_startup_entry(CPUHP_ONLINE);
288} 288}
289 289
290void __init smp_store_boot_cpu_info(void) 290void __init smp_store_boot_cpu_info(void)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 68bda7a84159..772e2a846dec 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -12,6 +12,7 @@
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 14
15#include <linux/context_tracking.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
16#include <linux/kallsyms.h> 17#include <linux/kallsyms.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
@@ -55,8 +56,7 @@
55#include <asm/i387.h> 56#include <asm/i387.h>
56#include <asm/fpu-internal.h> 57#include <asm/fpu-internal.h>
57#include <asm/mce.h> 58#include <asm/mce.h>
58#include <asm/context_tracking.h> 59#include <asm/fixmap.h>
59
60#include <asm/mach_traps.h> 60#include <asm/mach_traps.h>
61 61
62#ifdef CONFIG_X86_64 62#ifdef CONFIG_X86_64
@@ -176,34 +176,38 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
176#define DO_ERROR(trapnr, signr, str, name) \ 176#define DO_ERROR(trapnr, signr, str, name) \
177dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 177dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
178{ \ 178{ \
179 exception_enter(regs); \ 179 enum ctx_state prev_state; \
180 \
181 prev_state = exception_enter(); \
180 if (notify_die(DIE_TRAP, str, regs, error_code, \ 182 if (notify_die(DIE_TRAP, str, regs, error_code, \
181 trapnr, signr) == NOTIFY_STOP) { \ 183 trapnr, signr) == NOTIFY_STOP) { \
182 exception_exit(regs); \ 184 exception_exit(prev_state); \
183 return; \ 185 return; \
184 } \ 186 } \
185 conditional_sti(regs); \ 187 conditional_sti(regs); \
186 do_trap(trapnr, signr, str, regs, error_code, NULL); \ 188 do_trap(trapnr, signr, str, regs, error_code, NULL); \
187 exception_exit(regs); \ 189 exception_exit(prev_state); \
188} 190}
189 191
190#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 192#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
191dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ 193dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
192{ \ 194{ \
193 siginfo_t info; \ 195 siginfo_t info; \
196 enum ctx_state prev_state; \
197 \
194 info.si_signo = signr; \ 198 info.si_signo = signr; \
195 info.si_errno = 0; \ 199 info.si_errno = 0; \
196 info.si_code = sicode; \ 200 info.si_code = sicode; \
197 info.si_addr = (void __user *)siaddr; \ 201 info.si_addr = (void __user *)siaddr; \
198 exception_enter(regs); \ 202 prev_state = exception_enter(); \
199 if (notify_die(DIE_TRAP, str, regs, error_code, \ 203 if (notify_die(DIE_TRAP, str, regs, error_code, \
200 trapnr, signr) == NOTIFY_STOP) { \ 204 trapnr, signr) == NOTIFY_STOP) { \
201 exception_exit(regs); \ 205 exception_exit(prev_state); \
202 return; \ 206 return; \
203 } \ 207 } \
204 conditional_sti(regs); \ 208 conditional_sti(regs); \
205 do_trap(trapnr, signr, str, regs, error_code, &info); \ 209 do_trap(trapnr, signr, str, regs, error_code, &info); \
206 exception_exit(regs); \ 210 exception_exit(prev_state); \
207} 211}
208 212
209DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, 213DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
@@ -226,14 +230,16 @@ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
226/* Runs on IST stack */ 230/* Runs on IST stack */
227dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 231dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
228{ 232{
229 exception_enter(regs); 233 enum ctx_state prev_state;
234
235 prev_state = exception_enter();
230 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 236 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
231 X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { 237 X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
232 preempt_conditional_sti(regs); 238 preempt_conditional_sti(regs);
233 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); 239 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
234 preempt_conditional_cli(regs); 240 preempt_conditional_cli(regs);
235 } 241 }
236 exception_exit(regs); 242 exception_exit(prev_state);
237} 243}
238 244
239dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 245dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
@@ -241,7 +247,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
241 static const char str[] = "double fault"; 247 static const char str[] = "double fault";
242 struct task_struct *tsk = current; 248 struct task_struct *tsk = current;
243 249
244 exception_enter(regs); 250 exception_enter();
245 /* Return not checked because double check cannot be ignored */ 251 /* Return not checked because double check cannot be ignored */
246 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 252 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
247 253
@@ -261,8 +267,9 @@ dotraplinkage void __kprobes
261do_general_protection(struct pt_regs *regs, long error_code) 267do_general_protection(struct pt_regs *regs, long error_code)
262{ 268{
263 struct task_struct *tsk; 269 struct task_struct *tsk;
270 enum ctx_state prev_state;
264 271
265 exception_enter(regs); 272 prev_state = exception_enter();
266 conditional_sti(regs); 273 conditional_sti(regs);
267 274
268#ifdef CONFIG_X86_32 275#ifdef CONFIG_X86_32
@@ -300,12 +307,14 @@ do_general_protection(struct pt_regs *regs, long error_code)
300 307
301 force_sig(SIGSEGV, tsk); 308 force_sig(SIGSEGV, tsk);
302exit: 309exit:
303 exception_exit(regs); 310 exception_exit(prev_state);
304} 311}
305 312
306/* May run on IST stack. */ 313/* May run on IST stack. */
307dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 314dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
308{ 315{
316 enum ctx_state prev_state;
317
309#ifdef CONFIG_DYNAMIC_FTRACE 318#ifdef CONFIG_DYNAMIC_FTRACE
310 /* 319 /*
311 * ftrace must be first, everything else may cause a recursive crash. 320 * ftrace must be first, everything else may cause a recursive crash.
@@ -315,7 +324,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
315 ftrace_int3_handler(regs)) 324 ftrace_int3_handler(regs))
316 return; 325 return;
317#endif 326#endif
318 exception_enter(regs); 327 prev_state = exception_enter();
319#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 328#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
320 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 329 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
321 SIGTRAP) == NOTIFY_STOP) 330 SIGTRAP) == NOTIFY_STOP)
@@ -336,7 +345,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
336 preempt_conditional_cli(regs); 345 preempt_conditional_cli(regs);
337 debug_stack_usage_dec(); 346 debug_stack_usage_dec();
338exit: 347exit:
339 exception_exit(regs); 348 exception_exit(prev_state);
340} 349}
341 350
342#ifdef CONFIG_X86_64 351#ifdef CONFIG_X86_64
@@ -393,11 +402,12 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
393dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) 402dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
394{ 403{
395 struct task_struct *tsk = current; 404 struct task_struct *tsk = current;
405 enum ctx_state prev_state;
396 int user_icebp = 0; 406 int user_icebp = 0;
397 unsigned long dr6; 407 unsigned long dr6;
398 int si_code; 408 int si_code;
399 409
400 exception_enter(regs); 410 prev_state = exception_enter();
401 411
402 get_debugreg(dr6, 6); 412 get_debugreg(dr6, 6);
403 413
@@ -467,7 +477,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
467 debug_stack_usage_dec(); 477 debug_stack_usage_dec();
468 478
469exit: 479exit:
470 exception_exit(regs); 480 exception_exit(prev_state);
471} 481}
472 482
473/* 483/*
@@ -561,17 +571,21 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
561 571
562dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) 572dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
563{ 573{
564 exception_enter(regs); 574 enum ctx_state prev_state;
575
576 prev_state = exception_enter();
565 math_error(regs, error_code, X86_TRAP_MF); 577 math_error(regs, error_code, X86_TRAP_MF);
566 exception_exit(regs); 578 exception_exit(prev_state);
567} 579}
568 580
569dotraplinkage void 581dotraplinkage void
570do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 582do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
571{ 583{
572 exception_enter(regs); 584 enum ctx_state prev_state;
585
586 prev_state = exception_enter();
573 math_error(regs, error_code, X86_TRAP_XF); 587 math_error(regs, error_code, X86_TRAP_XF);
574 exception_exit(regs); 588 exception_exit(prev_state);
575} 589}
576 590
577dotraplinkage void 591dotraplinkage void
@@ -639,7 +653,9 @@ EXPORT_SYMBOL_GPL(math_state_restore);
639dotraplinkage void __kprobes 653dotraplinkage void __kprobes
640do_device_not_available(struct pt_regs *regs, long error_code) 654do_device_not_available(struct pt_regs *regs, long error_code)
641{ 655{
642 exception_enter(regs); 656 enum ctx_state prev_state;
657
658 prev_state = exception_enter();
643 BUG_ON(use_eager_fpu()); 659 BUG_ON(use_eager_fpu());
644 660
645#ifdef CONFIG_MATH_EMULATION 661#ifdef CONFIG_MATH_EMULATION
@@ -650,7 +666,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
650 666
651 info.regs = regs; 667 info.regs = regs;
652 math_emulate(&info); 668 math_emulate(&info);
653 exception_exit(regs); 669 exception_exit(prev_state);
654 return; 670 return;
655 } 671 }
656#endif 672#endif
@@ -658,15 +674,16 @@ do_device_not_available(struct pt_regs *regs, long error_code)
658#ifdef CONFIG_X86_32 674#ifdef CONFIG_X86_32
659 conditional_sti(regs); 675 conditional_sti(regs);
660#endif 676#endif
661 exception_exit(regs); 677 exception_exit(prev_state);
662} 678}
663 679
664#ifdef CONFIG_X86_32 680#ifdef CONFIG_X86_32
665dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) 681dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
666{ 682{
667 siginfo_t info; 683 siginfo_t info;
684 enum ctx_state prev_state;
668 685
669 exception_enter(regs); 686 prev_state = exception_enter();
670 local_irq_enable(); 687 local_irq_enable();
671 688
672 info.si_signo = SIGILL; 689 info.si_signo = SIGILL;
@@ -678,7 +695,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
678 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, 695 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
679 &info); 696 &info);
680 } 697 }
681 exception_exit(regs); 698 exception_exit(prev_state);
682} 699}
683#endif 700#endif
684 701
@@ -753,6 +770,14 @@ void __init trap_init(void)
753#endif 770#endif
754 771
755 /* 772 /*
773 * Set the IDT descriptor to a fixed read-only location, so that the
774 * "sidt" instruction will not leak the location of the kernel, and
775 * to defend the IDT against arbitrary memory write vulnerabilities.
776 * It will be reloaded in cpu_init() */
777 __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
778 idt_descr.address = fix_to_virt(FIX_RO_IDT);
779
780 /*
756 * Should be a barrier for any external CPU state: 781 * Should be a barrier for any external CPU state:
757 */ 782 */
758 cpu_init(); 783 cpu_init();
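
The recurring pattern in this file is that exception_enter() now returns the context state it interrupted and exception_exit() takes it back, so nested exceptions restore the right state instead of deducing it from regs. A standalone sketch of that save/restore idiom (enum values and names are illustrative only):

#include <stdio.h>

enum ctx_state { CTX_KERNEL, CTX_USER };

static enum ctx_state cur_state = CTX_USER;

/* Returns the state we interrupted so the caller can restore it later. */
static enum ctx_state exception_enter_like(void)
{
        enum ctx_state prev = cur_state;

        cur_state = CTX_KERNEL;
        return prev;
}

static void exception_exit_like(enum ctx_state prev)
{
        cur_state = prev;
}

static void nested_fault(void)
{
        enum ctx_state prev = exception_enter_like();   /* prev == CTX_KERNEL */

        exception_exit_like(prev);
}

int main(void)
{
        enum ctx_state prev = exception_enter_like();   /* prev == CTX_USER */

        nested_fault();                 /* nests without losing the outer state */
        exception_exit_like(prev);
        printf("restored state %d (CTX_USER)\n", cur_state);
        return 0;
}
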
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4b9ea101fe3b..098b3cfda72e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -768,7 +768,8 @@ static cycle_t read_tsc(struct clocksource *cs)
768 768
769static void resume_tsc(struct clocksource *cs) 769static void resume_tsc(struct clocksource *cs)
770{ 770{
771 clocksource_tsc.cycle_last = 0; 771 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
772 clocksource_tsc.cycle_last = 0;
772} 773}
773 774
774static struct clocksource clocksource_tsc = { 775static struct clocksource clocksource_tsc = {
@@ -939,6 +940,9 @@ static int __init init_tsc_clocksource(void)
939 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; 940 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
940 } 941 }
941 942
943 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
944 clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
945
942 /* 946 /*
943 * Trust the results of the earlier calibration on systems 947 * Trust the results of the earlier calibration on systems
944 * exporting a reliable TSC. 948 * exporting a reliable TSC.
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 0ba4cfb4f412..2ed845928b5f 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -697,3 +697,32 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
697 send_sig(SIGTRAP, current, 0); 697 send_sig(SIGTRAP, current, 0);
698 return ret; 698 return ret;
699} 699}
700
701unsigned long
702arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
703{
704 int rasize, ncopied;
705 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
706
707 rasize = is_ia32_task() ? 4 : 8;
708 ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize);
709 if (unlikely(ncopied))
710 return -1;
711
712 /* check whether address has been already hijacked */
713 if (orig_ret_vaddr == trampoline_vaddr)
714 return orig_ret_vaddr;
715
716 ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
717 if (likely(!ncopied))
718 return orig_ret_vaddr;
719
720 if (ncopied != rasize) {
721 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
722 "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
723
724 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
725 }
726
727 return -1;
728}
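
The new arch_uretprobe_hijack_return_addr() reads the return address at the top of the user stack, leaves it untouched if it is already the trampoline, and otherwise swaps the trampoline in and hands the original address back. A simplified sketch with an in-process array standing in for the user stack (no fault handling, all names hypothetical):

#include <stdio.h>

#define DEMO_TRAMPOLINE 0xdeadbeefUL

/* stack[0] plays the role of the word at regs->sp. */
static unsigned long hijack_return_addr(unsigned long *stack,
                                        unsigned long trampoline)
{
        unsigned long orig = stack[0];

        if (orig == trampoline)         /* already hijacked: nothing to do */
                return orig;

        stack[0] = trampoline;          /* redirect the return */
        return orig;                    /* remembered so we can return there */
}

int main(void)
{
        unsigned long stack[1] = { 0x401234UL };
        unsigned long orig = hijack_return_addr(stack, DEMO_TRAMPOLINE);

        printf("saved %#lx, stack slot now %#lx\n", orig, stack[0]);
        return 0;
}
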
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 22a1530146a8..10c4f3006afd 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -94,10 +94,6 @@ SECTIONS
94 _text = .; 94 _text = .;
95 /* bootstrapping code */ 95 /* bootstrapping code */
96 HEAD_TEXT 96 HEAD_TEXT
97#ifdef CONFIG_X86_32
98 . = ALIGN(PAGE_SIZE);
99 *(.text..page_aligned)
100#endif
101 . = ALIGN(8); 97 . = ALIGN(8);
102 _stext = .; 98 _stext = .;
103 TEXT_TEXT 99 TEXT_TEXT
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4ad..f77df1c5de6e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
1857 if (!pv_eoi_enabled(vcpu)) 1857 if (!pv_eoi_enabled(vcpu))
1858 return 0; 1858 return 0;
1859 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, 1859 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1860 addr); 1860 addr, sizeof(u8));
1861} 1861}
1862 1862
1863void kvm_lapic_init(void) 1863void kvm_lapic_init(void)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e1b1ce21bc00..7d39d70647e3 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -555,7 +555,7 @@ static void svm_init_erratum_383(void)
555 int err; 555 int err;
556 u64 val; 556 u64 val;
557 557
558 if (!cpu_has_amd_erratum(amd_erratum_383)) 558 if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
559 return; 559 return;
560 560
561 /* Use _safe variants to not break nested virtualization */ 561 /* Use _safe variants to not break nested virtualization */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6667042714cc..867b81037f96 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2459,7 +2459,7 @@ static int hardware_enable(void *garbage)
2459 ept_sync_global(); 2459 ept_sync_global();
2460 } 2460 }
2461 2461
2462 store_gdt(&__get_cpu_var(host_gdt)); 2462 native_store_gdt(&__get_cpu_var(host_gdt));
2463 2463
2464 return 0; 2464 return 0;
2465} 2465}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f81..e1721324c271 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1406 unsigned long flags, this_tsc_khz; 1406 unsigned long flags, this_tsc_khz;
1407 struct kvm_vcpu_arch *vcpu = &v->arch; 1407 struct kvm_vcpu_arch *vcpu = &v->arch;
1408 struct kvm_arch *ka = &v->kvm->arch; 1408 struct kvm_arch *ka = &v->kvm->arch;
1409 void *shared_kaddr;
1410 s64 kernel_ns, max_kernel_ns; 1409 s64 kernel_ns, max_kernel_ns;
1411 u64 tsc_timestamp, host_tsc; 1410 u64 tsc_timestamp, host_tsc;
1412 struct pvclock_vcpu_time_info *guest_hv_clock; 1411 struct pvclock_vcpu_time_info guest_hv_clock;
1413 u8 pvclock_flags; 1412 u8 pvclock_flags;
1414 bool use_master_clock; 1413 bool use_master_clock;
1415 1414
1416 kernel_ns = 0; 1415 kernel_ns = 0;
1417 host_tsc = 0; 1416 host_tsc = 0;
1418 1417
1419 /* Keep irq disabled to prevent changes to the clock */
1420 local_irq_save(flags);
1421 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1422 if (unlikely(this_tsc_khz == 0)) {
1423 local_irq_restore(flags);
1424 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1425 return 1;
1426 }
1427
1428 /* 1418 /*
1429 * If the host uses TSC clock, then passthrough TSC as stable 1419 * If the host uses TSC clock, then passthrough TSC as stable
1430 * to the guest. 1420 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1436 kernel_ns = ka->master_kernel_ns; 1426 kernel_ns = ka->master_kernel_ns;
1437 } 1427 }
1438 spin_unlock(&ka->pvclock_gtod_sync_lock); 1428 spin_unlock(&ka->pvclock_gtod_sync_lock);
1429
1430 /* Keep irq disabled to prevent changes to the clock */
1431 local_irq_save(flags);
1432 this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1433 if (unlikely(this_tsc_khz == 0)) {
1434 local_irq_restore(flags);
1435 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1436 return 1;
1437 }
1439 if (!use_master_clock) { 1438 if (!use_master_clock) {
1440 host_tsc = native_read_tsc(); 1439 host_tsc = native_read_tsc();
1441 kernel_ns = get_kernel_ns(); 1440 kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1463 1462
1464 local_irq_restore(flags); 1463 local_irq_restore(flags);
1465 1464
1466 if (!vcpu->time_page) 1465 if (!vcpu->pv_time_enabled)
1467 return 0; 1466 return 0;
1468 1467
1469 /* 1468 /*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1525 */ 1524 */
1526 vcpu->hv_clock.version += 2; 1525 vcpu->hv_clock.version += 2;
1527 1526
1528 shared_kaddr = kmap_atomic(vcpu->time_page); 1527 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1529 1528 &guest_hv_clock, sizeof(guest_hv_clock))))
1530 guest_hv_clock = shared_kaddr + vcpu->time_offset; 1529 return 0;
1531 1530
1532 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ 1531 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1533 pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); 1532 pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1534 1533
1535 if (vcpu->pvclock_set_guest_stopped_request) { 1534 if (vcpu->pvclock_set_guest_stopped_request) {
1536 pvclock_flags |= PVCLOCK_GUEST_STOPPED; 1535 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
1543 1542
1544 vcpu->hv_clock.flags = pvclock_flags; 1543 vcpu->hv_clock.flags = pvclock_flags;
1545 1544
1546 memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, 1545 kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1547 sizeof(vcpu->hv_clock)); 1546 &vcpu->hv_clock,
1548 1547 sizeof(vcpu->hv_clock));
1549 kunmap_atomic(shared_kaddr);
1550
1551 mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
1552 return 0; 1548 return 0;
1553} 1549}
1554 1550
@@ -1827,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1827 return 0; 1823 return 0;
1828 } 1824 }
1829 1825
1830 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) 1826 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
1827 sizeof(u32)))
1831 return 1; 1828 return 1;
1832 1829
1833 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); 1830 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1837,10 +1834,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1837 1834
1838static void kvmclock_reset(struct kvm_vcpu *vcpu) 1835static void kvmclock_reset(struct kvm_vcpu *vcpu)
1839{ 1836{
1840 if (vcpu->arch.time_page) { 1837 vcpu->arch.pv_time_enabled = false;
1841 kvm_release_page_dirty(vcpu->arch.time_page);
1842 vcpu->arch.time_page = NULL;
1843 }
1844} 1838}
1845 1839
1846static void accumulate_steal_time(struct kvm_vcpu *vcpu) 1840static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1947 break; 1941 break;
1948 case MSR_KVM_SYSTEM_TIME_NEW: 1942 case MSR_KVM_SYSTEM_TIME_NEW:
1949 case MSR_KVM_SYSTEM_TIME: { 1943 case MSR_KVM_SYSTEM_TIME: {
1944 u64 gpa_offset;
1950 kvmclock_reset(vcpu); 1945 kvmclock_reset(vcpu);
1951 1946
1952 vcpu->arch.time = data; 1947 vcpu->arch.time = data;
@@ -1956,14 +1951,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1956 if (!(data & 1)) 1951 if (!(data & 1))
1957 break; 1952 break;
1958 1953
1959 /* ...but clean it before doing the actual write */ 1954 gpa_offset = data & ~(PAGE_MASK | 1);
1960 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1961
1962 vcpu->arch.time_page =
1963 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
1964 1955
1965 if (is_error_page(vcpu->arch.time_page)) 1956 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1966 vcpu->arch.time_page = NULL; 1957 &vcpu->arch.pv_time, data & ~1ULL,
1958 sizeof(struct pvclock_vcpu_time_info)))
1959 vcpu->arch.pv_time_enabled = false;
1960 else
1961 vcpu->arch.pv_time_enabled = true;
1967 1962
1968 break; 1963 break;
1969 } 1964 }
@@ -1980,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1980 return 1; 1975 return 1;
1981 1976
1982 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, 1977 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1983 data & KVM_STEAL_VALID_BITS)) 1978 data & KVM_STEAL_VALID_BITS,
1979 sizeof(struct kvm_steal_time)))
1984 return 1; 1980 return 1;
1985 1981
1986 vcpu->arch.st.msr_val = data; 1982 vcpu->arch.st.msr_val = data;
@@ -2967,7 +2963,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2967 */ 2963 */
2968static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) 2964static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
2969{ 2965{
2970 if (!vcpu->arch.time_page) 2966 if (!vcpu->arch.pv_time_enabled)
2971 return -EINVAL; 2967 return -EINVAL;
2972 vcpu->arch.pvclock_set_guest_stopped_request = true; 2968 vcpu->arch.pvclock_set_guest_stopped_request = true;
2973 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 2969 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6714,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6718 goto fail_free_wbinvd_dirty_mask; 6714 goto fail_free_wbinvd_dirty_mask;
6719 6715
6720 vcpu->arch.ia32_tsc_adjust_msr = 0x0; 6716 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
6717 vcpu->arch.pv_time_enabled = false;
6721 kvm_async_pf_hash_reset(vcpu); 6718 kvm_async_pf_hash_reset(vcpu);
6722 kvm_pmu_init(vcpu); 6719 kvm_pmu_init(vcpu);
6723 6720
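
[Editorial illustration, not part of the patch] The kvmclock hunks above replace the pinned time_page with a pv_time_enabled flag plus a gfn_to_hva_cache that is primed once at MSR-write time. A kernel-style sketch of that shape, assuming kvm_write_guest_cached() is the matching write helper from the same cache API; the names here are illustrative, not the patch's:

```c
#include <linux/kvm_host.h>

/* Sketch: cache the guest physical address once when the MSR is written,
 * then reuse the cached mapping on every clock update instead of holding
 * a struct page.  'example_*' names and the update path are stand-ins. */
static struct gfn_to_hva_cache example_cache;
static bool example_enabled;

static int example_enable(struct kvm *kvm, gpa_t pvti_gpa)
{
	if (kvm_gfn_to_hva_cache_init(kvm, &example_cache, pvti_gpa,
				      sizeof(struct pvclock_vcpu_time_info)))
		return -EFAULT;
	example_enabled = true;
	return 0;
}

static void example_update(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *pvti)
{
	if (!example_enabled)
		return;
	/* best-effort write through the cached hva */
	kvm_write_guest_cached(kvm, &example_cache, pvti, sizeof(*pvti));
}
```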
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 29043d2048a0..4a0890f815c4 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -1,7 +1,6 @@
1config LGUEST_GUEST 1config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 select PARAVIRT 3 depends on X86_32 && PARAVIRT
4 depends on X86_32
5 select TTY 4 select TTY
6 select VIRTUALIZATION 5 select VIRTUALIZATION
7 select VIRTIO 6 select VIRTIO
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 1cbd89ca5569..7114c63f047d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
1334 pv_mmu_ops.read_cr3 = lguest_read_cr3; 1334 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1335 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; 1335 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
1336 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; 1336 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
1337 pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
1337 pv_mmu_ops.pte_update = lguest_pte_update; 1338 pv_mmu_ops.pte_update = lguest_pte_update;
1338 pv_mmu_ops.pte_update_defer = lguest_pte_update; 1339 pv_mmu_ops.pte_update_defer = lguest_pte_update;
1339 1340
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 2af5df3ade7c..e78b8eee6615 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -61,7 +61,7 @@ ENTRY(csum_partial)
61 testl $3, %esi # Check alignment. 61 testl $3, %esi # Check alignment.
62 jz 2f # Jump if alignment is ok. 62 jz 2f # Jump if alignment is ok.
63 testl $1, %esi # Check alignment. 63 testl $1, %esi # Check alignment.
64 jz 10f # Jump if alignment is boundary of 2bytes. 64 jz 10f # Jump if alignment is boundary of 2 bytes.
65 65
66 # buf is odd 66 # buf is odd
67 dec %ecx 67 dec %ecx
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index b908a59eccf5..e78761d6b7f8 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
26 char *ret = dest; 26 char *ret = dest;
27 27
28 __asm__ __volatile__( 28 __asm__ __volatile__(
29 /* Handle more 16bytes in loop */ 29 /* Handle more 16 bytes in loop */
30 "cmp $0x10, %0\n\t" 30 "cmp $0x10, %0\n\t"
31 "jb 1f\n\t" 31 "jb 1f\n\t"
32 32
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
51 "sub $0x10, %0\n\t" 51 "sub $0x10, %0\n\t"
52 52
53 /* 53 /*
54 * We gobble 16byts forward in each loop. 54 * We gobble 16 bytes forward in each loop.
55 */ 55 */
56 "3:\n\t" 56 "3:\n\t"
57 "sub $0x10, %0\n\t" 57 "sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
117 "sub $0x10, %0\n\t" 117 "sub $0x10, %0\n\t"
118 118
119 /* 119 /*
120 * We gobble 16byts backward in each loop. 120 * We gobble 16 bytes backward in each loop.
121 */ 121 */
122 "7:\n\t" 122 "7:\n\t"
123 "sub $0x10, %0\n\t" 123 "sub $0x10, %0\n\t"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1c273be7c97e..56313a326188 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -98,7 +98,7 @@ ENTRY(memcpy)
98 subq $0x20, %rdx 98 subq $0x20, %rdx
99 /* 99 /*
100 * At most 3 ALU operations in one cycle, 100 * At most 3 ALU operations in one cycle,
101 * so append NOPS in the same 16bytes trunk. 101 * so append NOPS in the same 16 bytes trunk.
102 */ 102 */
103 .p2align 4 103 .p2align 4
104.Lcopy_backward_loop: 104.Lcopy_backward_loop:
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ee164610ec46..65268a6104f4 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -27,7 +27,7 @@
27ENTRY(memmove) 27ENTRY(memmove)
28 CFI_STARTPROC 28 CFI_STARTPROC
29 29
30 /* Handle more 32bytes in loop */ 30 /* Handle more 32 bytes in loop */
31 mov %rdi, %rax 31 mov %rdi, %rax
32 cmp $0x20, %rdx 32 cmp $0x20, %rdx
33 jb 1f 33 jb 1f
@@ -56,7 +56,7 @@ ENTRY(memmove)
563: 563:
57 sub $0x20, %rdx 57 sub $0x20, %rdx
58 /* 58 /*
59 * We gobble 32byts forward in each loop. 59 * We gobble 32 bytes forward in each loop.
60 */ 60 */
615: 615:
62 sub $0x20, %rdx 62 sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
122 addq %rdx, %rdi 122 addq %rdx, %rdi
123 subq $0x20, %rdx 123 subq $0x20, %rdx
124 /* 124 /*
125 * We gobble 32byts backward in each loop. 125 * We gobble 32 bytes backward in each loop.
126 */ 126 */
1278: 1278:
128 subq $0x20, %rdx 128 subq $0x20, %rdx
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 05928aae911e..906fea315791 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
74 char c; 74 char c;
75 unsigned zero_len; 75 unsigned zero_len;
76 76
77 for (; len; --len) { 77 for (; len; --len, to++) {
78 if (__get_user_nocheck(c, from++, sizeof(char))) 78 if (__get_user_nocheck(c, from++, sizeof(char)))
79 break; 79 break;
80 if (__put_user_nocheck(c, to++, sizeof(char))) 80 if (__put_user_nocheck(c, to, sizeof(char)))
81 break; 81 break;
82 } 82 }
83 83
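
[Editorial illustration, not part of the patch] The usercopy_64.c hunk above moves the destination increment out of the __put_user_nocheck() call, so that when the store faults 'to' still points at the byte that was never written and the remaining length stays consistent for the zeroing that follows. A minimal user-space sketch of the same loop shape; put_byte() and FAIL_AT are made-up stand-ins for the faulting store:

```c
#include <stdio.h>
#include <string.h>

#define FAIL_AT 3	/* hypothetical index at which the "put" faults */

static int put_byte(char *to, char c, unsigned idx)
{
	if (idx == FAIL_AT)
		return -1;	/* simulated fault on the destination */
	*to = c;
	return 0;
}

static unsigned copy_tail(char *to, const char *from, unsigned len)
{
	unsigned idx = 0;

	/* Mirrors the fixed loop: 'to' only advances in the for-update
	 * clause, so a failed store leaves it at the faulting byte. */
	for (; len; --len, to++, idx++) {
		char c = from[idx];

		if (put_byte(to, c, idx))
			break;
	}
	return len;	/* bytes not copied; caller may zero from 'to' */
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];
	unsigned left;

	memset(dst, '.', sizeof(dst));
	left = copy_tail(dst, src, 7);
	printf("copied %u, left %u, dst=%.7s\n", 7 - left, left, dst);
	return 0;
}
```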
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 5247d01329ca..2ca15b59fb3f 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -130,9 +130,8 @@ int __init amd_numa_init(void)
130 } 130 }
131 131
132 limit >>= 16; 132 limit >>= 16;
133 limit <<= 24;
134 limit |= (1<<24)-1;
135 limit++; 133 limit++;
134 limit <<= 24;
136 135
137 if (limit > end) 136 if (limit > end)
138 limit = end; 137 limit = end;
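
[Editorial illustration, not part of the patch] The amdtopology.c change above reorders the 'limit' arithmetic: both the old and the new sequence turn the raw register field into the exclusive end address ((field + 1) << 24), the new one just without the intermediate mask-and-increment. A quick user-space check of that equivalence, with arbitrary register values:

```c
#include <assert.h>
#include <stdio.h>

static unsigned long old_limit(unsigned long reg)
{
	unsigned long limit = reg >> 16;

	limit <<= 24;
	limit |= (1UL << 24) - 1;
	limit++;
	return limit;
}

static unsigned long new_limit(unsigned long reg)
{
	unsigned long limit = reg >> 16;

	limit++;
	limit <<= 24;
	return limit;
}

int main(void)
{
	unsigned long regs[] = { 0x00000000, 0x00010000, 0x00ff0000, 0xffff0000 };

	for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		assert(old_limit(regs[i]) == new_limit(regs[i]));
		printf("reg=%#lx -> limit=%#lx\n", regs[i], new_limit(regs[i]));
	}
	return 0;
}
```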
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2b97525246d4..654be4ae3047 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,12 +13,12 @@
13#include <linux/perf_event.h> /* perf_sw_event */ 13#include <linux/perf_event.h> /* perf_sw_event */
14#include <linux/hugetlb.h> /* hstate_index_to_shift */ 14#include <linux/hugetlb.h> /* hstate_index_to_shift */
15#include <linux/prefetch.h> /* prefetchw */ 15#include <linux/prefetch.h> /* prefetchw */
16#include <linux/context_tracking.h> /* exception_enter(), ... */
16 17
17#include <asm/traps.h> /* dotraplinkage, ... */ 18#include <asm/traps.h> /* dotraplinkage, ... */
18#include <asm/pgalloc.h> /* pgd_*(), ... */ 19#include <asm/pgalloc.h> /* pgd_*(), ... */
19#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ 20#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20#include <asm/fixmap.h> /* VSYSCALL_START */ 21#include <asm/fixmap.h> /* VSYSCALL_START */
21#include <asm/context_tracking.h> /* exception_enter(), ... */
22 22
23/* 23/*
24 * Page fault error code bits: 24 * Page fault error code bits:
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
378 if (pgd_none(*pgd_ref)) 378 if (pgd_none(*pgd_ref))
379 return -1; 379 return -1;
380 380
381 if (pgd_none(*pgd)) 381 if (pgd_none(*pgd)) {
382 set_pgd(pgd, *pgd_ref); 382 set_pgd(pgd, *pgd_ref);
383 else 383 arch_flush_lazy_mmu_mode();
384 } else {
384 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); 385 BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
386 }
385 387
386 /* 388 /*
387 * Below here mismatches are bugs because these lower tables 389 * Below here mismatches are bugs because these lower tables
@@ -555,7 +557,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
555 /* 557 /*
556 * Pentium F0 0F C7 C8 bug workaround: 558 * Pentium F0 0F C7 C8 bug workaround:
557 */ 559 */
558 if (boot_cpu_data.f00f_bug) { 560 if (boot_cpu_has_bug(X86_BUG_F00F)) {
559 nr = (address - idt_descr.address) >> 3; 561 nr = (address - idt_descr.address) >> 3;
560 562
561 if (nr == 6) { 563 if (nr == 6) {
@@ -1222,7 +1224,9 @@ good_area:
1222dotraplinkage void __kprobes 1224dotraplinkage void __kprobes
1223do_page_fault(struct pt_regs *regs, unsigned long error_code) 1225do_page_fault(struct pt_regs *regs, unsigned long error_code)
1224{ 1226{
1225 exception_enter(regs); 1227 enum ctx_state prev_state;
1228
1229 prev_state = exception_enter();
1226 __do_page_fault(regs, error_code); 1230 __do_page_fault(regs, error_code);
1227 exception_exit(regs); 1231 exception_exit(prev_state);
1228} 1232}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6f31ee56c008..252b8f5489ba 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void)
137 add_highpages_with_active_regions(nid, zone_start_pfn, 137 add_highpages_with_active_regions(nid, zone_start_pfn,
138 zone_end_pfn); 138 zone_end_pfn);
139 } 139 }
140 totalram_pages += totalhigh_pages;
141} 140}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 59b7fc453277..fdc5dca14fb3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -515,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
515 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); 515 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
516 516
517 for (; addr < end; addr += PAGE_SIZE) { 517 for (; addr < end; addr += PAGE_SIZE) {
518 ClearPageReserved(virt_to_page(addr));
519 init_page_count(virt_to_page(addr));
520 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 518 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
521 free_page(addr); 519 free_reserved_page(virt_to_page(addr));
522 totalram_pages++;
523 } 520 }
524#endif 521#endif
525} 522}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2d19001151d5..3ac7e319918d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
427 pkmap_page_table = pte; 427 pkmap_page_table = pte;
428} 428}
429 429
430static void __init add_one_highpage_init(struct page *page)
431{
432 ClearPageReserved(page);
433 init_page_count(page);
434 __free_page(page);
435 totalhigh_pages++;
436}
437
438void __init add_highpages_with_active_regions(int nid, 430void __init add_highpages_with_active_regions(int nid,
439 unsigned long start_pfn, unsigned long end_pfn) 431 unsigned long start_pfn, unsigned long end_pfn)
440{ 432{
@@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid,
448 start_pfn, end_pfn); 440 start_pfn, end_pfn);
449 for ( ; pfn < e_pfn; pfn++) 441 for ( ; pfn < e_pfn; pfn++)
450 if (pfn_valid(pfn)) 442 if (pfn_valid(pfn))
451 add_one_highpage_init(pfn_to_page(pfn)); 443 free_highmem_page(pfn_to_page(pfn));
452 } 444 }
453} 445}
454#else 446#else
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 474e28f10815..caad9a0ee19f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1011,14 +1011,12 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
1011 flush_tlb_all(); 1011 flush_tlb_all();
1012} 1012}
1013 1013
1014void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages) 1014void __ref vmemmap_free(unsigned long start, unsigned long end)
1015{ 1015{
1016 unsigned long start = (unsigned long)memmap;
1017 unsigned long end = (unsigned long)(memmap + nr_pages);
1018
1019 remove_pagetable(start, end, false); 1016 remove_pagetable(start, end, false);
1020} 1017}
1021 1018
1019#ifdef CONFIG_MEMORY_HOTREMOVE
1022static void __meminit 1020static void __meminit
1023kernel_physical_mapping_remove(unsigned long start, unsigned long end) 1021kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1024{ 1022{
@@ -1028,7 +1026,6 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1028 remove_pagetable(start, end, true); 1026 remove_pagetable(start, end, true);
1029} 1027}
1030 1028
1031#ifdef CONFIG_MEMORY_HOTREMOVE
1032int __ref arch_remove_memory(u64 start, u64 size) 1029int __ref arch_remove_memory(u64 start, u64 size)
1033{ 1030{
1034 unsigned long start_pfn = start >> PAGE_SHIFT; 1031 unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -1067,10 +1064,9 @@ void __init mem_init(void)
1067 1064
1068 /* clear_bss() already clear the empty_zero_page */ 1065 /* clear_bss() already clear the empty_zero_page */
1069 1066
1070 reservedpages = 0;
1071
1072 /* this will put all low memory onto the freelists */
1073 register_page_bootmem_info(); 1067 register_page_bootmem_info();
1068
1069 /* this will put all memory onto the freelists */
1074 totalram_pages = free_all_bootmem(); 1070 totalram_pages = free_all_bootmem();
1075 1071
1076 absent_pages = absent_pages_in_range(0, max_pfn); 1072 absent_pages = absent_pages_in_range(0, max_pfn);
@@ -1285,18 +1281,17 @@ static long __meminitdata addr_start, addr_end;
1285static void __meminitdata *p_start, *p_end; 1281static void __meminitdata *p_start, *p_end;
1286static int __meminitdata node_start; 1282static int __meminitdata node_start;
1287 1283
1288int __meminit 1284static int __meminit vmemmap_populate_hugepages(unsigned long start,
1289vmemmap_populate(struct page *start_page, unsigned long size, int node) 1285 unsigned long end, int node)
1290{ 1286{
1291 unsigned long addr = (unsigned long)start_page; 1287 unsigned long addr;
1292 unsigned long end = (unsigned long)(start_page + size);
1293 unsigned long next; 1288 unsigned long next;
1294 pgd_t *pgd; 1289 pgd_t *pgd;
1295 pud_t *pud; 1290 pud_t *pud;
1296 pmd_t *pmd; 1291 pmd_t *pmd;
1297 1292
1298 for (; addr < end; addr = next) { 1293 for (addr = start; addr < end; addr = next) {
1299 void *p = NULL; 1294 next = pmd_addr_end(addr, end);
1300 1295
1301 pgd = vmemmap_pgd_populate(addr, node); 1296 pgd = vmemmap_pgd_populate(addr, node);
1302 if (!pgd) 1297 if (!pgd)
@@ -1306,31 +1301,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
1306 if (!pud) 1301 if (!pud)
1307 return -ENOMEM; 1302 return -ENOMEM;
1308 1303
1309 if (!cpu_has_pse) { 1304 pmd = pmd_offset(pud, addr);
1310 next = (addr + PAGE_SIZE) & PAGE_MASK; 1305 if (pmd_none(*pmd)) {
1311 pmd = vmemmap_pmd_populate(pud, addr, node); 1306 void *p;
1312
1313 if (!pmd)
1314 return -ENOMEM;
1315
1316 p = vmemmap_pte_populate(pmd, addr, node);
1317 1307
1318 if (!p) 1308 p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1319 return -ENOMEM; 1309 if (p) {
1320
1321 addr_end = addr + PAGE_SIZE;
1322 p_end = p + PAGE_SIZE;
1323 } else {
1324 next = pmd_addr_end(addr, end);
1325
1326 pmd = pmd_offset(pud, addr);
1327 if (pmd_none(*pmd)) {
1328 pte_t entry; 1310 pte_t entry;
1329 1311
1330 p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1331 if (!p)
1332 return -ENOMEM;
1333
1334 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, 1312 entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1335 PAGE_KERNEL_LARGE); 1313 PAGE_KERNEL_LARGE);
1336 set_pmd(pmd, __pmd(pte_val(entry))); 1314 set_pmd(pmd, __pmd(pte_val(entry)));
@@ -1347,15 +1325,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
1347 1325
1348 addr_end = addr + PMD_SIZE; 1326 addr_end = addr + PMD_SIZE;
1349 p_end = p + PMD_SIZE; 1327 p_end = p + PMD_SIZE;
1350 } else 1328 continue;
1351 vmemmap_verify((pte_t *)pmd, node, addr, next); 1329 }
1330 } else if (pmd_large(*pmd)) {
1331 vmemmap_verify((pte_t *)pmd, node, addr, next);
1332 continue;
1352 } 1333 }
1353 1334 pr_warn_once("vmemmap: falling back to regular page backing\n");
1335 if (vmemmap_populate_basepages(addr, next, node))
1336 return -ENOMEM;
1354 } 1337 }
1355 sync_global_pgds((unsigned long)start_page, end - 1);
1356 return 0; 1338 return 0;
1357} 1339}
1358 1340
1341int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1342{
1343 int err;
1344
1345 if (cpu_has_pse)
1346 err = vmemmap_populate_hugepages(start, end, node);
1347 else
1348 err = vmemmap_populate_basepages(start, end, node);
1349 if (!err)
1350 sync_global_pgds(start, end - 1);
1351 return err;
1352}
1353
1359#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) 1354#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
1360void register_page_bootmem_memmap(unsigned long section_nr, 1355void register_page_bootmem_memmap(unsigned long section_nr,
1361 struct page *start_page, unsigned long size) 1356 struct page *start_page, unsigned long size)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f..9a1e6583910c 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
282 in parallel. Reuse of the virtual address is prevented by 282 in parallel. Reuse of the virtual address is prevented by
283 leaving it in the global lists until we're done with it. 283 leaving it in the global lists until we're done with it.
284 cpa takes care of the direct mappings. */ 284 cpa takes care of the direct mappings. */
285 read_lock(&vmlist_lock); 285 p = find_vm_area((void __force *)addr);
286 for (p = vmlist; p; p = p->next) {
287 if (p->addr == (void __force *)addr)
288 break;
289 }
290 read_unlock(&vmlist_lock);
291 286
292 if (!p) { 287 if (!p) {
293 printk(KERN_ERR "iounmap: bad address %p\n", addr); 288 printk(KERN_ERR "iounmap: bad address %p\n", addr);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 72fe01e9e414..a71c4e207679 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -114,14 +114,11 @@ void numa_clear_node(int cpu)
114 */ 114 */
115void __init setup_node_to_cpumask_map(void) 115void __init setup_node_to_cpumask_map(void)
116{ 116{
117 unsigned int node, num = 0; 117 unsigned int node;
118 118
119 /* setup nr_node_ids if not done yet */ 119 /* setup nr_node_ids if not done yet */
120 if (nr_node_ids == MAX_NUMNODES) { 120 if (nr_node_ids == MAX_NUMNODES)
121 for_each_node_mask(node, node_possible_map) 121 setup_nr_node_ids();
122 num = node;
123 nr_node_ids = num + 1;
124 }
125 122
126 /* allocate the map */ 123 /* allocate the map */
127 for (node = 0; node < nr_node_ids; node++) 124 for (node = 0; node < nr_node_ids; node++)
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b0086567271c..d0b1773d9d2e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
68 s->gpg++; 68 s->gpg++;
69 i += GPS/PAGE_SIZE; 69 i += GPS/PAGE_SIZE;
70 } else if (level == PG_LEVEL_2M) { 70 } else if (level == PG_LEVEL_2M) {
71 if (!(pte_val(*pte) & _PAGE_PSE)) { 71 if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
72 printk(KERN_ERR 72 printk(KERN_ERR
73 "%lx level %d but not PSE %Lx\n", 73 "%lx level %d but not PSE %Lx\n",
74 addr, level, (u64)pte_val(*pte)); 74 addr, level, (u64)pte_val(*pte));
@@ -130,13 +130,12 @@ static int pageattr_test(void)
130 } 130 }
131 131
132 failed += print_split(&sa); 132 failed += print_split(&sa);
133 srandom32(100);
134 133
135 for (i = 0; i < NTEST; i++) { 134 for (i = 0; i < NTEST; i++) {
136 unsigned long pfn = random32() % max_pfn_mapped; 135 unsigned long pfn = prandom_u32() % max_pfn_mapped;
137 136
138 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); 137 addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
139 len[i] = random32() % 100; 138 len[i] = prandom_u32() % 100;
140 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); 139 len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
141 140
142 if (len[i] == 0) 141 if (len[i] == 0)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 091934e1d0d9..bb32480c2d71 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
467 * We are safe now. Check whether the new pgprot is the same: 467 * We are safe now. Check whether the new pgprot is the same:
468 */ 468 */
469 old_pte = *kpte; 469 old_pte = *kpte;
470 old_prot = new_prot = req_prot = pte_pgprot(old_pte); 470 old_prot = req_prot = pte_pgprot(old_pte);
471 471
472 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); 472 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
473 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); 473 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
478 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL 478 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
479 * for the ancient hardware that doesn't support it. 479 * for the ancient hardware that doesn't support it.
480 */ 480 */
481 if (pgprot_val(new_prot) & _PAGE_PRESENT) 481 if (pgprot_val(req_prot) & _PAGE_PRESENT)
482 pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; 482 pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
483 else 483 else
484 pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); 484 pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
485 485
486 new_prot = canon_pgprot(new_prot); 486 req_prot = canon_pgprot(req_prot);
487 487
488 /* 488 /*
489 * old_pte points to the large page base address. So we need 489 * old_pte points to the large page base address. So we need
@@ -542,13 +542,14 @@ out_unlock:
542 return do_split; 542 return do_split;
543} 543}
544 544
545int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase) 545static int
546__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
546{ 547{
548 pte_t *pbase = (pte_t *)page_address(base);
547 unsigned long pfn, pfninc = 1; 549 unsigned long pfn, pfninc = 1;
548 unsigned int i, level; 550 unsigned int i, level;
549 pte_t *tmp; 551 pte_t *tmp;
550 pgprot_t ref_prot; 552 pgprot_t ref_prot;
551 struct page *base = virt_to_page(pbase);
552 553
553 spin_lock(&pgd_lock); 554 spin_lock(&pgd_lock);
554 /* 555 /*
@@ -633,7 +634,6 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
633 634
634static int split_large_page(pte_t *kpte, unsigned long address) 635static int split_large_page(pte_t *kpte, unsigned long address)
635{ 636{
636 pte_t *pbase;
637 struct page *base; 637 struct page *base;
638 638
639 if (!debug_pagealloc) 639 if (!debug_pagealloc)
@@ -644,8 +644,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
644 if (!base) 644 if (!base)
645 return -ENOMEM; 645 return -ENOMEM;
646 646
647 pbase = (pte_t *)page_address(base); 647 if (__split_large_page(kpte, address, base))
648 if (__split_large_page(kpte, address, pbase))
649 __free_page(base); 648 __free_page(base);
650 649
651 return 0; 650 return 0;
@@ -1413,6 +1412,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
1413 * but that can deadlock->flush only current cpu: 1412 * but that can deadlock->flush only current cpu:
1414 */ 1413 */
1415 __flush_tlb_all(); 1414 __flush_tlb_all();
1415
1416 arch_flush_lazy_mmu_mode();
1416} 1417}
1417 1418
1418#ifdef CONFIG_HIBERNATION 1419#ifdef CONFIG_HIBERNATION
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 193350b51f90..17fda6a8b3c2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
58void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 58void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
59{ 59{
60 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); 60 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
61 /*
62 * NOTE! For PAE, any changes to the top page-directory-pointer-table
63 * entries need a full cr3 reload to flush.
64 */
65#ifdef CONFIG_X86_PAE
66 tlb->need_flush_all = 1;
67#endif
61 tlb_remove_page(tlb, virt_to_page(pmd)); 68 tlb_remove_page(tlb, virt_to_page(pmd));
62} 69}
63 70
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 901177d75ff5..305c68b8d538 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/pci.h> 8#include <linux/pci.h>
9#include <linux/pci-acpi.h>
9#include <linux/ioport.h> 10#include <linux/ioport.h>
10#include <linux/init.h> 11#include <linux/init.h>
11#include <linux/dmi.h> 12#include <linux/dmi.h>
@@ -170,6 +171,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
170 pcibios_fixup_device_resources(dev); 171 pcibios_fixup_device_resources(dev);
171} 172}
172 173
174void pcibios_add_bus(struct pci_bus *bus)
175{
176 acpi_pci_add_bus(bus);
177}
178
179void pcibios_remove_bus(struct pci_bus *bus)
180{
181 acpi_pci_remove_bus(bus);
182}
183
173/* 184/*
174 * Only use DMI information to set this if nothing was passed 185 * Only use DMI information to set this if nothing was passed
175 * on the kernel command line (which was parsed earlier). 186 * on the kernel command line (which was parsed earlier).
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 94e76620460f..4a9be6ddf054 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
177 goto error; 177 goto error;
178 i = 0; 178 i = 0;
179 list_for_each_entry(msidesc, &dev->msi_list, list) { 179 list_for_each_entry(msidesc, &dev->msi_list, list) {
180 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, 180 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
181 (type == PCI_CAP_ID_MSIX) ? 181 (type == PCI_CAP_ID_MSIX) ?
182 "pcifront-msi-x" : 182 "pcifront-msi-x" :
183 "pcifront-msi", 183 "pcifront-msi",
@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
244 dev_dbg(&dev->dev, 244 dev_dbg(&dev->dev,
245 "xen: msi already bound to pirq=%d\n", pirq); 245 "xen: msi already bound to pirq=%d\n", pirq);
246 } 246 }
247 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, 247 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
248 (type == PCI_CAP_ID_MSIX) ? 248 (type == PCI_CAP_ID_MSIX) ?
249 "msi-x" : "msi", 249 "msi-x" : "msi",
250 DOMID_SELF); 250 DOMID_SELF);
@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
326 } 326 }
327 327
328 ret = xen_bind_pirq_msi_to_irq(dev, msidesc, 328 ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
329 map_irq.pirq, map_irq.index, 329 map_irq.pirq,
330 (type == PCI_CAP_ID_MSIX) ? 330 (type == PCI_CAP_ID_MSIX) ?
331 "msi-x" : "msi", 331 "msi-x" : "msi",
332 domid); 332 domid);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 5f2ecaf3f9d8..b55d174e5034 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -41,6 +41,7 @@
41#include <linux/io.h> 41#include <linux/io.h>
42#include <linux/reboot.h> 42#include <linux/reboot.h>
43#include <linux/bcd.h> 43#include <linux/bcd.h>
44#include <linux/ucs2_string.h>
44 45
45#include <asm/setup.h> 46#include <asm/setup.h>
46#include <asm/efi.h> 47#include <asm/efi.h>
@@ -48,9 +49,17 @@
48#include <asm/cacheflush.h> 49#include <asm/cacheflush.h>
49#include <asm/tlbflush.h> 50#include <asm/tlbflush.h>
50#include <asm/x86_init.h> 51#include <asm/x86_init.h>
52#include <asm/rtc.h>
51 53
52#define EFI_DEBUG 1 54#define EFI_DEBUG 1
53 55
56/*
57 * There's some additional metadata associated with each
58 * variable. Intel's reference implementation is 60 bytes - bump that
59 * to account for potential alignment constraints
60 */
61#define VAR_METADATA_SIZE 64
62
54struct efi __read_mostly efi = { 63struct efi __read_mostly efi = {
55 .mps = EFI_INVALID_TABLE_ADDR, 64 .mps = EFI_INVALID_TABLE_ADDR,
56 .acpi = EFI_INVALID_TABLE_ADDR, 65 .acpi = EFI_INVALID_TABLE_ADDR,
@@ -69,6 +78,13 @@ struct efi_memory_map memmap;
69static struct efi efi_phys __initdata; 78static struct efi efi_phys __initdata;
70static efi_system_table_t efi_systab __initdata; 79static efi_system_table_t efi_systab __initdata;
71 80
81static u64 efi_var_store_size;
82static u64 efi_var_remaining_size;
83static u64 efi_var_max_var_size;
84static u64 boot_used_size;
85static u64 boot_var_size;
86static u64 active_size;
87
72unsigned long x86_efi_facility; 88unsigned long x86_efi_facility;
73 89
74/* 90/*
@@ -98,6 +114,15 @@ static int __init setup_add_efi_memmap(char *arg)
98} 114}
99early_param("add_efi_memmap", setup_add_efi_memmap); 115early_param("add_efi_memmap", setup_add_efi_memmap);
100 116
117static bool efi_no_storage_paranoia;
118
119static int __init setup_storage_paranoia(char *arg)
120{
121 efi_no_storage_paranoia = true;
122 return 0;
123}
124early_param("efi_no_storage_paranoia", setup_storage_paranoia);
125
101 126
102static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) 127static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
103{ 128{
@@ -162,8 +187,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
162 efi_char16_t *name, 187 efi_char16_t *name,
163 efi_guid_t *vendor) 188 efi_guid_t *vendor)
164{ 189{
165 return efi_call_virt3(get_next_variable, 190 efi_status_t status;
166 name_size, name, vendor); 191 static bool finished = false;
192 static u64 var_size;
193
194 status = efi_call_virt3(get_next_variable,
195 name_size, name, vendor);
196
197 if (status == EFI_NOT_FOUND) {
198 finished = true;
199 if (var_size < boot_used_size) {
200 boot_var_size = boot_used_size - var_size;
201 active_size += boot_var_size;
202 } else {
203 printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
204 }
205 }
206
207 if (boot_used_size && !finished) {
208 unsigned long size;
209 u32 attr;
210 efi_status_t s;
211 void *tmp;
212
213 s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
214
215 if (s != EFI_BUFFER_TOO_SMALL || !size)
216 return status;
217
218 tmp = kmalloc(size, GFP_ATOMIC);
219
220 if (!tmp)
221 return status;
222
223 s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
224
225 if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
226 var_size += size;
227 var_size += ucs2_strsize(name, 1024);
228 active_size += size;
229 active_size += VAR_METADATA_SIZE;
230 active_size += ucs2_strsize(name, 1024);
231 }
232
233 kfree(tmp);
234 }
235
236 return status;
167} 237}
168 238
169static efi_status_t virt_efi_set_variable(efi_char16_t *name, 239static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -172,9 +242,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
172 unsigned long data_size, 242 unsigned long data_size,
173 void *data) 243 void *data)
174{ 244{
175 return efi_call_virt5(set_variable, 245 efi_status_t status;
176 name, vendor, attr, 246 u32 orig_attr = 0;
177 data_size, data); 247 unsigned long orig_size = 0;
248
249 status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
250 NULL);
251
252 if (status != EFI_BUFFER_TOO_SMALL)
253 orig_size = 0;
254
255 status = efi_call_virt5(set_variable,
256 name, vendor, attr,
257 data_size, data);
258
259 if (status == EFI_SUCCESS) {
260 if (orig_size) {
261 active_size -= orig_size;
262 active_size -= ucs2_strsize(name, 1024);
263 active_size -= VAR_METADATA_SIZE;
264 }
265 if (data_size) {
266 active_size += data_size;
267 active_size += ucs2_strsize(name, 1024);
268 active_size += VAR_METADATA_SIZE;
269 }
270 }
271
272 return status;
178} 273}
179 274
180static efi_status_t virt_efi_query_variable_info(u32 attr, 275static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -258,10 +353,10 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
258 353
259int efi_set_rtc_mmss(unsigned long nowtime) 354int efi_set_rtc_mmss(unsigned long nowtime)
260{ 355{
261 int real_seconds, real_minutes;
262 efi_status_t status; 356 efi_status_t status;
263 efi_time_t eft; 357 efi_time_t eft;
264 efi_time_cap_t cap; 358 efi_time_cap_t cap;
359 struct rtc_time tm;
265 360
266 status = efi.get_time(&eft, &cap); 361 status = efi.get_time(&eft, &cap);
267 if (status != EFI_SUCCESS) { 362 if (status != EFI_SUCCESS) {
@@ -269,13 +364,20 @@ int efi_set_rtc_mmss(unsigned long nowtime)
269 return -1; 364 return -1;
270 } 365 }
271 366
272 real_seconds = nowtime % 60; 367 rtc_time_to_tm(nowtime, &tm);
273 real_minutes = nowtime / 60; 368 if (!rtc_valid_tm(&tm)) {
274 if (((abs(real_minutes - eft.minute) + 15)/30) & 1) 369 eft.year = tm.tm_year + 1900;
275 real_minutes += 30; 370 eft.month = tm.tm_mon + 1;
276 real_minutes %= 60; 371 eft.day = tm.tm_mday;
277 eft.minute = real_minutes; 372 eft.minute = tm.tm_min;
278 eft.second = real_seconds; 373 eft.second = tm.tm_sec;
374 eft.nanosecond = 0;
375 } else {
376 printk(KERN_ERR
377 "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
378 __FUNCTION__, nowtime);
379 return -1;
380 }
279 381
280 status = efi.set_time(&eft); 382 status = efi.set_time(&eft);
281 if (status != EFI_SUCCESS) { 383 if (status != EFI_SUCCESS) {
@@ -682,6 +784,9 @@ void __init efi_init(void)
682 char vendor[100] = "unknown"; 784 char vendor[100] = "unknown";
683 int i = 0; 785 int i = 0;
684 void *tmp; 786 void *tmp;
787 struct setup_data *data;
788 struct efi_var_bootdata *efi_var_data;
789 u64 pa_data;
685 790
686#ifdef CONFIG_X86_32 791#ifdef CONFIG_X86_32
687 if (boot_params.efi_info.efi_systab_hi || 792 if (boot_params.efi_info.efi_systab_hi ||
@@ -699,6 +804,22 @@ void __init efi_init(void)
699 if (efi_systab_init(efi_phys.systab)) 804 if (efi_systab_init(efi_phys.systab))
700 return; 805 return;
701 806
807 pa_data = boot_params.hdr.setup_data;
808 while (pa_data) {
809 data = early_ioremap(pa_data, sizeof(*efi_var_data));
810 if (data->type == SETUP_EFI_VARS) {
811 efi_var_data = (struct efi_var_bootdata *)data;
812
813 efi_var_store_size = efi_var_data->store_size;
814 efi_var_remaining_size = efi_var_data->remaining_size;
815 efi_var_max_var_size = efi_var_data->max_var_size;
816 }
817 pa_data = data->next;
818 early_iounmap(data, sizeof(*efi_var_data));
819 }
820
821 boot_used_size = efi_var_store_size - efi_var_remaining_size;
822
702 set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); 823 set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
703 824
704 /* 825 /*
@@ -999,3 +1120,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
999 } 1120 }
1000 return 0; 1121 return 0;
1001} 1122}
1123
1124/*
1125 * Some firmware has serious problems when using more than 50% of the EFI
1126 * variable store, i.e. it triggers bugs that can brick machines. Ensure that
1127 * we never use more than this safe limit.
1128 *
1129 * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
1130 * store.
1131 */
1132efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
1133{
1134 efi_status_t status;
1135 u64 storage_size, remaining_size, max_size;
1136
1137 status = efi.query_variable_info(attributes, &storage_size,
1138 &remaining_size, &max_size);
1139 if (status != EFI_SUCCESS)
1140 return status;
1141
1142 if (!max_size && remaining_size > size)
1143 printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
1144 " is returning MaxVariableSize=0\n");
1145 /*
1146 * Some firmware implementations refuse to boot if there's insufficient
1147 * space in the variable store. We account for that by refusing the
1148 * write if permitting it would reduce the available space to under
1149 * 50%. However, some firmware won't reclaim variable space until
1150 * after the used (not merely the actively used) space drops below
1151 * a threshold. We can approximate that case with the value calculated
1152 * above. If both the firmware and our calculations indicate that the
1153 * available space would drop below 50%, refuse the write.
1154 */
1155
1156 if (!storage_size || size > remaining_size ||
1157 (max_size && size > max_size))
1158 return EFI_OUT_OF_RESOURCES;
1159
1160 if (!efi_no_storage_paranoia &&
1161 ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
1162 (remaining_size - size < storage_size / 2)))
1163 return EFI_OUT_OF_RESOURCES;
1164
1165 return EFI_SUCCESS;
1166}
1167EXPORT_SYMBOL_GPL(efi_query_variable_store);
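
[Editorial illustration, not part of the patch] efi_query_variable_store() above boils down to two tests: an obvious does-it-fit check against the firmware-reported sizes, and, unless efi_no_storage_paranoia was given on the command line, a refusal when both the firmware's remaining_size and the driver's own active_size estimate say utilisation would cross 50%. A user-space sketch of just that decision, with made-up sizes:

```c
#include <stdbool.h>
#include <stdio.h>

#define VAR_METADATA_SIZE 64	/* per-variable overhead estimate from the patch */

static bool write_allowed(unsigned long long storage_size,
			  unsigned long long remaining_size,
			  unsigned long long max_size,
			  unsigned long long active_size,
			  unsigned long long size,
			  bool no_storage_paranoia)
{
	/* obvious capacity checks, as in the patch */
	if (!storage_size || size > remaining_size ||
	    (max_size && size > max_size))
		return false;

	/* 50% headroom check, keyed on both firmware and local accounting */
	if (!no_storage_paranoia &&
	    ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
	     (remaining_size - size < storage_size / 2)))
		return false;

	return true;
}

int main(void)
{
	/* 128 KiB store, 20 KiB free, 100 KiB already active: a 4 KiB write
	 * is refused by the paranoia check even though it would still fit. */
	printf("%d\n", write_allowed(128 << 10, 20 << 10, 8 << 10,
				     100 << 10, 4 << 10, false));
	return 0;
}
```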
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e31bcd8f2eee..a0a0a4389bbd 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -356,8 +356,7 @@ static int __init sfi_parse_gpio(struct sfi_table_header *table)
356 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry); 356 num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
357 pentry = (struct sfi_gpio_table_entry *)sb->pentry; 357 pentry = (struct sfi_gpio_table_entry *)sb->pentry;
358 358
359 gpio_table = (struct sfi_gpio_table_entry *) 359 gpio_table = kmalloc(num * sizeof(*pentry), GFP_KERNEL);
360 kmalloc(num * sizeof(*pentry), GFP_KERNEL);
361 if (!gpio_table) 360 if (!gpio_table)
362 return -1; 361 return -1;
363 memcpy(gpio_table, pentry, num * sizeof(*pentry)); 362 memcpy(gpio_table, pentry, num * sizeof(*pentry));
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 225bd0f0f675..d62b0a3b5c14 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -85,27 +85,35 @@ unsigned long vrtc_get_time(void)
85 return mktime(year, mon, mday, hour, min, sec); 85 return mktime(year, mon, mday, hour, min, sec);
86} 86}
87 87
88/* Only care about the minutes and seconds */
89int vrtc_set_mmss(unsigned long nowtime) 88int vrtc_set_mmss(unsigned long nowtime)
90{ 89{
91 int real_sec, real_min;
92 unsigned long flags; 90 unsigned long flags;
93 int vrtc_min; 91 struct rtc_time tm;
94 92 int year;
95 spin_lock_irqsave(&rtc_lock, flags); 93 int retval = 0;
96 vrtc_min = vrtc_cmos_read(RTC_MINUTES); 94
97 95 rtc_time_to_tm(nowtime, &tm);
98 real_sec = nowtime % 60; 96 if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
99 real_min = nowtime / 60; 97 /*
100 if (((abs(real_min - vrtc_min) + 15)/30) & 1) 98 * tm.year is the number of years since 1900, and the
101 real_min += 30; 99 * vrtc need the years since 1972.
102 real_min %= 60; 100 */
103 101 year = tm.tm_year - 72;
104 vrtc_cmos_write(real_sec, RTC_SECONDS); 102 spin_lock_irqsave(&rtc_lock, flags);
105 vrtc_cmos_write(real_min, RTC_MINUTES); 103 vrtc_cmos_write(year, RTC_YEAR);
106 spin_unlock_irqrestore(&rtc_lock, flags); 104 vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
107 105 vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
108 return 0; 106 vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
107 vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
108 vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
109 spin_unlock_irqrestore(&rtc_lock, flags);
110 } else {
111 printk(KERN_ERR
112 "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
113 __FUNCTION__, nowtime);
114 retval = -EINVAL;
115 }
116 return retval;
109} 117}
110 118
111void __init mrst_rtc_init(void) 119void __init mrst_rtc_init(void)
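
[Editorial illustration, not part of the patch] The vrtc_set_mmss() rewrite above stops patching only the minutes and seconds and instead writes the full broken-down time, with the year stored as an offset from 1972 as the vRTC expects. A user-space sketch of that conversion step, using gmtime_r() as a stand-in for the kernel's rtc_time_to_tm() and an arbitrary timestamp:

```c
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t nowtime = 1365000000;	/* arbitrary example timestamp */
	struct tm tm;

	gmtime_r(&nowtime, &tm);	/* tm_year = years since 1900 */
	if (tm.tm_year >= 72) {
		int vrtc_year = tm.tm_year - 72;	/* years since 1972 */

		/* month stays 0-based here, mirroring what the patch writes */
		printf("vRTC fields: y=%d m=%d d=%d %02d:%02d:%02d\n",
		       vrtc_year, tm.tm_mon, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
	} else {
		fprintf(stderr, "refusing pre-1972 time\n");
	}
	return 0;
}
```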
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 74704be7b1fe..9a2e590dd202 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -460,7 +460,6 @@ static int setup_power_button(struct platform_device *pdev)
460static void free_power_button(void) 460static void free_power_button(void)
461{ 461{
462 input_unregister_device(power_button_idev); 462 input_unregister_device(power_button_idev);
463 input_free_device(power_button_idev);
464} 463}
465 464
466static int setup_ebook_switch(struct platform_device *pdev) 465static int setup_ebook_switch(struct platform_device *pdev)
@@ -491,7 +490,6 @@ static int setup_ebook_switch(struct platform_device *pdev)
491static void free_ebook_switch(void) 490static void free_ebook_switch(void)
492{ 491{
493 input_unregister_device(ebook_switch_idev); 492 input_unregister_device(ebook_switch_idev);
494 input_free_device(ebook_switch_idev);
495} 493}
496 494
497static int setup_lid_switch(struct platform_device *pdev) 495static int setup_lid_switch(struct platform_device *pdev)
@@ -526,6 +524,7 @@ static int setup_lid_switch(struct platform_device *pdev)
526 524
527err_create_attr: 525err_create_attr:
528 input_unregister_device(lid_switch_idev); 526 input_unregister_device(lid_switch_idev);
527 lid_switch_idev = NULL;
529err_register: 528err_register:
530 input_free_device(lid_switch_idev); 529 input_free_device(lid_switch_idev);
531 return r; 530 return r;
@@ -535,7 +534,6 @@ static void free_lid_switch(void)
535{ 534{
536 device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode); 535 device_remove_file(&lid_switch_idev->dev, &dev_attr_lid_wake_mode);
537 input_unregister_device(lid_switch_idev); 536 input_unregister_device(lid_switch_idev);
538 input_free_device(lid_switch_idev);
539} 537}
540 538
541static int xo1_sci_probe(struct platform_device *pdev) 539static int xo1_sci_probe(struct platform_device *pdev)
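
[Editorial illustration, not part of the patch] The olpc-xo1-sci.c hunks above remove input_free_device() calls that followed a successful input_unregister_device(); in the input API the unregister path already drops the final reference, so freeing afterwards is a double free, and input_free_device() belongs only on the path where registration never succeeded. A kernel-style sketch of that ownership rule (device and function names are made up, not from the patch):

```c
#include <linux/errno.h>
#include <linux/input.h>

static struct input_dev *example_idev;

static int example_setup(void)
{
	int err;

	example_idev = input_allocate_device();
	if (!example_idev)
		return -ENOMEM;

	example_idev->name = "example-button";	/* hypothetical name */

	err = input_register_device(example_idev);
	if (err) {
		/* never registered: freeing is still our responsibility */
		input_free_device(example_idev);
		example_idev = NULL;
		return err;
	}
	return 0;
}

static void example_teardown(void)
{
	/* registered successfully: unregister drops the last reference,
	 * so no input_free_device() here (that was the double free). */
	input_unregister_device(example_idev);
}
```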
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 98718f604eb6..5c86786bbfd2 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -159,10 +159,9 @@ static __init int uv_rtc_allocate_timers(void)
159{ 159{
160 int cpu; 160 int cpu;
161 161
162 blade_info = kmalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); 162 blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
163 if (!blade_info) 163 if (!blade_info)
164 return -ENOMEM; 164 return -ENOMEM;
165 memset(blade_info, 0, uv_possible_blades * sizeof(void *));
166 165
167 for_each_present_cpu(cpu) { 166 for_each_present_cpu(cpu) {
168 int nid = cpu_to_node(cpu); 167 int nid = cpu_to_node(cpu);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 120cee1c3f8d..6d6e907cee46 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -11,6 +11,7 @@
11#include <linux/suspend.h> 11#include <linux/suspend.h>
12#include <linux/export.h> 12#include <linux/export.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/perf_event.h>
14 15
15#include <asm/pgtable.h> 16#include <asm/pgtable.h>
16#include <asm/proto.h> 17#include <asm/proto.h>
@@ -61,11 +62,9 @@ static void __save_processor_state(struct saved_context *ctxt)
61 * descriptor tables 62 * descriptor tables
62 */ 63 */
63#ifdef CONFIG_X86_32 64#ifdef CONFIG_X86_32
64 store_gdt(&ctxt->gdt);
65 store_idt(&ctxt->idt); 65 store_idt(&ctxt->idt);
66#else 66#else
67/* CONFIG_X86_64 */ 67/* CONFIG_X86_64 */
68 store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
69 store_idt((struct desc_ptr *)&ctxt->idt_limit); 68 store_idt((struct desc_ptr *)&ctxt->idt_limit);
70#endif 69#endif
71 store_tr(ctxt->tr); 70 store_tr(ctxt->tr);
@@ -134,7 +133,10 @@ static void fix_processor_context(void)
134{ 133{
135 int cpu = smp_processor_id(); 134 int cpu = smp_processor_id();
136 struct tss_struct *t = &per_cpu(init_tss, cpu); 135 struct tss_struct *t = &per_cpu(init_tss, cpu);
137 136#ifdef CONFIG_X86_64
137 struct desc_struct *desc = get_cpu_gdt_table(cpu);
138 tss_desc tss;
139#endif
138 set_tss_desc(cpu, t); /* 140 set_tss_desc(cpu, t); /*
139 * This just modifies memory; should not be 141 * This just modifies memory; should not be
140 * necessary. But... This is necessary, because 142 * necessary. But... This is necessary, because
@@ -143,7 +145,9 @@ static void fix_processor_context(void)
143 */ 145 */
144 146
145#ifdef CONFIG_X86_64 147#ifdef CONFIG_X86_64
146 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; 148 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
149 tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
150 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
147 151
148 syscall_init(); /* This sets MSR_*STAR and related */ 152 syscall_init(); /* This sets MSR_*STAR and related */
149#endif 153#endif
@@ -182,11 +186,9 @@ static void __restore_processor_state(struct saved_context *ctxt)
182 * ltr is done i fix_processor_context(). 186 * ltr is done i fix_processor_context().
183 */ 187 */
184#ifdef CONFIG_X86_32 188#ifdef CONFIG_X86_32
185 load_gdt(&ctxt->gdt);
186 load_idt(&ctxt->idt); 189 load_idt(&ctxt->idt);
187#else 190#else
188/* CONFIG_X86_64 */ 191/* CONFIG_X86_64 */
189 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
190 load_idt((const struct desc_ptr *)&ctxt->idt_limit); 192 load_idt((const struct desc_ptr *)&ctxt->idt_limit);
191#endif 193#endif
192 194
@@ -228,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
228 do_fpu_end(); 230 do_fpu_end();
229 x86_platform.restore_sched_clock_state(); 231 x86_platform.restore_sched_clock_state();
230 mtrr_bp_restore(); 232 mtrr_bp_restore();
233 perf_restore_debug_store();
231} 234}
232 235
233/* Needed by apm.c */ 236/* Needed by apm.c */
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index bae601f900ef..e8120346903b 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -39,4 +39,5 @@ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/ina
39 39
40HOST_EXTRACFLAGS += -I$(srctree)/tools/include 40HOST_EXTRACFLAGS += -I$(srctree)/tools/include
41hostprogs-y += relocs 41hostprogs-y += relocs
42relocs-objs := relocs_32.o relocs_64.o relocs_common.o
42relocs: $(obj)/relocs 43relocs: $(obj)/relocs
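
[Editorial illustration, not part of the patch] The Makefile now builds relocs_32.o and relocs_64.o from a shared relocs.c, and the relocs.c rework below parameterises the ELF types on ELF_BITS through an ElfW() macro. That macro needs the two extra expansion levels so ELF_BITS is expanded before the ## paste happens; a standalone C illustration:

```c
#include <elf.h>
#include <stdio.h>

#define ELF_BITS 64	/* relocs_64.c would define this before including the shared source */

/* Two-stage indirection: pasting directly in ElfW() would glue the literal
 * token "ELF_BITS" instead of its value. */
#define ElfW(type)	_ElfW(ELF_BITS, type)
#define _ElfW(bits, type) __ElfW(bits, type)
#define __ElfW(bits, type) Elf##bits##_##type

int main(void)
{
	ElfW(Addr) addr = 0xffffffff81000000ULL;	/* expands to Elf64_Addr */

	printf("sizeof(ElfW(Addr)) = %zu\n", sizeof(addr));
	printf("addr = %#llx\n", (unsigned long long)addr);
	return 0;
}
```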
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 79d67bd507fa..590be1090892 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -1,43 +1,36 @@
1#include <stdio.h> 1/* This is included from relocs_32/64.c */
2#include <stdarg.h> 2
3#include <stdlib.h> 3#define ElfW(type) _ElfW(ELF_BITS, type)
4#include <stdint.h> 4#define _ElfW(bits, type) __ElfW(bits, type)
5#include <string.h> 5#define __ElfW(bits, type) Elf##bits##_##type
6#include <errno.h> 6
7#include <unistd.h> 7#define Elf_Addr ElfW(Addr)
8#include <elf.h> 8#define Elf_Ehdr ElfW(Ehdr)
9#include <byteswap.h> 9#define Elf_Phdr ElfW(Phdr)
10#define USE_BSD 10#define Elf_Shdr ElfW(Shdr)
11#include <endian.h> 11#define Elf_Sym ElfW(Sym)
12#include <regex.h> 12
13#include <tools/le_byteshift.h> 13static Elf_Ehdr ehdr;
14 14
15static void die(char *fmt, ...); 15struct relocs {
16 16 uint32_t *offset;
17#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 17 unsigned long count;
18static Elf32_Ehdr ehdr; 18 unsigned long size;
19static unsigned long reloc_count, reloc_idx; 19};
20static unsigned long *relocs; 20
21static unsigned long reloc16_count, reloc16_idx; 21static struct relocs relocs16;
22static unsigned long *relocs16; 22static struct relocs relocs32;
23static struct relocs relocs64;
23 24
24struct section { 25struct section {
25 Elf32_Shdr shdr; 26 Elf_Shdr shdr;
26 struct section *link; 27 struct section *link;
27 Elf32_Sym *symtab; 28 Elf_Sym *symtab;
28 Elf32_Rel *reltab; 29 Elf_Rel *reltab;
29 char *strtab; 30 char *strtab;
30}; 31};
31static struct section *secs; 32static struct section *secs;
32 33
33enum symtype {
34 S_ABS,
35 S_REL,
36 S_SEG,
37 S_LIN,
38 S_NSYMTYPES
39};
40
41static const char * const sym_regex_kernel[S_NSYMTYPES] = { 34static const char * const sym_regex_kernel[S_NSYMTYPES] = {
42/* 35/*
43 * Following symbols have been audited. There values are constant and do 36 * Following symbols have been audited. There values are constant and do
@@ -49,6 +42,9 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
49 "^(xen_irq_disable_direct_reloc$|" 42 "^(xen_irq_disable_direct_reloc$|"
50 "xen_save_fl_direct_reloc$|" 43 "xen_save_fl_direct_reloc$|"
51 "VDSO|" 44 "VDSO|"
45#if ELF_BITS == 64
46 "__vvar_page|"
47#endif
52 "__crc_)", 48 "__crc_)",
53 49
54/* 50/*
@@ -72,6 +68,11 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
72 "__end_rodata|" 68 "__end_rodata|"
73 "__initramfs_start|" 69 "__initramfs_start|"
74 "(jiffies|jiffies_64)|" 70 "(jiffies|jiffies_64)|"
71#if ELF_BITS == 64
72 "__per_cpu_load|"
73 "init_per_cpu__.*|"
74 "__end_rodata_hpage_align|"
75#endif
75 "_end)$" 76 "_end)$"
76}; 77};
77 78
@@ -132,15 +133,6 @@ static void regex_init(int use_real_mode)
132 } 133 }
133} 134}
134 135
135static void die(char *fmt, ...)
136{
137 va_list ap;
138 va_start(ap, fmt);
139 vfprintf(stderr, fmt, ap);
140 va_end(ap);
141 exit(1);
142}
143
144static const char *sym_type(unsigned type) 136static const char *sym_type(unsigned type)
145{ 137{
146 static const char *type_name[] = { 138 static const char *type_name[] = {
@@ -198,6 +190,24 @@ static const char *rel_type(unsigned type)
198{ 190{
199 static const char *type_name[] = { 191 static const char *type_name[] = {
200#define REL_TYPE(X) [X] = #X 192#define REL_TYPE(X) [X] = #X
193#if ELF_BITS == 64
194 REL_TYPE(R_X86_64_NONE),
195 REL_TYPE(R_X86_64_64),
196 REL_TYPE(R_X86_64_PC32),
197 REL_TYPE(R_X86_64_GOT32),
198 REL_TYPE(R_X86_64_PLT32),
199 REL_TYPE(R_X86_64_COPY),
200 REL_TYPE(R_X86_64_GLOB_DAT),
201 REL_TYPE(R_X86_64_JUMP_SLOT),
202 REL_TYPE(R_X86_64_RELATIVE),
203 REL_TYPE(R_X86_64_GOTPCREL),
204 REL_TYPE(R_X86_64_32),
205 REL_TYPE(R_X86_64_32S),
206 REL_TYPE(R_X86_64_16),
207 REL_TYPE(R_X86_64_PC16),
208 REL_TYPE(R_X86_64_8),
209 REL_TYPE(R_X86_64_PC8),
210#else
201 REL_TYPE(R_386_NONE), 211 REL_TYPE(R_386_NONE),
202 REL_TYPE(R_386_32), 212 REL_TYPE(R_386_32),
203 REL_TYPE(R_386_PC32), 213 REL_TYPE(R_386_PC32),
@@ -213,6 +223,7 @@ static const char *rel_type(unsigned type)
213 REL_TYPE(R_386_PC8), 223 REL_TYPE(R_386_PC8),
214 REL_TYPE(R_386_16), 224 REL_TYPE(R_386_16),
215 REL_TYPE(R_386_PC16), 225 REL_TYPE(R_386_PC16),
226#endif
216#undef REL_TYPE 227#undef REL_TYPE
217 }; 228 };
218 const char *name = "unknown type rel type name"; 229 const char *name = "unknown type rel type name";
@@ -240,7 +251,7 @@ static const char *sec_name(unsigned shndx)
240 return name; 251 return name;
241} 252}
242 253
243static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym) 254static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
244{ 255{
245 const char *name; 256 const char *name;
246 name = "<noname>"; 257 name = "<noname>";
@@ -253,15 +264,42 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
253 return name; 264 return name;
254} 265}
255 266
267static Elf_Sym *sym_lookup(const char *symname)
268{
269 int i;
270 for (i = 0; i < ehdr.e_shnum; i++) {
271 struct section *sec = &secs[i];
272 long nsyms;
273 char *strtab;
274 Elf_Sym *symtab;
275 Elf_Sym *sym;
276
277 if (sec->shdr.sh_type != SHT_SYMTAB)
278 continue;
256 279
280 nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
281 symtab = sec->symtab;
282 strtab = sec->link->strtab;
283
284 for (sym = symtab; --nsyms >= 0; sym++) {
285 if (!sym->st_name)
286 continue;
287 if (strcmp(symname, strtab + sym->st_name) == 0)
288 return sym;
289 }
290 }
291 return 0;
292}
257 293
258#if BYTE_ORDER == LITTLE_ENDIAN 294#if BYTE_ORDER == LITTLE_ENDIAN
259#define le16_to_cpu(val) (val) 295#define le16_to_cpu(val) (val)
260#define le32_to_cpu(val) (val) 296#define le32_to_cpu(val) (val)
297#define le64_to_cpu(val) (val)
261#endif 298#endif
262#if BYTE_ORDER == BIG_ENDIAN 299#if BYTE_ORDER == BIG_ENDIAN
263#define le16_to_cpu(val) bswap_16(val) 300#define le16_to_cpu(val) bswap_16(val)
264#define le32_to_cpu(val) bswap_32(val) 301#define le32_to_cpu(val) bswap_32(val)
302#define le64_to_cpu(val) bswap_64(val)
265#endif 303#endif
266 304
267static uint16_t elf16_to_cpu(uint16_t val) 305static uint16_t elf16_to_cpu(uint16_t val)
@@ -274,6 +312,23 @@ static uint32_t elf32_to_cpu(uint32_t val)
274 return le32_to_cpu(val); 312 return le32_to_cpu(val);
275} 313}
276 314
315#define elf_half_to_cpu(x) elf16_to_cpu(x)
316#define elf_word_to_cpu(x) elf32_to_cpu(x)
317
318#if ELF_BITS == 64
319static uint64_t elf64_to_cpu(uint64_t val)
320{
321 return le64_to_cpu(val);
322}
323#define elf_addr_to_cpu(x) elf64_to_cpu(x)
324#define elf_off_to_cpu(x) elf64_to_cpu(x)
325#define elf_xword_to_cpu(x) elf64_to_cpu(x)
326#else
327#define elf_addr_to_cpu(x) elf32_to_cpu(x)
328#define elf_off_to_cpu(x) elf32_to_cpu(x)
329#define elf_xword_to_cpu(x) elf32_to_cpu(x)
330#endif
331
277static void read_ehdr(FILE *fp) 332static void read_ehdr(FILE *fp)
278{ 333{
279 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) { 334 if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
@@ -283,8 +338,8 @@ static void read_ehdr(FILE *fp)
283 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) { 338 if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
284 die("No ELF magic\n"); 339 die("No ELF magic\n");
285 } 340 }
286 if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) { 341 if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) {
287 die("Not a 32 bit executable\n"); 342 die("Not a %d bit executable\n", ELF_BITS);
288 } 343 }
289 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) { 344 if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
290 die("Not a LSB ELF executable\n"); 345 die("Not a LSB ELF executable\n");
@@ -293,36 +348,36 @@ static void read_ehdr(FILE *fp)
293 die("Unknown ELF version\n"); 348 die("Unknown ELF version\n");
294 } 349 }
295 /* Convert the fields to native endian */ 350 /* Convert the fields to native endian */
296 ehdr.e_type = elf16_to_cpu(ehdr.e_type); 351 ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
297 ehdr.e_machine = elf16_to_cpu(ehdr.e_machine); 352 ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
298 ehdr.e_version = elf32_to_cpu(ehdr.e_version); 353 ehdr.e_version = elf_word_to_cpu(ehdr.e_version);
299 ehdr.e_entry = elf32_to_cpu(ehdr.e_entry); 354 ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry);
300 ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff); 355 ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff);
301 ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff); 356 ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff);
302 ehdr.e_flags = elf32_to_cpu(ehdr.e_flags); 357 ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags);
303 ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize); 358 ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize);
304 ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize); 359 ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
305 ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum); 360 ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum);
306 ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize); 361 ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
307 ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum); 362 ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
308 ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx); 363 ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
309 364
310 if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) { 365 if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
311 die("Unsupported ELF header type\n"); 366 die("Unsupported ELF header type\n");
312 } 367 }
313 if (ehdr.e_machine != EM_386) { 368 if (ehdr.e_machine != ELF_MACHINE) {
314 die("Not for x86\n"); 369 die("Not for %s\n", ELF_MACHINE_NAME);
315 } 370 }
316 if (ehdr.e_version != EV_CURRENT) { 371 if (ehdr.e_version != EV_CURRENT) {
317 die("Unknown ELF version\n"); 372 die("Unknown ELF version\n");
318 } 373 }
319 if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) { 374 if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) {
320 die("Bad Elf header size\n"); 375 die("Bad Elf header size\n");
321 } 376 }
322 if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) { 377 if (ehdr.e_phentsize != sizeof(Elf_Phdr)) {
323 die("Bad program header entry\n"); 378 die("Bad program header entry\n");
324 } 379 }
325 if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) { 380 if (ehdr.e_shentsize != sizeof(Elf_Shdr)) {
326 die("Bad section header entry\n"); 381 die("Bad section header entry\n");
327 } 382 }
328 if (ehdr.e_shstrndx >= ehdr.e_shnum) { 383 if (ehdr.e_shstrndx >= ehdr.e_shnum) {
@@ -333,7 +388,7 @@ static void read_ehdr(FILE *fp)
333static void read_shdrs(FILE *fp) 388static void read_shdrs(FILE *fp)
334{ 389{
335 int i; 390 int i;
336 Elf32_Shdr shdr; 391 Elf_Shdr shdr;
337 392
338 secs = calloc(ehdr.e_shnum, sizeof(struct section)); 393 secs = calloc(ehdr.e_shnum, sizeof(struct section));
339 if (!secs) { 394 if (!secs) {
@@ -349,16 +404,16 @@ static void read_shdrs(FILE *fp)
349 if (fread(&shdr, sizeof shdr, 1, fp) != 1) 404 if (fread(&shdr, sizeof shdr, 1, fp) != 1)
350 die("Cannot read ELF section headers %d/%d: %s\n", 405 die("Cannot read ELF section headers %d/%d: %s\n",
351 i, ehdr.e_shnum, strerror(errno)); 406 i, ehdr.e_shnum, strerror(errno));
352 sec->shdr.sh_name = elf32_to_cpu(shdr.sh_name); 407 sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
353 sec->shdr.sh_type = elf32_to_cpu(shdr.sh_type); 408 sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
354 sec->shdr.sh_flags = elf32_to_cpu(shdr.sh_flags); 409 sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
355 sec->shdr.sh_addr = elf32_to_cpu(shdr.sh_addr); 410 sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr);
356 sec->shdr.sh_offset = elf32_to_cpu(shdr.sh_offset); 411 sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset);
357 sec->shdr.sh_size = elf32_to_cpu(shdr.sh_size); 412 sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size);
358 sec->shdr.sh_link = elf32_to_cpu(shdr.sh_link); 413 sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link);
359 sec->shdr.sh_info = elf32_to_cpu(shdr.sh_info); 414 sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
360 sec->shdr.sh_addralign = elf32_to_cpu(shdr.sh_addralign); 415 sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
361 sec->shdr.sh_entsize = elf32_to_cpu(shdr.sh_entsize); 416 sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
362 if (sec->shdr.sh_link < ehdr.e_shnum) 417 if (sec->shdr.sh_link < ehdr.e_shnum)
363 sec->link = &secs[sec->shdr.sh_link]; 418 sec->link = &secs[sec->shdr.sh_link];
364 } 419 }
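
read_shdrs() fills one struct section per ELF section header; the structure itself is declared earlier in relocs.c and not shown in this diff. A sketch of the layout the accesses here imply (field order is a guess):

	struct section {
		Elf_Shdr	shdr;		/* byte-swapped copy of the header */
		struct section	*link;		/* secs[sh_link], e.g. its strtab  */
		char		*strtab;	/* loaded string table, if any     */
		Elf_Sym		*symtab;	/* loaded symbol table, if any     */
		Elf_Rel		*reltab;	/* loaded relocation table, if any */
	};
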
@@ -412,12 +467,12 @@ static void read_symtabs(FILE *fp)
412 die("Cannot read symbol table: %s\n", 467 die("Cannot read symbol table: %s\n",
413 strerror(errno)); 468 strerror(errno));
414 } 469 }
415 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { 470 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
416 Elf32_Sym *sym = &sec->symtab[j]; 471 Elf_Sym *sym = &sec->symtab[j];
417 sym->st_name = elf32_to_cpu(sym->st_name); 472 sym->st_name = elf_word_to_cpu(sym->st_name);
418 sym->st_value = elf32_to_cpu(sym->st_value); 473 sym->st_value = elf_addr_to_cpu(sym->st_value);
419 sym->st_size = elf32_to_cpu(sym->st_size); 474 sym->st_size = elf_xword_to_cpu(sym->st_size);
420 sym->st_shndx = elf16_to_cpu(sym->st_shndx); 475 sym->st_shndx = elf_half_to_cpu(sym->st_shndx);
421 } 476 }
422 } 477 }
423} 478}
@@ -428,7 +483,7 @@ static void read_relocs(FILE *fp)
428 int i,j; 483 int i,j;
429 for (i = 0; i < ehdr.e_shnum; i++) { 484 for (i = 0; i < ehdr.e_shnum; i++) {
430 struct section *sec = &secs[i]; 485 struct section *sec = &secs[i];
431 if (sec->shdr.sh_type != SHT_REL) { 486 if (sec->shdr.sh_type != SHT_REL_TYPE) {
432 continue; 487 continue;
433 } 488 }
434 sec->reltab = malloc(sec->shdr.sh_size); 489 sec->reltab = malloc(sec->shdr.sh_size);
@@ -445,10 +500,13 @@ static void read_relocs(FILE *fp)
445 die("Cannot read symbol table: %s\n", 500 die("Cannot read symbol table: %s\n",
446 strerror(errno)); 501 strerror(errno));
447 } 502 }
448 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 503 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
449 Elf32_Rel *rel = &sec->reltab[j]; 504 Elf_Rel *rel = &sec->reltab[j];
450 rel->r_offset = elf32_to_cpu(rel->r_offset); 505 rel->r_offset = elf_addr_to_cpu(rel->r_offset);
451 rel->r_info = elf32_to_cpu(rel->r_info); 506 rel->r_info = elf_xword_to_cpu(rel->r_info);
507#if (SHT_REL_TYPE == SHT_RELA)
508 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
509#endif
452 } 510 }
453 } 511 }
454} 512}
@@ -457,6 +515,13 @@ static void read_relocs(FILE *fp)
457static void print_absolute_symbols(void) 515static void print_absolute_symbols(void)
458{ 516{
459 int i; 517 int i;
518 const char *format;
519
520 if (ELF_BITS == 64)
521 format = "%5d %016"PRIx64" %5"PRId64" %10s %10s %12s %s\n";
522 else
523 format = "%5d %08"PRIx32" %5"PRId32" %10s %10s %12s %s\n";
524
460 printf("Absolute symbols\n"); 525 printf("Absolute symbols\n");
461 printf(" Num: Value Size Type Bind Visibility Name\n"); 526 printf(" Num: Value Size Type Bind Visibility Name\n");
462 for (i = 0; i < ehdr.e_shnum; i++) { 527 for (i = 0; i < ehdr.e_shnum; i++) {
@@ -468,19 +533,19 @@ static void print_absolute_symbols(void)
468 continue; 533 continue;
469 } 534 }
470 sym_strtab = sec->link->strtab; 535 sym_strtab = sec->link->strtab;
471 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { 536 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
472 Elf32_Sym *sym; 537 Elf_Sym *sym;
473 const char *name; 538 const char *name;
474 sym = &sec->symtab[j]; 539 sym = &sec->symtab[j];
475 name = sym_name(sym_strtab, sym); 540 name = sym_name(sym_strtab, sym);
476 if (sym->st_shndx != SHN_ABS) { 541 if (sym->st_shndx != SHN_ABS) {
477 continue; 542 continue;
478 } 543 }
479 printf("%5d %08x %5d %10s %10s %12s %s\n", 544 printf(format,
480 j, sym->st_value, sym->st_size, 545 j, sym->st_value, sym->st_size,
481 sym_type(ELF32_ST_TYPE(sym->st_info)), 546 sym_type(ELF_ST_TYPE(sym->st_info)),
482 sym_bind(ELF32_ST_BIND(sym->st_info)), 547 sym_bind(ELF_ST_BIND(sym->st_info)),
483 sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)), 548 sym_visibility(ELF_ST_VISIBILITY(sym->st_other)),
484 name); 549 name);
485 } 550 }
486 } 551 }
@@ -490,14 +555,20 @@ static void print_absolute_symbols(void)
490static void print_absolute_relocs(void) 555static void print_absolute_relocs(void)
491{ 556{
492 int i, printed = 0; 557 int i, printed = 0;
558 const char *format;
559
560 if (ELF_BITS == 64)
561 format = "%016"PRIx64" %016"PRIx64" %10s %016"PRIx64" %s\n";
562 else
563 format = "%08"PRIx32" %08"PRIx32" %10s %08"PRIx32" %s\n";
493 564
494 for (i = 0; i < ehdr.e_shnum; i++) { 565 for (i = 0; i < ehdr.e_shnum; i++) {
495 struct section *sec = &secs[i]; 566 struct section *sec = &secs[i];
496 struct section *sec_applies, *sec_symtab; 567 struct section *sec_applies, *sec_symtab;
497 char *sym_strtab; 568 char *sym_strtab;
498 Elf32_Sym *sh_symtab; 569 Elf_Sym *sh_symtab;
499 int j; 570 int j;
500 if (sec->shdr.sh_type != SHT_REL) { 571 if (sec->shdr.sh_type != SHT_REL_TYPE) {
501 continue; 572 continue;
502 } 573 }
503 sec_symtab = sec->link; 574 sec_symtab = sec->link;
@@ -507,12 +578,12 @@ static void print_absolute_relocs(void)
507 } 578 }
508 sh_symtab = sec_symtab->symtab; 579 sh_symtab = sec_symtab->symtab;
509 sym_strtab = sec_symtab->link->strtab; 580 sym_strtab = sec_symtab->link->strtab;
510 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 581 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
511 Elf32_Rel *rel; 582 Elf_Rel *rel;
512 Elf32_Sym *sym; 583 Elf_Sym *sym;
513 const char *name; 584 const char *name;
514 rel = &sec->reltab[j]; 585 rel = &sec->reltab[j];
515 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; 586 sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
516 name = sym_name(sym_strtab, sym); 587 name = sym_name(sym_strtab, sym);
517 if (sym->st_shndx != SHN_ABS) { 588 if (sym->st_shndx != SHN_ABS) {
518 continue; 589 continue;
@@ -542,10 +613,10 @@ static void print_absolute_relocs(void)
542 printed = 1; 613 printed = 1;
543 } 614 }
544 615
545 printf("%08x %08x %10s %08x %s\n", 616 printf(format,
546 rel->r_offset, 617 rel->r_offset,
547 rel->r_info, 618 rel->r_info,
548 rel_type(ELF32_R_TYPE(rel->r_info)), 619 rel_type(ELF_R_TYPE(rel->r_info)),
549 sym->st_value, 620 sym->st_value,
550 name); 621 name);
551 } 622 }
@@ -555,19 +626,34 @@ static void print_absolute_relocs(void)
555 printf("\n"); 626 printf("\n");
556} 627}
557 628
558static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym), 629static void add_reloc(struct relocs *r, uint32_t offset)
559 int use_real_mode) 630{
631 if (r->count == r->size) {
632 unsigned long newsize = r->size + 50000;
633 void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
634
635 if (!mem)
636 die("realloc of %ld entries for relocs failed\n",
637 newsize);
638 r->offset = mem;
639 r->size = newsize;
640 }
641 r->offset[r->count++] = offset;
642}
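
add_reloc() and sort_relocs() treat struct relocs as a growable array of 32-bit offsets; its definition is outside this hunk, so the layout below is inferred from those accesses rather than quoted:

	struct relocs {
		uint32_t	*offset;	/* relocation sites, grown in 50000-entry steps */
		unsigned long	count;		/* entries in use    */
		unsigned long	size;		/* entries allocated */
	};

	static struct relocs relocs16, relocs32, relocs64;
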
643
644static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
645 Elf_Sym *sym, const char *symname))
560{ 646{
561 int i; 647 int i;
562 /* Walk through the relocations */ 648 /* Walk through the relocations */
563 for (i = 0; i < ehdr.e_shnum; i++) { 649 for (i = 0; i < ehdr.e_shnum; i++) {
564 char *sym_strtab; 650 char *sym_strtab;
565 Elf32_Sym *sh_symtab; 651 Elf_Sym *sh_symtab;
566 struct section *sec_applies, *sec_symtab; 652 struct section *sec_applies, *sec_symtab;
567 int j; 653 int j;
568 struct section *sec = &secs[i]; 654 struct section *sec = &secs[i];
569 655
570 if (sec->shdr.sh_type != SHT_REL) { 656 if (sec->shdr.sh_type != SHT_REL_TYPE) {
571 continue; 657 continue;
572 } 658 }
573 sec_symtab = sec->link; 659 sec_symtab = sec->link;
@@ -577,101 +663,281 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
577 } 663 }
578 sh_symtab = sec_symtab->symtab; 664 sh_symtab = sec_symtab->symtab;
579 sym_strtab = sec_symtab->link->strtab; 665 sym_strtab = sec_symtab->link->strtab;
580 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { 666 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
581 Elf32_Rel *rel; 667 Elf_Rel *rel = &sec->reltab[j];
582 Elf32_Sym *sym; 668 Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
583 unsigned r_type; 669 const char *symname = sym_name(sym_strtab, sym);
584 const char *symname;
585 int shn_abs;
586 670
587 rel = &sec->reltab[j]; 671 process(sec, rel, sym, symname);
588 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)]; 672 }
589 r_type = ELF32_R_TYPE(rel->r_info); 673 }
590 674}
591 shn_abs = sym->st_shndx == SHN_ABS;
592
593 switch (r_type) {
594 case R_386_NONE:
595 case R_386_PC32:
596 case R_386_PC16:
597 case R_386_PC8:
598 /*
599 * NONE can be ignored and and PC relative
600 * relocations don't need to be adjusted.
601 */
602 break;
603 675
604 case R_386_16: 676/*
605 symname = sym_name(sym_strtab, sym); 677 * The .data..percpu section is a special case for x86_64 SMP kernels.
606 if (!use_real_mode) 678 * It is used to initialize the actual per_cpu areas and to provide
607 goto bad; 679 * definitions for the per_cpu variables that correspond to their offsets
608 if (shn_abs) { 680 * within the percpu area. Since the values of all of the symbols need
609 if (is_reloc(S_ABS, symname)) 681 * to be offsets from the start of the per_cpu area the virtual address
610 break; 682 * to be offsets from the start of the per_cpu area, the virtual address
611 else if (!is_reloc(S_SEG, symname)) 683 *
612 goto bad; 684 * This means that:
613 } else { 685 *
614 if (is_reloc(S_LIN, symname)) 686 * Relocations that reference symbols in the per_cpu area do not
615 goto bad; 687 * need further relocation (since the value is an offset relative
616 else 688 * to the start of the per_cpu area that does not change).
617 break; 689 *
618 } 690 * Relocations that apply to the per_cpu area need to have their
619 visit(rel, sym); 691 * offset adjusted by the value of __per_cpu_load to make them
620 break; 692 * point to the correct place in the loaded image (because the
693 * virtual address of .data..percpu is 0).
694 *
695 * For non SMP kernels .data..percpu is linked as part of the normal
696 * kernel data and does not require special treatment.
697 *
698 */
699static int per_cpu_shndx = -1;
700Elf_Addr per_cpu_load_addr;
621 701
622 case R_386_32: 702static void percpu_init(void)
623 symname = sym_name(sym_strtab, sym); 703{
624 if (shn_abs) { 704 int i;
625 if (is_reloc(S_ABS, symname)) 705 for (i = 0; i < ehdr.e_shnum; i++) {
626 break; 706 ElfW(Sym) *sym;
627 else if (!is_reloc(S_REL, symname)) 707 if (strcmp(sec_name(i), ".data..percpu"))
628 goto bad; 708 continue;
629 } else { 709
630 if (use_real_mode && 710 if (secs[i].shdr.sh_addr != 0) /* non SMP kernel */
631 !is_reloc(S_LIN, symname)) 711 return;
632 break; 712
633 } 713 sym = sym_lookup("__per_cpu_load");
634 visit(rel, sym); 714 if (!sym)
635 break; 715 die("can't find __per_cpu_load\n");
636 default: 716
637 die("Unsupported relocation type: %s (%d)\n", 717 per_cpu_shndx = i;
638 rel_type(r_type), r_type); 718 per_cpu_load_addr = sym->st_value;
719 return;
720 }
721}
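
A toy illustration of the rule spelled out in the comment above, with made-up numbers: a per-cpu symbol keeps its small offset as its value, while a relocation site that lands inside .data..percpu must be rebased by __per_cpu_load before it points into the loaded image.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t per_cpu_load = 0xffffffff81c00000ULL;	/* __per_cpu_load (made up)  */
		uint64_t sym_value    = 0x120;	/* per-cpu symbol: stays an offset, no fixup */
		uint64_t reloc_site   = 0x048;	/* r_offset inside .data..percpu             */

		/* Only the site is rebased, exactly as do_reloc64() does below. */
		printf("emit reloc at %#llx, symbol value stays %#llx\n",
		       (unsigned long long)(per_cpu_load + reloc_site),
		       (unsigned long long)sym_value);
		return 0;
	}
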
722
723#if ELF_BITS == 64
724
725/*
726 * Check to see if a symbol lies in the .data..percpu section.
727 * For some as yet not understood reason the "__init_begin"
728 * symbol which immediately precedes the .data..percpu section
729 * also shows up as if it were part of it, so we do an explicit
730 * check for that symbol name and ignore it.
731 */
732static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
733{
734 return (sym->st_shndx == per_cpu_shndx) &&
735 strcmp(symname, "__init_begin");
736}
737
738
739static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
740 const char *symname)
741{
742 unsigned r_type = ELF64_R_TYPE(rel->r_info);
743 ElfW(Addr) offset = rel->r_offset;
744 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
745
746 if (sym->st_shndx == SHN_UNDEF)
747 return 0;
748
749 /*
750 * Adjust the offset if this reloc applies to the percpu section.
751 */
752 if (sec->shdr.sh_info == per_cpu_shndx)
753 offset += per_cpu_load_addr;
754
755 switch (r_type) {
756 case R_X86_64_NONE:
757 case R_X86_64_PC32:
758 /*
759 * NONE can be ignored and PC relative relocations don't
760 * need to be adjusted.
761 */
762 break;
763
764 case R_X86_64_32:
765 case R_X86_64_32S:
766 case R_X86_64_64:
767 /*
768 * References to the percpu area don't need to be adjusted.
769 */
770 if (is_percpu_sym(sym, symname))
771 break;
772
773 if (shn_abs) {
774 /*
775 * Whitelisted absolute symbols do not require
776 * relocation.
777 */
778 if (is_reloc(S_ABS, symname))
639 break; 779 break;
640 bad: 780
641 symname = sym_name(sym_strtab, sym); 781 die("Invalid absolute %s relocation: %s\n",
642 die("Invalid %s %s relocation: %s\n", 782 rel_type(r_type), symname);
643 shn_abs ? "absolute" : "relative", 783 break;
644 rel_type(r_type), symname);
645 }
646 } 784 }
785
786 /*
787 * Relocation offsets for 64 bit kernels are output
788 * as 32 bits and sign extended back to 64 bits when
789 * the relocations are processed.
790 * Make sure that the offset will fit.
791 */
792 if ((int32_t)offset != (int64_t)offset)
793 die("Relocation offset doesn't fit in 32 bits\n");
794
795 if (r_type == R_X86_64_64)
796 add_reloc(&relocs64, offset);
797 else
798 add_reloc(&relocs32, offset);
799 break;
800
801 default:
802 die("Unsupported relocation type: %s (%d)\n",
803 rel_type(r_type), r_type);
804 break;
647 } 805 }
806
807 return 0;
648} 808}
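
The 32-bit overflow check above is easier to see with concrete values (a standalone sketch, not taken from the tool): addresses in the kernel's top-2GiB mapping survive the truncate-and-sign-extend round trip, anything else would be rejected.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ok  = 0xffffffff81000000ULL;	/* typical kernel text address */
		uint64_t bad = 0x0000000100000000ULL;	/* needs more than 32 bits     */

		printf("%d %d\n",
		       (int32_t)ok  == (int64_t)ok,	/* 1: round-trips intact       */
		       (int32_t)bad == (int64_t)bad);	/* 0: would trigger the die()  */
		return 0;
	}
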
649 809
650static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym) 810#else
811
812static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
813 const char *symname)
651{ 814{
652 if (ELF32_R_TYPE(rel->r_info) == R_386_16) 815 unsigned r_type = ELF32_R_TYPE(rel->r_info);
653 reloc16_count++; 816 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
654 else 817
655 reloc_count++; 818 switch (r_type) {
819 case R_386_NONE:
820 case R_386_PC32:
821 case R_386_PC16:
822 case R_386_PC8:
823 /*
824 * NONE can be ignored and PC relative relocations don't
825 * need to be adjusted.
826 */
827 break;
828
829 case R_386_32:
830 if (shn_abs) {
831 /*
832 * Whitelisted absolute symbols do not require
833 * relocation.
834 */
835 if (is_reloc(S_ABS, symname))
836 break;
837
838 die("Invalid absolute %s relocation: %s\n",
839 rel_type(r_type), symname);
840 break;
841 }
842
843 add_reloc(&relocs32, rel->r_offset);
844 break;
845
846 default:
847 die("Unsupported relocation type: %s (%d)\n",
848 rel_type(r_type), r_type);
849 break;
850 }
851
852 return 0;
656} 853}
657 854
658static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym) 855static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
856 const char *symname)
659{ 857{
660 /* Remember the address that needs to be adjusted. */ 858 unsigned r_type = ELF32_R_TYPE(rel->r_info);
661 if (ELF32_R_TYPE(rel->r_info) == R_386_16) 859 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
662 relocs16[reloc16_idx++] = rel->r_offset; 860
663 else 861 switch (r_type) {
664 relocs[reloc_idx++] = rel->r_offset; 862 case R_386_NONE:
863 case R_386_PC32:
864 case R_386_PC16:
865 case R_386_PC8:
866 /*
867 * NONE can be ignored and PC relative relocations don't
868 * need to be adjusted.
869 */
870 break;
871
872 case R_386_16:
873 if (shn_abs) {
874 /*
875 * Whitelisted absolute symbols do not require
876 * relocation.
877 */
878 if (is_reloc(S_ABS, symname))
879 break;
880
881 if (is_reloc(S_SEG, symname)) {
882 add_reloc(&relocs16, rel->r_offset);
883 break;
884 }
885 } else {
886 if (!is_reloc(S_LIN, symname))
887 break;
888 }
889 die("Invalid %s %s relocation: %s\n",
890 shn_abs ? "absolute" : "relative",
891 rel_type(r_type), symname);
892 break;
893
894 case R_386_32:
895 if (shn_abs) {
896 /*
897 * Whitelisted absolute symbols do not require
898 * relocation.
899 */
900 if (is_reloc(S_ABS, symname))
901 break;
902
903 if (is_reloc(S_REL, symname)) {
904 add_reloc(&relocs32, rel->r_offset);
905 break;
906 }
907 } else {
908 if (is_reloc(S_LIN, symname))
909 add_reloc(&relocs32, rel->r_offset);
910 break;
911 }
912 die("Invalid %s %s relocation: %s\n",
913 shn_abs ? "absolute" : "relative",
914 rel_type(r_type), symname);
915 break;
916
917 default:
918 die("Unsupported relocation type: %s (%d)\n",
919 rel_type(r_type), r_type);
920 break;
921 }
922
923 return 0;
665} 924}
666 925
926#endif
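
The S_ABS/S_REL/S_SEG/S_LIN tests in the do_reloc*() handlers above consult per-class symbol-name whitelists compiled by regex_init(); those tables live earlier in relocs.c and are not part of this diff. Roughly, is_reloc() is assumed to look like this (sketch only):

	static regex_t sym_regex_c[S_NSYMTYPES];	/* compiled by regex_init() */

	static int is_reloc(enum symtype type, const char *symname)
	{
		/* Does the symbol name match the whitelist for this class? */
		return !regexec(&sym_regex_c[type], symname, 0, NULL, 0);
	}
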
927
667static int cmp_relocs(const void *va, const void *vb) 928static int cmp_relocs(const void *va, const void *vb)
668{ 929{
669 const unsigned long *a, *b; 930 const uint32_t *a, *b;
670 a = va; b = vb; 931 a = va; b = vb;
671 return (*a == *b)? 0 : (*a > *b)? 1 : -1; 932 return (*a == *b)? 0 : (*a > *b)? 1 : -1;
672} 933}
673 934
674static int write32(unsigned int v, FILE *f) 935static void sort_relocs(struct relocs *r)
936{
937 qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
938}
939
940static int write32(uint32_t v, FILE *f)
675{ 941{
676 unsigned char buf[4]; 942 unsigned char buf[4];
677 943
@@ -679,33 +945,40 @@ static int write32(unsigned int v, FILE *f)
679 return fwrite(buf, 1, 4, f) == 4 ? 0 : -1; 945 return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
680} 946}
681 947
948static int write32_as_text(uint32_t v, FILE *f)
949{
950 return fprintf(f, "\t.long 0x%08"PRIx32"\n", v) > 0 ? 0 : -1;
951}
952
682static void emit_relocs(int as_text, int use_real_mode) 953static void emit_relocs(int as_text, int use_real_mode)
683{ 954{
684 int i; 955 int i;
685 /* Count how many relocations I have and allocate space for them. */ 956 int (*write_reloc)(uint32_t, FILE *) = write32;
686 reloc_count = 0; 957 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
687 walk_relocs(count_reloc, use_real_mode); 958 const char *symname);
688 relocs = malloc(reloc_count * sizeof(relocs[0])); 959
689 if (!relocs) { 960#if ELF_BITS == 64
690 die("malloc of %d entries for relocs failed\n", 961 if (!use_real_mode)
691 reloc_count); 962 do_reloc = do_reloc64;
692 } 963 else
964 die("--realmode not valid for a 64-bit ELF file");
965#else
966 if (!use_real_mode)
967 do_reloc = do_reloc32;
968 else
969 do_reloc = do_reloc_real;
970#endif
693 971
694 relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
695 if (!relocs16) {
696 die("malloc of %d entries for relocs16 failed\n",
697 reloc16_count);
698 }
699 /* Collect up the relocations */ 972 /* Collect up the relocations */
700 reloc_idx = 0; 973 walk_relocs(do_reloc);
701 walk_relocs(collect_reloc, use_real_mode);
702 974
703 if (reloc16_count && !use_real_mode) 975 if (relocs16.count && !use_real_mode)
704 die("Segment relocations found but --realmode not specified\n"); 976 die("Segment relocations found but --realmode not specified\n");
705 977
706 /* Order the relocations for more efficient processing */ 978 /* Order the relocations for more efficient processing */
707 qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs); 979 sort_relocs(&relocs16);
708 qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs); 980 sort_relocs(&relocs32);
981 sort_relocs(&relocs64);
709 982
710 /* Print the relocations */ 983 /* Print the relocations */
711 if (as_text) { 984 if (as_text) {
@@ -714,114 +987,60 @@ static void emit_relocs(int as_text, int use_real_mode)
714 */ 987 */
715 printf(".section \".data.reloc\",\"a\"\n"); 988 printf(".section \".data.reloc\",\"a\"\n");
716 printf(".balign 4\n"); 989 printf(".balign 4\n");
717 if (use_real_mode) { 990 write_reloc = write32_as_text;
718 printf("\t.long %lu\n", reloc16_count);
719 for (i = 0; i < reloc16_count; i++)
720 printf("\t.long 0x%08lx\n", relocs16[i]);
721 printf("\t.long %lu\n", reloc_count);
722 for (i = 0; i < reloc_count; i++) {
723 printf("\t.long 0x%08lx\n", relocs[i]);
724 }
725 } else {
726 /* Print a stop */
727 printf("\t.long 0x%08lx\n", (unsigned long)0);
728 for (i = 0; i < reloc_count; i++) {
729 printf("\t.long 0x%08lx\n", relocs[i]);
730 }
731 }
732
733 printf("\n");
734 } 991 }
735 else {
736 if (use_real_mode) {
737 write32(reloc16_count, stdout);
738 for (i = 0; i < reloc16_count; i++)
739 write32(relocs16[i], stdout);
740 write32(reloc_count, stdout);
741 992
742 /* Now print each relocation */ 993 if (use_real_mode) {
743 for (i = 0; i < reloc_count; i++) 994 write_reloc(relocs16.count, stdout);
744 write32(relocs[i], stdout); 995 for (i = 0; i < relocs16.count; i++)
745 } else { 996 write_reloc(relocs16.offset[i], stdout);
997
998 write_reloc(relocs32.count, stdout);
999 for (i = 0; i < relocs32.count; i++)
1000 write_reloc(relocs32.offset[i], stdout);
1001 } else {
1002 if (ELF_BITS == 64) {
746 /* Print a stop */ 1003 /* Print a stop */
747 write32(0, stdout); 1004 write_reloc(0, stdout);
748 1005
749 /* Now print each relocation */ 1006 /* Now print each relocation */
750 for (i = 0; i < reloc_count; i++) { 1007 for (i = 0; i < relocs64.count; i++)
751 write32(relocs[i], stdout); 1008 write_reloc(relocs64.offset[i], stdout);
752 }
753 } 1009 }
1010
1011 /* Print a stop */
1012 write_reloc(0, stdout);
1013
1014 /* Now print each relocation */
1015 for (i = 0; i < relocs32.count; i++)
1016 write_reloc(relocs32.offset[i], stdout);
754 } 1017 }
755} 1018}
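
For reference, the byte stream emit_relocs() writes to stdout can be read straight off the code: with --realmode it is a 32-bit count of 16-bit/segment sites followed by those offsets, then a count of 32-bit sites and those offsets; without --realmode each list is instead preceded by a zero word (the 64-bit list first for 64-bit files), which presumably acts as a terminator when the table is walked from its far end. A self-contained reader for the --realmode stream, assuming little-endian words as the tools/le_byteshift.h include suggests:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t rd32(FILE *f)			/* little-endian 32-bit word */
	{
		unsigned char b[4];

		if (fread(b, 1, 4, f) != 4)
			return 0;
		return b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
	}

	int main(void)
	{
		uint32_t n, i;

		n = rd32(stdin);			/* 16-bit/segment sites */
		for (i = 0; i < n; i++)
			printf("seg site 0x%08" PRIx32 "\n", rd32(stdin));

		n = rd32(stdin);			/* 32-bit sites */
		for (i = 0; i < n; i++)
			printf("lin site 0x%08" PRIx32 "\n", rd32(stdin));
		return 0;
	}
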
756 1019
757static void usage(void) 1020#if ELF_BITS == 64
758{ 1021# define process process_64
759 die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n"); 1022#else
760} 1023# define process process_32
1024#endif
761 1025
762int main(int argc, char **argv) 1026void process(FILE *fp, int use_real_mode, int as_text,
1027 int show_absolute_syms, int show_absolute_relocs)
763{ 1028{
764 int show_absolute_syms, show_absolute_relocs;
765 int as_text, use_real_mode;
766 const char *fname;
767 FILE *fp;
768 int i;
769
770 show_absolute_syms = 0;
771 show_absolute_relocs = 0;
772 as_text = 0;
773 use_real_mode = 0;
774 fname = NULL;
775 for (i = 1; i < argc; i++) {
776 char *arg = argv[i];
777 if (*arg == '-') {
778 if (strcmp(arg, "--abs-syms") == 0) {
779 show_absolute_syms = 1;
780 continue;
781 }
782 if (strcmp(arg, "--abs-relocs") == 0) {
783 show_absolute_relocs = 1;
784 continue;
785 }
786 if (strcmp(arg, "--text") == 0) {
787 as_text = 1;
788 continue;
789 }
790 if (strcmp(arg, "--realmode") == 0) {
791 use_real_mode = 1;
792 continue;
793 }
794 }
795 else if (!fname) {
796 fname = arg;
797 continue;
798 }
799 usage();
800 }
801 if (!fname) {
802 usage();
803 }
804 regex_init(use_real_mode); 1029 regex_init(use_real_mode);
805 fp = fopen(fname, "r");
806 if (!fp) {
807 die("Cannot open %s: %s\n",
808 fname, strerror(errno));
809 }
810 read_ehdr(fp); 1030 read_ehdr(fp);
811 read_shdrs(fp); 1031 read_shdrs(fp);
812 read_strtabs(fp); 1032 read_strtabs(fp);
813 read_symtabs(fp); 1033 read_symtabs(fp);
814 read_relocs(fp); 1034 read_relocs(fp);
1035 if (ELF_BITS == 64)
1036 percpu_init();
815 if (show_absolute_syms) { 1037 if (show_absolute_syms) {
816 print_absolute_symbols(); 1038 print_absolute_symbols();
817 goto out; 1039 return;
818 } 1040 }
819 if (show_absolute_relocs) { 1041 if (show_absolute_relocs) {
820 print_absolute_relocs(); 1042 print_absolute_relocs();
821 goto out; 1043 return;
822 } 1044 }
823 emit_relocs(as_text, use_real_mode); 1045 emit_relocs(as_text, use_real_mode);
824out:
825 fclose(fp);
826 return 0;
827} 1046}
diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h
new file mode 100644
index 000000000000..07cdb1eca4fa
--- /dev/null
+++ b/arch/x86/tools/relocs.h
@@ -0,0 +1,36 @@
1#ifndef RELOCS_H
2#define RELOCS_H
3
4#include <stdio.h>
5#include <stdarg.h>
6#include <stdlib.h>
7#include <stdint.h>
8#include <inttypes.h>
9#include <string.h>
10#include <errno.h>
11#include <unistd.h>
12#include <elf.h>
13#include <byteswap.h>
14#define USE_BSD
15#include <endian.h>
16#include <regex.h>
17#include <tools/le_byteshift.h>
18
19void die(char *fmt, ...);
20
21#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
22
23enum symtype {
24 S_ABS,
25 S_REL,
26 S_SEG,
27 S_LIN,
28 S_NSYMTYPES
29};
30
31void process_32(FILE *fp, int use_real_mode, int as_text,
32 int show_absolute_syms, int show_absolute_relocs);
33void process_64(FILE *fp, int use_real_mode, int as_text,
34 int show_absolute_syms, int show_absolute_relocs);
35
36#endif /* RELOCS_H */
diff --git a/arch/x86/tools/relocs_32.c b/arch/x86/tools/relocs_32.c
new file mode 100644
index 000000000000..b2ade2bb4162
--- /dev/null
+++ b/arch/x86/tools/relocs_32.c
@@ -0,0 +1,17 @@
1#include "relocs.h"
2
3#define ELF_BITS 32
4
5#define ELF_MACHINE EM_386
6#define ELF_MACHINE_NAME "i386"
7#define SHT_REL_TYPE SHT_REL
8#define Elf_Rel ElfW(Rel)
9
10#define ELF_CLASS ELFCLASS32
11#define ELF_R_SYM(val) ELF32_R_SYM(val)
12#define ELF_R_TYPE(val) ELF32_R_TYPE(val)
13#define ELF_ST_TYPE(o) ELF32_ST_TYPE(o)
14#define ELF_ST_BIND(o) ELF32_ST_BIND(o)
15#define ELF_ST_VISIBILITY(o) ELF32_ST_VISIBILITY(o)
16
17#include "relocs.c"
diff --git a/arch/x86/tools/relocs_64.c b/arch/x86/tools/relocs_64.c
new file mode 100644
index 000000000000..56b61b743c4c
--- /dev/null
+++ b/arch/x86/tools/relocs_64.c
@@ -0,0 +1,17 @@
1#include "relocs.h"
2
3#define ELF_BITS 64
4
5#define ELF_MACHINE EM_X86_64
6#define ELF_MACHINE_NAME "x86_64"
7#define SHT_REL_TYPE SHT_RELA
8#define Elf_Rel Elf64_Rela
9
10#define ELF_CLASS ELFCLASS64
11#define ELF_R_SYM(val) ELF64_R_SYM(val)
12#define ELF_R_TYPE(val) ELF64_R_TYPE(val)
13#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
14#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
15#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
16
17#include "relocs.c"
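
relocs_32.c and relocs_64.c compile the shared relocs.c twice, once per ELF class, with the type names selected by these macros. The ElfW() token-pasting helper they rely on is defined in the shared file, outside this diff; an illustrative sketch of the usual pattern:

	#define __ElfW(bits, type)	Elf##bits##_##type
	#define _ElfW(bits, type)	__ElfW(bits, type)
	#define ElfW(type)		_ElfW(ELF_BITS, type)

	/* With ELF_BITS == 64, ElfW(Sym) expands to Elf64_Sym, and aliases
	 * such as Elf_Ehdr, Elf_Shdr and Elf_Sym can be built on top of it. */
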
diff --git a/arch/x86/tools/relocs_common.c b/arch/x86/tools/relocs_common.c
new file mode 100644
index 000000000000..44d396823a53
--- /dev/null
+++ b/arch/x86/tools/relocs_common.c
@@ -0,0 +1,76 @@
1#include "relocs.h"
2
3void die(char *fmt, ...)
4{
5 va_list ap;
6 va_start(ap, fmt);
7 vfprintf(stderr, fmt, ap);
8 va_end(ap);
9 exit(1);
10}
11
12static void usage(void)
13{
14 die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
15}
16
17int main(int argc, char **argv)
18{
19 int show_absolute_syms, show_absolute_relocs;
20 int as_text, use_real_mode;
21 const char *fname;
22 FILE *fp;
23 int i;
24 unsigned char e_ident[EI_NIDENT];
25
26 show_absolute_syms = 0;
27 show_absolute_relocs = 0;
28 as_text = 0;
29 use_real_mode = 0;
30 fname = NULL;
31 for (i = 1; i < argc; i++) {
32 char *arg = argv[i];
33 if (*arg == '-') {
34 if (strcmp(arg, "--abs-syms") == 0) {
35 show_absolute_syms = 1;
36 continue;
37 }
38 if (strcmp(arg, "--abs-relocs") == 0) {
39 show_absolute_relocs = 1;
40 continue;
41 }
42 if (strcmp(arg, "--text") == 0) {
43 as_text = 1;
44 continue;
45 }
46 if (strcmp(arg, "--realmode") == 0) {
47 use_real_mode = 1;
48 continue;
49 }
50 }
51 else if (!fname) {
52 fname = arg;
53 continue;
54 }
55 usage();
56 }
57 if (!fname) {
58 usage();
59 }
60 fp = fopen(fname, "r");
61 if (!fp) {
62 die("Cannot open %s: %s\n", fname, strerror(errno));
63 }
64 if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT) {
65 die("Cannot read %s: %s", fname, strerror(errno));
66 }
67 rewind(fp);
68 if (e_ident[EI_CLASS] == ELFCLASS64)
69 process_64(fp, use_real_mode, as_text,
70 show_absolute_syms, show_absolute_relocs);
71 else
72 process_32(fp, use_real_mode, as_text,
73 show_absolute_syms, show_absolute_relocs);
74 fclose(fp);
75 return 0;
76}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 131dacd2748a..1a3c76505649 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -4,7 +4,7 @@
4 4
5config XEN 5config XEN
6 bool "Xen guest support" 6 bool "Xen guest support"
7 select PARAVIRT 7 depends on PARAVIRT
8 select PARAVIRT_CLOCK 8 select PARAVIRT_CLOCK
9 select XEN_HAVE_PVMMU 9 select XEN_HAVE_PVMMU
10 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) 10 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8e1c7b95c3b..53d4f680c9b5 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/edd.h>
34 35
35#include <xen/xen.h> 36#include <xen/xen.h>
36#include <xen/events.h> 37#include <xen/events.h>
@@ -1220,7 +1221,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1220 .alloc_ldt = xen_alloc_ldt, 1221 .alloc_ldt = xen_alloc_ldt,
1221 .free_ldt = xen_free_ldt, 1222 .free_ldt = xen_free_ldt,
1222 1223
1223 .store_gdt = native_store_gdt,
1224 .store_idt = native_store_idt, 1224 .store_idt = native_store_idt,
1225 .store_tr = xen_store_tr, 1225 .store_tr = xen_store_tr,
1226 1226
@@ -1306,6 +1306,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
1306 .emergency_restart = xen_emergency_restart, 1306 .emergency_restart = xen_emergency_restart,
1307}; 1307};
1308 1308
1309static void __init xen_boot_params_init_edd(void)
1310{
1311#if IS_ENABLED(CONFIG_EDD)
1312 struct xen_platform_op op;
1313 struct edd_info *edd_info;
1314 u32 *mbr_signature;
1315 unsigned nr;
1316 int ret;
1317
1318 edd_info = boot_params.eddbuf;
1319 mbr_signature = boot_params.edd_mbr_sig_buffer;
1320
1321 op.cmd = XENPF_firmware_info;
1322
1323 op.u.firmware_info.type = XEN_FW_DISK_INFO;
1324 for (nr = 0; nr < EDDMAXNR; nr++) {
1325 struct edd_info *info = edd_info + nr;
1326
1327 op.u.firmware_info.index = nr;
1328 info->params.length = sizeof(info->params);
1329 set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
1330 &info->params);
1331 ret = HYPERVISOR_dom0_op(&op);
1332 if (ret)
1333 break;
1334
1335#define C(x) info->x = op.u.firmware_info.u.disk_info.x
1336 C(device);
1337 C(version);
1338 C(interface_support);
1339 C(legacy_max_cylinder);
1340 C(legacy_max_head);
1341 C(legacy_sectors_per_track);
1342#undef C
1343 }
1344 boot_params.eddbuf_entries = nr;
1345
1346 op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
1347 for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
1348 op.u.firmware_info.index = nr;
1349 ret = HYPERVISOR_dom0_op(&op);
1350 if (ret)
1351 break;
1352 mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
1353 }
1354 boot_params.edd_mbr_sig_buf_entries = nr;
1355#endif
1356}
1357
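
The C(x) helper above is a small field-copy idiom: name the field once in a macro, expand it per field, then #undef it. A standalone illustration with hypothetical structures:

	#include <stdio.h>

	struct src { int device, version; };
	struct dst { int device, version; };

	int main(void)
	{
		struct src s = { .device = 0x80, .version = 3 };
		struct dst d;

	/* Copy identically named fields without spelling out both sides. */
	#define C(x) d.x = s.x
		C(device);
		C(version);
	#undef C

		printf("%d %d\n", d.device, d.version);
		return 0;
	}
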
1309/* 1358/*
1310 * Set up the GDT and segment registers for -fstack-protector. Until 1359 * Set up the GDT and segment registers for -fstack-protector. Until
1311 * we do this, we have to be careful not to call any stack-protected 1360 * we do this, we have to be careful not to call any stack-protected
@@ -1508,6 +1557,8 @@ asmlinkage void __init xen_start_kernel(void)
1508 /* Avoid searching for BIOS MP tables */ 1557 /* Avoid searching for BIOS MP tables */
1509 x86_init.mpparse.find_smp_config = x86_init_noop; 1558 x86_init.mpparse.find_smp_config = x86_init_noop;
1510 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 1559 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
1560
1561 xen_boot_params_init_edd();
1511 } 1562 }
1512#ifdef CONFIG_PCI 1563#ifdef CONFIG_PCI
1513 /* PCI BIOS service won't work from a PV guest. */ 1564 /* PCI BIOS service won't work from a PV guest. */
@@ -1589,8 +1640,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
1589 switch (action) { 1640 switch (action) {
1590 case CPU_UP_PREPARE: 1641 case CPU_UP_PREPARE:
1591 xen_vcpu_setup(cpu); 1642 xen_vcpu_setup(cpu);
1592 if (xen_have_vector_callback) 1643 if (xen_have_vector_callback) {
1593 xen_init_lock_cpu(cpu); 1644 xen_init_lock_cpu(cpu);
1645 if (xen_feature(XENFEAT_hvm_safe_pvclock))
1646 xen_setup_timer(cpu);
1647 }
1594 break; 1648 break;
1595 default: 1649 default:
1596 break; 1650 break;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e8e34938c57d..fdc3ba28ca38 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
1467 __xen_write_cr3(true, cr3); 1467 __xen_write_cr3(true, cr3);
1468 1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ 1469 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1470
1471 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1472} 1470}
1473#endif 1471#endif
1474 1472
@@ -1750,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
1750} 1748}
1751 1749
1752/* Set the page permissions on an identity-mapped pages */ 1750/* Set the page permissions on an identity-mapped pages */
1753static void set_page_prot(void *addr, pgprot_t prot) 1751static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1754{ 1752{
1755 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; 1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1756 pte_t pte = pfn_pte(pfn, prot); 1754 pte_t pte = pfn_pte(pfn, prot);
1757 1755
1758 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) 1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1759 BUG(); 1757 BUG();
1760} 1758}
1759static void set_page_prot(void *addr, pgprot_t prot)
1760{
1761 return set_page_prot_flags(addr, prot, UVMF_NONE);
1762}
1761#ifdef CONFIG_X86_32 1763#ifdef CONFIG_X86_32
1762static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) 1764static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1763{ 1765{
@@ -1841,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1841 unsigned long addr) 1843 unsigned long addr)
1842{ 1844{
1843 if (*pt_base == PFN_DOWN(__pa(addr))) { 1845 if (*pt_base == PFN_DOWN(__pa(addr))) {
1844 set_page_prot((void *)addr, PAGE_KERNEL); 1846 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1845 clear_page((void *)addr); 1847 clear_page((void *)addr);
1846 (*pt_base)++; 1848 (*pt_base)++;
1847 } 1849 }
1848 if (*pt_end == PFN_DOWN(__pa(addr))) { 1850 if (*pt_end == PFN_DOWN(__pa(addr))) {
1849 set_page_prot((void *)addr, PAGE_KERNEL); 1851 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1850 clear_page((void *)addr); 1852 clear_page((void *)addr);
1851 (*pt_end)--; 1853 (*pt_end)--;
1852 } 1854 }
@@ -2041,9 +2043,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2041 2043
2042 switch (idx) { 2044 switch (idx) {
2043 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2045 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2044#ifdef CONFIG_X86_F00F_BUG 2046 case FIX_RO_IDT:
2045 case FIX_F00F_IDT:
2046#endif
2047#ifdef CONFIG_X86_32 2047#ifdef CONFIG_X86_32
2048 case FIX_WP_TEST: 2048 case FIX_WP_TEST:
2049 case FIX_VDSO: 2049 case FIX_VDSO:
@@ -2122,6 +2122,7 @@ static void __init xen_post_allocator_init(void)
2122#endif 2122#endif
2123 2123
2124#ifdef CONFIG_X86_64 2124#ifdef CONFIG_X86_64
2125 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2125 SetPagePinned(virt_to_page(level3_user_vsyscall)); 2126 SetPagePinned(virt_to_page(level3_user_vsyscall));
2126#endif 2127#endif
2127 xen_mark_init_mm_pinned(); 2128 xen_mark_init_mm_pinned();
@@ -2197,6 +2198,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2197 .lazy_mode = { 2198 .lazy_mode = {
2198 .enter = paravirt_enter_lazy_mmu, 2199 .enter = paravirt_enter_lazy_mmu,
2199 .leave = xen_leave_lazy_mmu, 2200 .leave = xen_leave_lazy_mmu,
2201 .flush = paravirt_flush_lazy_mmu,
2200 }, 2202 },
2201 2203
2202 .set_fixmap = xen_set_fixmap, 2204 .set_fixmap = xen_set_fixmap,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 09ea61d2e02f..8ff37995d54e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
95static void __cpuinit cpu_bringup_and_idle(void) 95static void __cpuinit cpu_bringup_and_idle(void)
96{ 96{
97 cpu_bringup(); 97 cpu_bringup();
98 cpu_idle(); 98 cpu_startup_entry(CPUHP_ONLINE);
99} 99}
100 100
101static int xen_smp_intr_init(unsigned int cpu) 101static int xen_smp_intr_init(unsigned int cpu)
@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
144 goto fail; 144 goto fail;
145 per_cpu(xen_callfuncsingle_irq, cpu) = rc; 145 per_cpu(xen_callfuncsingle_irq, cpu) = rc;
146 146
147 /*
148 * The IRQ worker on PVHVM goes through the native path and uses the
149 * IPI mechanism.
150 */
151 if (xen_hvm_domain())
152 return 0;
153
147 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); 154 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
148 rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, 155 rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
149 cpu, 156 cpu,
@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
167 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) 174 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
168 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), 175 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
169 NULL); 176 NULL);
177 if (xen_hvm_domain())
178 return rc;
179
170 if (per_cpu(xen_irq_work, cpu) >= 0) 180 if (per_cpu(xen_irq_work, cpu) >= 0)
171 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); 181 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
172 182
@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
418 428
419static void xen_cpu_die(unsigned int cpu) 429static void xen_cpu_die(unsigned int cpu)
420{ 430{
421 while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { 431 while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
422 current->state = TASK_UNINTERRUPTIBLE; 432 current->state = TASK_UNINTERRUPTIBLE;
423 schedule_timeout(HZ/10); 433 schedule_timeout(HZ/10);
424 } 434 }
@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
426 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); 436 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
427 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); 437 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
428 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); 438 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
429 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); 439 if (!xen_hvm_domain())
440 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
430 xen_uninit_lock_cpu(cpu); 441 xen_uninit_lock_cpu(cpu);
431 xen_teardown_timer(cpu); 442 xen_teardown_timer(cpu);
432} 443}
@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
657 668
658static void xen_hvm_cpu_die(unsigned int cpu) 669static void xen_hvm_cpu_die(unsigned int cpu)
659{ 670{
660 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL); 671 xen_cpu_die(cpu);
661 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
662 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
663 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
664 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
665 native_cpu_die(cpu); 672 native_cpu_die(cpu);
666} 673}
667 674
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f7a080ef0354..8b54603ce816 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
364 int irq; 364 int irq;
365 const char *name; 365 const char *name;
366 366
367 WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
368 cpu, per_cpu(lock_kicker_irq, cpu));
369
370 /*
371 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
372 * (xen: disable PV spinlocks on HVM)
373 */
374 if (xen_hvm_domain())
375 return;
376
367 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); 377 name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
368 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, 378 irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
369 cpu, 379 cpu,
@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
382 392
383void xen_uninit_lock_cpu(int cpu) 393void xen_uninit_lock_cpu(int cpu)
384{ 394{
395 /*
396 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
397 * (xen: disable PV spinlocks on HVM)
398 */
399 if (xen_hvm_domain())
400 return;
401
385 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); 402 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
403 per_cpu(lock_kicker_irq, cpu) = -1;
386} 404}
387 405
388void __init xen_init_spinlocks(void) 406void __init xen_init_spinlocks(void)
389{ 407{
408 /*
409 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
410 * (xen: disable PV spinlocks on HVM)
411 */
412 if (xen_hvm_domain())
413 return;
414
390 BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t)); 415 BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
391 416
392 pv_lock_ops.spin_is_locked = xen_spin_is_locked; 417 pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0296a9522501..3d88bfdf9e1c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
377 377
378static const struct clock_event_device *xen_clockevent = 378static const struct clock_event_device *xen_clockevent =
379 &xen_timerop_clockevent; 379 &xen_timerop_clockevent;
380static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events); 380static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
381 381
382static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) 382static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
383{ 383{
@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
401 struct clock_event_device *evt; 401 struct clock_event_device *evt;
402 int irq; 402 int irq;
403 403
404 evt = &per_cpu(xen_clock_events, cpu);
405 WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
406
404 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); 407 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
405 408
406 name = kasprintf(GFP_KERNEL, "timer%d", cpu); 409 name = kasprintf(GFP_KERNEL, "timer%d", cpu);
@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
413 IRQF_FORCE_RESUME, 416 IRQF_FORCE_RESUME,
414 name, NULL); 417 name, NULL);
415 418
416 evt = &per_cpu(xen_clock_events, cpu);
417 memcpy(evt, xen_clockevent, sizeof(*evt)); 419 memcpy(evt, xen_clockevent, sizeof(*evt));
418 420
419 evt->cpumask = cpumask_of(cpu); 421 evt->cpumask = cpumask_of(cpu);
@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
426 BUG_ON(cpu == 0); 428 BUG_ON(cpu == 0);
427 evt = &per_cpu(xen_clock_events, cpu); 429 evt = &per_cpu(xen_clock_events, cpu);
428 unbind_from_irqhandler(evt->irq, NULL); 430 unbind_from_irqhandler(evt->irq, NULL);
431 evt->irq = -1;
429} 432}
430 433
431void xen_setup_cpu_clockevents(void) 434void xen_setup_cpu_clockevents(void)
@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
497{ 500{
498 int cpu = smp_processor_id(); 501 int cpu = smp_processor_id();
499 xen_setup_runstate_info(cpu); 502 xen_setup_runstate_info(cpu);
500 xen_setup_timer(cpu); 503 /*
504 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
505 * doing it xen_hvm_cpu_notify (which gets called by smp_init during
506 * early bootup and also during CPU hotplug events).
507 */
501 xen_setup_cpu_clockevents(); 508 xen_setup_cpu_clockevents();
502} 509}
503 510
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 35876ffac11d..b09de49dbec5 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,7 +9,7 @@ config XTENSA
9 select HAVE_IDE 9 select HAVE_IDE
10 select GENERIC_ATOMIC64 10 select GENERIC_ATOMIC64
11 select HAVE_GENERIC_HARDIRQS 11 select HAVE_GENERIC_HARDIRQS
12 select HAVE_VIRT_TO_BUS 12 select VIRT_TO_BUS
13 select GENERIC_IRQ_SHOW 13 select GENERIC_IRQ_SHOW
14 select GENERIC_CPU_DEVICES 14 select GENERIC_CPU_DEVICES
15 select MODULES_USE_ELF_RELA 15 select MODULES_USE_ELF_RELA
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 5cd82e9f601c..1c85323f01d7 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -105,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti)
105/* 105/*
106 * Powermanagement idle function, if any is provided by the platform. 106 * Powermanagement idle function, if any is provided by the platform.
107 */ 107 */
108 108void arch_cpu_idle(void)
109void cpu_idle(void)
110{ 109{
111 local_irq_enable(); 110 platform_idle();
112
113 /* endless idle loop with no priority at all */
114 while (1) {
115 rcu_idle_enter();
116 while (!need_resched())
117 platform_idle();
118 rcu_idle_exit();
119 schedule_preempt_disabled();
120 }
121} 111}
122 112
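
The loop deleted above now lives in generic code: cpu_startup_entry() runs the common idle loop and calls arch_cpu_idle() for the architecture-specific wait. A rough sketch of its shape (not the real kernel/cpu/idle.c, which also handles RCU and tick bookkeeping):

	while (1) {
		while (!need_resched())
			arch_cpu_idle();	/* xtensa: platform_idle() */
		schedule_preempt_disabled();
	}
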
123/* 113/*
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 7a5156ffebb6..bba125b4bb06 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -208,32 +208,17 @@ void __init mem_init(void)
208 highmemsize >> 10); 208 highmemsize >> 10);
209} 209}
210 210
211void
212free_reserved_mem(void *start, void *end)
213{
214 for (; start < end; start += PAGE_SIZE) {
215 ClearPageReserved(virt_to_page(start));
216 init_page_count(virt_to_page(start));
217 free_page((unsigned long)start);
218 totalram_pages++;
219 }
220}
221
222#ifdef CONFIG_BLK_DEV_INITRD 211#ifdef CONFIG_BLK_DEV_INITRD
223extern int initrd_is_mapped; 212extern int initrd_is_mapped;
224 213
225void free_initrd_mem(unsigned long start, unsigned long end) 214void free_initrd_mem(unsigned long start, unsigned long end)
226{ 215{
227 if (initrd_is_mapped) { 216 if (initrd_is_mapped)
228 free_reserved_mem((void*)start, (void*)end); 217 free_reserved_area(start, end, 0, "initrd");
229 printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
230 }
231} 218}
232#endif 219#endif
233 220
234void free_initmem(void) 221void free_initmem(void)
235{ 222{
236 free_reserved_mem(__init_begin, __init_end); 223 free_initmem_default(0);
237 printk("Freeing unused kernel memory: %zuk freed\n",
238 (__init_end - __init_begin) >> 10);
239} 224}